author     Paul Mackerras <paulus@samba.org>   2007-05-10 07:08:37 -0400
committer  Paul Mackerras <paulus@samba.org>   2007-05-10 07:08:37 -0400
commit     2ecf042ef530dd0943e41d84b6344f507941af3e (patch)
tree       73100361dd74e3f80f14c7c81ba4675948983f44 /drivers/net
parent     32a56ebb24f23da1bbaf24292acf85b6c04526ab (diff)
parent     de5603748af8bf7deac403e6ba92887f8d18e812 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c509.c | 5
-rw-r--r--  drivers/net/3c59x.c | 2
-rw-r--r--  drivers/net/Kconfig | 16
-rw-r--r--  drivers/net/Makefile | 9
-rw-r--r--  drivers/net/atl1/atl1_main.c | 12
-rw-r--r--  drivers/net/atp.c | 8
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/eepro.c | 2
-rw-r--r--  drivers/net/eepro100.c | 2
-rw-r--r--  drivers/net/epic100.c | 10
-rw-r--r--  drivers/net/hamradio/Kconfig | 2
-rw-r--r--  drivers/net/irda/donauboe.h | 2
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c | 2
-rw-r--r--  drivers/net/meth.h | 2
-rw-r--r--  drivers/net/mlx4/Makefile | 4
-rw-r--r--  drivers/net/mlx4/alloc.c | 179
-rw-r--r--  drivers/net/mlx4/catas.c | 70
-rw-r--r--  drivers/net/mlx4/cmd.c | 429
-rw-r--r--  drivers/net/mlx4/cq.c | 254
-rw-r--r--  drivers/net/mlx4/eq.c | 696
-rw-r--r--  drivers/net/mlx4/fw.c | 775
-rw-r--r--  drivers/net/mlx4/fw.h | 167
-rw-r--r--  drivers/net/mlx4/icm.c | 379
-rw-r--r--  drivers/net/mlx4/icm.h | 135
-rw-r--r--  drivers/net/mlx4/intf.c | 165
-rw-r--r--  drivers/net/mlx4/main.c | 936
-rw-r--r--  drivers/net/mlx4/mcg.c | 380
-rw-r--r--  drivers/net/mlx4/mlx4.h | 348
-rw-r--r--  drivers/net/mlx4/mr.c | 479
-rw-r--r--  drivers/net/mlx4/pd.c | 102
-rw-r--r--  drivers/net/mlx4/profile.c | 238
-rw-r--r--  drivers/net/mlx4/qp.c | 280
-rw-r--r--  drivers/net/mlx4/reset.c | 181
-rw-r--r--  drivers/net/mlx4/srq.c | 227
-rw-r--r--  drivers/net/natsemi.c | 1
-rw-r--r--  drivers/net/ne2k-pci.c | 3
-rw-r--r--  drivers/net/pcmcia/ibmtr_cs.c | 14
-rw-r--r--  drivers/net/phy/phy.c | 6
-rw-r--r--  drivers/net/skge.c | 4
-rw-r--r--  drivers/net/sundance.c | 3
-rw-r--r--  drivers/net/tg3.c | 11
-rw-r--r--  drivers/net/tg3.h | 2
-rw-r--r--  drivers/net/tulip/interrupt.c | 2
-rw-r--r--  drivers/net/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 2
-rw-r--r--  drivers/net/typhoon.c | 2
-rw-r--r--  drivers/net/usb/Kconfig | 338
-rw-r--r--  drivers/net/usb/Makefile | 23
-rw-r--r--  drivers/net/usb/asix.c | 1490
-rw-r--r--  drivers/net/usb/catc.c | 963
-rw-r--r--  drivers/net/usb/cdc_ether.c | 570
-rw-r--r--  drivers/net/usb/cdc_subset.c | 344
-rw-r--r--  drivers/net/usb/dm9601.c | 619
-rw-r--r--  drivers/net/usb/gl620a.c | 245
-rw-r--r--  drivers/net/usb/kaweth.c | 1337
-rw-r--r--  drivers/net/usb/kawethfw.h | 557
-rw-r--r--  drivers/net/usb/mcs7830.c | 534
-rw-r--r--  drivers/net/usb/net1080.c | 615
-rw-r--r--  drivers/net/usb/pegasus.c | 1504
-rw-r--r--  drivers/net/usb/pegasus.h | 307
-rw-r--r--  drivers/net/usb/plusb.c | 150
-rw-r--r--  drivers/net/usb/rndis_host.c | 727
-rw-r--r--  drivers/net/usb/rtl8150.c | 1004
-rw-r--r--  drivers/net/usb/usbnet.c | 1304
-rw-r--r--  drivers/net/usb/usbnet.h | 200
-rw-r--r--  drivers/net/usb/zaurus.c | 385
-rw-r--r--  drivers/net/wireless/airport.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h | 18
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 4
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 81
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h | 19
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 2
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 2
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 4
-rw-r--r--  drivers/net/wireless/wavelan_cs.p.h | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 4
-rw-r--r--  drivers/net/yellowfin.c | 1
78 files changed, 19732 insertions, 177 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9588da3a30e7..127f60841b10 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -95,8 +95,7 @@ static int max_interrupt_work = 10;
 #include <asm/io.h>
 #include <asm/irq.h>
 
-static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
-static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n";
+static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
 
 #if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA))
 #define EL3_SUSPEND
@@ -360,7 +359,7 @@ static int __init el3_common_init(struct net_device *dev)
 	printk(", IRQ %d.\n", dev->irq);
 
 	if (el3_debug > 0)
-		printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+		printk(KERN_INFO "%s", version);
 	return 0;
 
 }
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80924f76dee8..f26ca331615e 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -103,7 +103,7 @@ static int vortex_debug = 1;
 
 
 static char version[] __devinitdata =
-DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n";
+DRV_NAME ": Donald Becker and others.\n";
 
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b86ccd2ecd5b..fa489b10c38c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2493,12 +2493,28 @@ config PASEMI_MAC
 	  This driver supports the on-chip 1/10Gbit Ethernet controller on
 	  PA Semi's PWRficient line of chips.
 
+config MLX4_CORE
+	tristate
+	depends on PCI
+	default n
+
+config MLX4_DEBUG
+	bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+	default y
+	---help---
+	  This option causes debugging code to be compiled into the
+	  mlx4_core driver.  The output can be turned on via the
+	  debug_level module parameter (which can also be set after
+	  the driver is loaded through sysfs).
+
 endmenu
 
 source "drivers/net/tokenring/Kconfig"
 
 source "drivers/net/wireless/Kconfig"
 
+source "drivers/net/usb/Kconfig"
+
 source "drivers/net/pcmcia/Kconfig"
 
 source "drivers/net/wan/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 59c0459a037c..a77affa4f6e6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -197,6 +197,7 @@ obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
 obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
+obj-$(CONFIG_MLX4_CORE) += mlx4/
 
 obj-$(CONFIG_MACB) += macb.o
 
@@ -206,6 +207,14 @@ obj-$(CONFIG_TR) += tokenring/
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_ARCNET) += arcnet/
 obj-$(CONFIG_NET_PCMCIA) += pcmcia/
+
+obj-$(CONFIG_USB_CATC) += usb/
+obj-$(CONFIG_USB_KAWETH) += usb/
+obj-$(CONFIG_USB_PEGASUS) += usb/
+obj-$(CONFIG_USB_RTL8150) += usb/
+obj-$(CONFIG_USB_USBNET) += usb/
+obj-$(CONFIG_USB_ZD1201) += usb/
+
 obj-y += wireless/
 obj-$(CONFIG_NET_TULIP) += tulip/
 obj-$(CONFIG_HAMRADIO) += hamradio/
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index d28f88bbdd5f..78cf00ff3d38 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -2038,6 +2038,15 @@ static int atl1_close(struct net_device *netdev)
 	return 0;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void atl1_poll_controller(struct net_device *netdev)
+{
+	disable_irq(netdev->irq);
+	atl1_intr(netdev->irq, netdev);
+	enable_irq(netdev->irq);
+}
+#endif
+
 /*
  * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
  * will assert. We do soft reset <0x1400=1> according
@@ -2190,6 +2199,9 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	netdev->do_ioctl = &atl1_ioctl;
 	netdev->tx_timeout = &atl1_tx_timeout;
 	netdev->watchdog_timeo = 5 * HZ;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = atl1_poll_controller;
+#endif
 	netdev->vlan_rx_register = atl1_vlan_rx_register;
 	netdev->vlan_rx_add_vid = atl1_vlan_rx_add_vid;
 	netdev->vlan_rx_kill_vid = atl1_vlan_rx_kill_vid;
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 18aba838c1ff..82d78ff8399b 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -31,10 +31,8 @@
 
 */
 
-static const char versionA[] =
+static const char version[] =
 "atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
-static const char versionB[] =
-" http://www.scyld.com/network/atp.html\n";
 
 /* The user-configurable values.
    These may be modified when a driver module is loaded.*/
@@ -324,7 +322,7 @@ static int __init atp_probe1(long ioaddr)
 
 #ifndef MODULE
 	if (net_debug)
-		printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+		printk(KERN_INFO "%s", version);
 #endif
 
 	printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM "
@@ -926,7 +924,7 @@ static void set_rx_mode_8012(struct net_device *dev)
 
 static int __init atp_init_module(void) {
 	if (debug)		/* Emit version even if no cards detected. */
-		printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+		printk(KERN_INFO "%s", version);
 	return atp_init();
 }
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 724bce51f936..223517dcbcfd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3461,7 +3461,7 @@ void bond_unregister_arp(struct bonding *bond)
 /*---------------------------- Hashing Policies -----------------------------*/
 
 /*
- * Hash for the the output device based upon layer 3 and layer 4 data. If
+ * Hash for the output device based upon layer 3 and layer 4 data. If
  * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
  * altogether not IP, mimic bond_xmit_hash_policy_l2()
  */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 3a03a74c0609..637ae8f68791 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev)
 	int i;
 #endif
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
 
 	e1000_release_manageability(adapter);
 
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 39654e1e2bed..47680237f783 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1126,7 +1126,7 @@ static void eepro_tx_timeout (struct net_device *dev)
 	printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
 		"network cable problem");
 	/* This is not a duplicate. One message for the console,
-	   one for the the log file  */
+	   one for the log file  */
 	printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
 		"network cable problem");
 	eepro_complete_selreset(ioaddr);
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 6c267c38df97..9800341956a2 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -28,7 +28,7 @@
 */
 
 static const char * const version =
-"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
+"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
 "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
 
 /* A few user-configurable values that apply to all boards.
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 4e3f14c9c717..5e517946f46a 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -93,8 +93,6 @@ static int rx_copybreak;
 static char version[] __devinitdata =
 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
 static char version2[] __devinitdata =
-" http://www.scyld.com/network/epic100.html\n";
-static char version3[] __devinitdata =
 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
 
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
@@ -323,8 +321,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 #ifndef MODULE
 	static int printed_version;
 	if (!printed_version++)
-		printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
-			version, version2, version3);
+		printk (KERN_INFO "%s" KERN_INFO "%s",
+			version, version2);
 #endif
 
 	card_idx++;
@@ -1596,8 +1594,8 @@ static int __init epic_init (void)
 {
 /* when a module, this is printed whether or not devices are found in probe */
 #ifdef MODULE
-	printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
-		version, version2, version3);
+	printk (KERN_INFO "%s" KERN_INFO "%s",
+		version, version2);
 #endif
 
 	return pci_register_driver(&epic_driver);
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 6e90619b3b41..36d2c7d4f4d0 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -140,7 +140,7 @@ config BAYCOM_SER_HDX
 	  modems that connect to a serial interface. The driver supports the
 	  ser12 design in half-duplex mode. This is the old driver. It is
 	  still provided in case your serial interface chip does not work with
-	  the full-duplex driver. This driver is depreciated. To configure
+	  the full-duplex driver. This driver is deprecated. To configure
 	  the driver, use the sethdlc utility available in the standard ax25
 	  utilities package. For information on the modems, see
 	  <http://www.baycom.de/> and
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 2ab173d9a0e4..1e67720f1066 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -113,7 +113,7 @@
 /* RxOver overflow in Recv FIFO */
 /* SipRcv received serial gap (or other condition you set) */
 /* Interrupts are enabled by writing a one to the IER register */
-/* Interrupts are cleared by writting a one to the ISR register */
+/* Interrupts are cleared by writing a one to the ISR register */
 /* */
 /* 6. The remaining registers: 0x6 and 0x3 appear to be */
 /* reserved parts of 16 or 32 bit registersthe remainder */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index f15aebde7b90..52c99d01d568 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -315,7 +315,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
  * hw - Struct containing variables accessed by shared code
  *
  * Reads the first 64 16 bit words of the EEPROM and sums the values read.
- * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
  * valid.
  *
  * Returns:
diff --git a/drivers/net/meth.h b/drivers/net/meth.h
index 84960dae2a22..ea3b8fc86d1e 100644
--- a/drivers/net/meth.h
+++ b/drivers/net/meth.h
@@ -126,7 +126,7 @@ typedef struct rx_packet {
 	/* Note: when loopback is set this bit becomes collision control. Setting this bit will */
 	/* cause a collision to be reported. */
 
-	/* Bits 5 and 6 are used to determine the the Destination address filter mode */
+	/* Bits 5 and 6 are used to determine the Destination address filter mode */
 #define METH_ACCEPT_MY 0	/* 00: Accept PHY address only */
 #define METH_ACCEPT_MCAST 0x20	/* 01: Accept physical, broadcast, and multicast filter matches only */
 #define METH_ACCEPT_AMCAST 0x40	/* 10: Accept physical, broadcast, and all multicast packets */
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
new file mode 100644
index 000000000000..0952a6528f58
--- /dev/null
+++ b/drivers/net/mlx4/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
2
3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
4 mr.o pd.o profile.o qp.o reset.o srq.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
new file mode 100644
index 000000000000..9ffdb9d29da9
--- /dev/null
+++ b/drivers/net/mlx4/alloc.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/errno.h>
34#include <linux/slab.h>
35#include <linux/bitmap.h>
36
37#include "mlx4.h"
38
39u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
40{
41 u32 obj;
42
43 spin_lock(&bitmap->lock);
44
45 obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
46 if (obj >= bitmap->max) {
47 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
48 obj = find_first_zero_bit(bitmap->table, bitmap->max);
49 }
50
51 if (obj < bitmap->max) {
52 set_bit(obj, bitmap->table);
53 obj |= bitmap->top;
54 bitmap->last = obj + 1;
55 } else
56 obj = -1;
57
58 spin_unlock(&bitmap->lock);
59
60 return obj;
61}
62
63void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
64{
65 obj &= bitmap->max - 1;
66
67 spin_lock(&bitmap->lock);
68 clear_bit(obj, bitmap->table);
69 bitmap->last = min(bitmap->last, obj);
70 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
71 spin_unlock(&bitmap->lock);
72}
73
74int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
75{
76 int i;
77
78 /* num must be a power of 2 */
79 if (num != roundup_pow_of_two(num))
80 return -EINVAL;
81
82 bitmap->last = 0;
83 bitmap->top = 0;
84 bitmap->max = num;
85 bitmap->mask = mask;
86 spin_lock_init(&bitmap->lock);
87 bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
88 if (!bitmap->table)
89 return -ENOMEM;
90
91 for (i = 0; i < reserved; ++i)
92 set_bit(i, bitmap->table);
93
94 return 0;
95}
96
97void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
98{
99 kfree(bitmap->table);
100}
101
102/*
103 * Handling for queue buffers -- we allocate a bunch of memory and
104 * register it in a memory region at HCA virtual address 0. If the
105 * requested size is > max_direct, we split the allocation into
106 * multiple pages, so we don't require too much contiguous memory.
107 */
108
109int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
110 struct mlx4_buf *buf)
111{
112 dma_addr_t t;
113
114 if (size <= max_direct) {
115 buf->nbufs = 1;
116 buf->npages = 1;
117 buf->page_shift = get_order(size) + PAGE_SHIFT;
118 buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
119 size, &t, GFP_KERNEL);
120 if (!buf->u.direct.buf)
121 return -ENOMEM;
122
123 buf->u.direct.map = t;
124
125 while (t & ((1 << buf->page_shift) - 1)) {
126 --buf->page_shift;
127 buf->npages *= 2;
128 }
129
130 memset(buf->u.direct.buf, 0, size);
131 } else {
132 int i;
133
134 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
135 buf->npages = buf->nbufs;
136 buf->page_shift = PAGE_SHIFT;
137 buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
138 GFP_KERNEL);
139 if (!buf->u.page_list)
140 return -ENOMEM;
141
142 for (i = 0; i < buf->nbufs; ++i) {
143 buf->u.page_list[i].buf =
144 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
145 &t, GFP_KERNEL);
146 if (!buf->u.page_list[i].buf)
147 goto err_free;
148
149 buf->u.page_list[i].map = t;
150
151 memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
152 }
153 }
154
155 return 0;
156
157err_free:
158 mlx4_buf_free(dev, size, buf);
159
160 return -ENOMEM;
161}
162EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
163
164void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
165{
166 int i;
167
168 if (buf->nbufs == 1)
169 dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
170 buf->u.direct.map);
171 else {
172 for (i = 0; i < buf->nbufs; ++i)
173 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
174 buf->u.page_list[i].buf,
175 buf->u.page_list[i].map);
176 kfree(buf->u.page_list);
177 }
178}
179EXPORT_SYMBOL_GPL(mlx4_buf_free);
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
new file mode 100644
index 000000000000..1bb088aeaf71
--- /dev/null
+++ b/drivers/net/mlx4/catas.c
@@ -0,0 +1,70 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "mlx4.h"
34
35void mlx4_handle_catas_err(struct mlx4_dev *dev)
36{
37 struct mlx4_priv *priv = mlx4_priv(dev);
38
39 int i;
40
41 mlx4_err(dev, "Catastrophic error detected:\n");
42 for (i = 0; i < priv->fw.catas_size; ++i)
43 mlx4_err(dev, " buf[%02x]: %08x\n",
44 i, swab32(readl(priv->catas_err.map + i)));
45
46 mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
47}
48
49void mlx4_map_catas_buf(struct mlx4_dev *dev)
50{
51 struct mlx4_priv *priv = mlx4_priv(dev);
52 unsigned long addr;
53
54 addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
55 priv->fw.catas_offset;
56
57 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
58 if (!priv->catas_err.map)
59 mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
60 addr);
61
62}
63
64void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
65{
66 struct mlx4_priv *priv = mlx4_priv(dev);
67
68 if (priv->catas_err.map)
69 iounmap(priv->catas_err.map);
70}
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
new file mode 100644
index 000000000000..c1f81a993f5d
--- /dev/null
+++ b/drivers/net/mlx4/cmd.c
@@ -0,0 +1,429 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/errno.h>
38
39#include <linux/mlx4/cmd.h>
40
41#include <asm/io.h>
42
43#include "mlx4.h"
44
45#define CMD_POLL_TOKEN 0xffff
46
47enum {
48 /* command completed successfully: */
49 CMD_STAT_OK = 0x00,
50 /* Internal error (such as a bus error) occurred while processing command: */
51 CMD_STAT_INTERNAL_ERR = 0x01,
52 /* Operation/command not supported or opcode modifier not supported: */
53 CMD_STAT_BAD_OP = 0x02,
54 /* Parameter not supported or parameter out of range: */
55 CMD_STAT_BAD_PARAM = 0x03,
56 /* System not enabled or bad system state: */
57 CMD_STAT_BAD_SYS_STATE = 0x04,
 58 /* Attempt to access reserved or unallocated resource: */
59 CMD_STAT_BAD_RESOURCE = 0x05,
60 /* Requested resource is currently executing a command, or is otherwise busy: */
61 CMD_STAT_RESOURCE_BUSY = 0x06,
62 /* Required capability exceeds device limits: */
63 CMD_STAT_EXCEED_LIM = 0x08,
64 /* Resource is not in the appropriate state or ownership: */
65 CMD_STAT_BAD_RES_STATE = 0x09,
66 /* Index out of range: */
67 CMD_STAT_BAD_INDEX = 0x0a,
68 /* FW image corrupted: */
69 CMD_STAT_BAD_NVMEM = 0x0b,
70 /* Attempt to modify a QP/EE which is not in the presumed state: */
71 CMD_STAT_BAD_QP_STATE = 0x10,
72 /* Bad segment parameters (Address/Size): */
73 CMD_STAT_BAD_SEG_PARAM = 0x20,
74 /* Memory Region has Memory Windows bound to: */
75 CMD_STAT_REG_BOUND = 0x21,
76 /* HCA local attached memory not present: */
77 CMD_STAT_LAM_NOT_PRE = 0x22,
78 /* Bad management packet (silently discarded): */
79 CMD_STAT_BAD_PKT = 0x30,
80 /* More outstanding CQEs in CQ than new CQ size: */
81 CMD_STAT_BAD_SIZE = 0x40
82};
83
84enum {
85 HCR_IN_PARAM_OFFSET = 0x00,
86 HCR_IN_MODIFIER_OFFSET = 0x08,
87 HCR_OUT_PARAM_OFFSET = 0x0c,
88 HCR_TOKEN_OFFSET = 0x14,
89 HCR_STATUS_OFFSET = 0x18,
90
91 HCR_OPMOD_SHIFT = 12,
92 HCR_T_BIT = 21,
93 HCR_E_BIT = 22,
94 HCR_GO_BIT = 23
95};
96
97enum {
98 GO_BIT_TIMEOUT = 10000
99};
100
101struct mlx4_cmd_context {
102 struct completion done;
103 int result;
104 int next;
105 u64 out_param;
106 u16 token;
107};
108
109static int mlx4_status_to_errno(u8 status) {
110 static const int trans_table[] = {
111 [CMD_STAT_INTERNAL_ERR] = -EIO,
112 [CMD_STAT_BAD_OP] = -EPERM,
113 [CMD_STAT_BAD_PARAM] = -EINVAL,
114 [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
115 [CMD_STAT_BAD_RESOURCE] = -EBADF,
116 [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
117 [CMD_STAT_EXCEED_LIM] = -ENOMEM,
118 [CMD_STAT_BAD_RES_STATE] = -EBADF,
119 [CMD_STAT_BAD_INDEX] = -EBADF,
120 [CMD_STAT_BAD_NVMEM] = -EFAULT,
121 [CMD_STAT_BAD_QP_STATE] = -EINVAL,
122 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
123 [CMD_STAT_REG_BOUND] = -EBUSY,
124 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
125 [CMD_STAT_BAD_PKT] = -EINVAL,
126 [CMD_STAT_BAD_SIZE] = -ENOMEM,
127 };
128
129 if (status >= ARRAY_SIZE(trans_table) ||
130 (status != CMD_STAT_OK && trans_table[status] == 0))
131 return -EIO;
132
133 return trans_table[status];
134}
135
136static int cmd_pending(struct mlx4_dev *dev)
137{
138 u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
139
140 return (status & swab32(1 << HCR_GO_BIT)) ||
141 (mlx4_priv(dev)->cmd.toggle ==
142 !!(status & swab32(1 << HCR_T_BIT)));
143}
144
145static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
146 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
147 int event)
148{
149 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
150 u32 __iomem *hcr = cmd->hcr;
151 int ret = -EAGAIN;
152 unsigned long end;
153
154 mutex_lock(&cmd->hcr_mutex);
155
156 end = jiffies;
157 if (event)
158 end += HZ * 10;
159
160 while (cmd_pending(dev)) {
161 if (time_after_eq(jiffies, end))
162 goto out;
163 cond_resched();
164 }
165
166 /*
167 * We use writel (instead of something like memcpy_toio)
168 * because writes of less than 32 bits to the HCR don't work
169 * (and some architectures such as ia64 implement memcpy_toio
170 * in terms of writeb).
171 */
172 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
173 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
174 __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
175 __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
176 __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
177 __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
178
179 /* __raw_writel may not order writes. */
180 wmb();
181
182 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
183 (cmd->toggle << HCR_T_BIT) |
184 (event ? (1 << HCR_E_BIT) : 0) |
185 (op_modifier << HCR_OPMOD_SHIFT) |
186 op), hcr + 6);
187 cmd->toggle = cmd->toggle ^ 1;
188
189 ret = 0;
190
191out:
192 mutex_unlock(&cmd->hcr_mutex);
193 return ret;
194}
195
196static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
197 int out_is_imm, u32 in_modifier, u8 op_modifier,
198 u16 op, unsigned long timeout)
199{
200 struct mlx4_priv *priv = mlx4_priv(dev);
201 void __iomem *hcr = priv->cmd.hcr;
202 int err = 0;
203 unsigned long end;
204
205 down(&priv->cmd.poll_sem);
206
207 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
208 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
209 if (err)
210 goto out;
211
212 end = msecs_to_jiffies(timeout) + jiffies;
213 while (cmd_pending(dev) && time_before(jiffies, end))
214 cond_resched();
215
216 if (cmd_pending(dev)) {
217 err = -ETIMEDOUT;
218 goto out;
219 }
220
221 if (out_is_imm)
222 *out_param =
223 (u64) be32_to_cpu((__force __be32)
224 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
225 (u64) be32_to_cpu((__force __be32)
226 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
227
228 err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
229 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
230
231out:
232 up(&priv->cmd.poll_sem);
233 return err;
234}
235
236void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
237{
238 struct mlx4_priv *priv = mlx4_priv(dev);
239 struct mlx4_cmd_context *context =
240 &priv->cmd.context[token & priv->cmd.token_mask];
241
242 /* previously timed out command completing at long last */
243 if (token != context->token)
244 return;
245
246 context->result = mlx4_status_to_errno(status);
247 context->out_param = out_param;
248
249 context->token += priv->cmd.token_mask + 1;
250
251 complete(&context->done);
252}
253
254static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
255 int out_is_imm, u32 in_modifier, u8 op_modifier,
256 u16 op, unsigned long timeout)
257{
258 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
259 struct mlx4_cmd_context *context;
260 int err = 0;
261
262 down(&cmd->event_sem);
263
264 spin_lock(&cmd->context_lock);
265 BUG_ON(cmd->free_head < 0);
266 context = &cmd->context[cmd->free_head];
267 cmd->free_head = context->next;
268 spin_unlock(&cmd->context_lock);
269
270 init_completion(&context->done);
271
272 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
273 in_modifier, op_modifier, op, context->token, 1);
274
275 if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
276 err = -EBUSY;
277 goto out;
278 }
279
280 err = context->result;
281 if (err)
282 goto out;
283
284 if (out_is_imm)
285 *out_param = context->out_param;
286
287out:
288 spin_lock(&cmd->context_lock);
289 context->next = cmd->free_head;
290 cmd->free_head = context - cmd->context;
291 spin_unlock(&cmd->context_lock);
292
293 up(&cmd->event_sem);
294 return err;
295}
296
297int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
298 int out_is_imm, u32 in_modifier, u8 op_modifier,
299 u16 op, unsigned long timeout)
300{
301 if (mlx4_priv(dev)->cmd.use_events)
302 return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
303 in_modifier, op_modifier, op, timeout);
304 else
305 return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
306 in_modifier, op_modifier, op, timeout);
307}
308EXPORT_SYMBOL_GPL(__mlx4_cmd);
309
310int mlx4_cmd_init(struct mlx4_dev *dev)
311{
312 struct mlx4_priv *priv = mlx4_priv(dev);
313
314 mutex_init(&priv->cmd.hcr_mutex);
315 sema_init(&priv->cmd.poll_sem, 1);
316 priv->cmd.use_events = 0;
317 priv->cmd.toggle = 1;
318
319 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
320 MLX4_HCR_SIZE);
321 if (!priv->cmd.hcr) {
322 mlx4_err(dev, "Couldn't map command register.");
323 return -ENOMEM;
324 }
325
326 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
327 MLX4_MAILBOX_SIZE,
328 MLX4_MAILBOX_SIZE, 0);
329 if (!priv->cmd.pool) {
330 iounmap(priv->cmd.hcr);
331 return -ENOMEM;
332 }
333
334 return 0;
335}
336
337void mlx4_cmd_cleanup(struct mlx4_dev *dev)
338{
339 struct mlx4_priv *priv = mlx4_priv(dev);
340
341 pci_pool_destroy(priv->cmd.pool);
342 iounmap(priv->cmd.hcr);
343}
344
345/*
346 * Switch to using events to issue FW commands (can only be called
347 * after event queue for command events has been initialized).
348 */
349int mlx4_cmd_use_events(struct mlx4_dev *dev)
350{
351 struct mlx4_priv *priv = mlx4_priv(dev);
352 int i;
353
354 priv->cmd.context = kmalloc(priv->cmd.max_cmds *
355 sizeof (struct mlx4_cmd_context),
356 GFP_KERNEL);
357 if (!priv->cmd.context)
358 return -ENOMEM;
359
360 for (i = 0; i < priv->cmd.max_cmds; ++i) {
361 priv->cmd.context[i].token = i;
362 priv->cmd.context[i].next = i + 1;
363 }
364
365 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
366 priv->cmd.free_head = 0;
367
368 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
369 spin_lock_init(&priv->cmd.context_lock);
370
371 for (priv->cmd.token_mask = 1;
372 priv->cmd.token_mask < priv->cmd.max_cmds;
373 priv->cmd.token_mask <<= 1)
374 ; /* nothing */
375 --priv->cmd.token_mask;
376
377 priv->cmd.use_events = 1;
378
379 down(&priv->cmd.poll_sem);
380
381 return 0;
382}
383
384/*
385 * Switch back to polling (used when shutting down the device)
386 */
387void mlx4_cmd_use_polling(struct mlx4_dev *dev)
388{
389 struct mlx4_priv *priv = mlx4_priv(dev);
390 int i;
391
392 priv->cmd.use_events = 0;
393
394 for (i = 0; i < priv->cmd.max_cmds; ++i)
395 down(&priv->cmd.event_sem);
396
397 kfree(priv->cmd.context);
398
399 up(&priv->cmd.poll_sem);
400}
401
402struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
403{
404 struct mlx4_cmd_mailbox *mailbox;
405
406 mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
407 if (!mailbox)
408 return ERR_PTR(-ENOMEM);
409
410 mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
411 &mailbox->dma);
412 if (!mailbox->buf) {
413 kfree(mailbox);
414 return ERR_PTR(-ENOMEM);
415 }
416
417 return mailbox;
418}
419EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
420
421void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
422{
423 if (!mailbox)
424 return;
425
426 pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
427 kfree(mailbox);
428}
429EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
new file mode 100644
index 000000000000..437d78ad0912
--- /dev/null
+++ b/drivers/net/mlx4/cq.c
@@ -0,0 +1,254 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 */
36
37#include <linux/init.h>
38#include <linux/hardirq.h>
39
40#include <linux/mlx4/cmd.h>
41
42#include "mlx4.h"
43#include "icm.h"
44
45struct mlx4_cq_context {
46 __be32 flags;
47 u16 reserved1[3];
48 __be16 page_offset;
49 __be32 logsize_usrpage;
50 u8 reserved2;
51 u8 cq_period;
52 u8 reserved3;
53 u8 cq_max_count;
54 u8 reserved4[3];
55 u8 comp_eqn;
56 u8 log_page_size;
57 u8 reserved5[2];
58 u8 mtt_base_addr_h;
59 __be32 mtt_base_addr_l;
60 __be32 last_notified_index;
61 __be32 solicit_producer_index;
62 __be32 consumer_index;
63 __be32 producer_index;
64 u8 reserved6[2];
65 __be64 db_rec_addr;
66};
67
68#define MLX4_CQ_STATUS_OK ( 0 << 28)
69#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
70#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
71#define MLX4_CQ_FLAG_CC ( 1 << 18)
72#define MLX4_CQ_FLAG_OI ( 1 << 17)
73#define MLX4_CQ_STATE_ARMED ( 9 << 8)
74#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8)
75#define MLX4_EQ_STATE_FIRED (10 << 8)
76
77void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
78{
79 struct mlx4_cq *cq;
80
81 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
82 cqn & (dev->caps.num_cqs - 1));
83 if (!cq) {
84 mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
85 return;
86 }
87
88 ++cq->arm_sn;
89
90 cq->comp(cq);
91}
92
93void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
94{
95 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
96 struct mlx4_cq *cq;
97
98 spin_lock(&cq_table->lock);
99
100 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
101 if (cq)
102 atomic_inc(&cq->refcount);
103
104 spin_unlock(&cq_table->lock);
105
106 if (!cq) {
107 mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
108 return;
109 }
110
111 cq->event(cq, event_type);
112
113 if (atomic_dec_and_test(&cq->refcount))
114 complete(&cq->free);
115}
116
117static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
118 int cq_num)
119{
120 return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
121 MLX4_CMD_TIME_CLASS_A);
122}
123
124static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
125 int cq_num)
126{
127 return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
128 mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
129 MLX4_CMD_TIME_CLASS_A);
130}
131
132int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
133 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq)
134{
135 struct mlx4_priv *priv = mlx4_priv(dev);
136 struct mlx4_cq_table *cq_table = &priv->cq_table;
137 struct mlx4_cmd_mailbox *mailbox;
138 struct mlx4_cq_context *cq_context;
139 u64 mtt_addr;
140 int err;
141
142 cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
143 if (cq->cqn == -1)
144 return -ENOMEM;
145
146 err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
147 if (err)
148 goto err_out;
149
150 err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
151 if (err)
152 goto err_put;
153
154 spin_lock_irq(&cq_table->lock);
155 err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
156 spin_unlock_irq(&cq_table->lock);
157 if (err)
158 goto err_cmpt_put;
159
160 mailbox = mlx4_alloc_cmd_mailbox(dev);
161 if (IS_ERR(mailbox)) {
162 err = PTR_ERR(mailbox);
163 goto err_radix;
164 }
165
166 cq_context = mailbox->buf;
167 memset(cq_context, 0, sizeof *cq_context);
168
169 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
170 cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
171 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
172
173 mtt_addr = mlx4_mtt_addr(dev, mtt);
174 cq_context->mtt_base_addr_h = mtt_addr >> 32;
175 cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
176 cq_context->db_rec_addr = cpu_to_be64(db_rec);
177
178 err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
179 mlx4_free_cmd_mailbox(dev, mailbox);
180 if (err)
181 goto err_radix;
182
183 cq->cons_index = 0;
184 cq->arm_sn = 1;
185 cq->uar = uar;
186 atomic_set(&cq->refcount, 1);
187 init_completion(&cq->free);
188
189 return 0;
190
191err_radix:
192 spin_lock_irq(&cq_table->lock);
193 radix_tree_delete(&cq_table->tree, cq->cqn);
194 spin_unlock_irq(&cq_table->lock);
195
196err_cmpt_put:
197 mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
198
199err_put:
200 mlx4_table_put(dev, &cq_table->table, cq->cqn);
201
202err_out:
203 mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
204
205 return err;
206}
207EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
208
209void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
210{
211 struct mlx4_priv *priv = mlx4_priv(dev);
212 struct mlx4_cq_table *cq_table = &priv->cq_table;
213 int err;
214
215 err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
216 if (err)
217 mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
218
219 synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq);
220
221 spin_lock_irq(&cq_table->lock);
222 radix_tree_delete(&cq_table->tree, cq->cqn);
223 spin_unlock_irq(&cq_table->lock);
224
225 if (atomic_dec_and_test(&cq->refcount))
226 complete(&cq->free);
227 wait_for_completion(&cq->free);
228
229 mlx4_table_put(dev, &cq_table->table, cq->cqn);
230 mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
231}
232EXPORT_SYMBOL_GPL(mlx4_cq_free);
233
234int __devinit mlx4_init_cq_table(struct mlx4_dev *dev)
235{
236 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
237 int err;
238
239 spin_lock_init(&cq_table->lock);
240 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
241
242 err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
243 dev->caps.num_cqs - 1, dev->caps.reserved_cqs);
244 if (err)
245 return err;
246
247 return 0;
248}
249
250void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
251{
252 /* Nothing to do to clean up radix_tree */
253 mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
254}
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
new file mode 100644
index 000000000000..acf1c801a1b8
--- /dev/null
+++ b/drivers/net/mlx4/eq.c
@@ -0,0 +1,696 @@
1/*
2 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/init.h>
35#include <linux/interrupt.h>
36
37#include <linux/mlx4/cmd.h>
38
39#include "mlx4.h"
40#include "fw.h"
41
42enum {
43 MLX4_NUM_ASYNC_EQE = 0x100,
44 MLX4_NUM_SPARE_EQE = 0x80,
45 MLX4_EQ_ENTRY_SIZE = 0x20
46};
47
48/*
49 * Must be packed because start is 64 bits but only aligned to 32 bits.
50 */
51struct mlx4_eq_context {
52 __be32 flags;
53 u16 reserved1[3];
54 __be16 page_offset;
55 u8 log_eq_size;
56 u8 reserved2[4];
57 u8 eq_period;
58 u8 reserved3;
59 u8 eq_max_count;
60 u8 reserved4[3];
61 u8 intr;
62 u8 log_page_size;
63 u8 reserved5[2];
64 u8 mtt_base_addr_h;
65 __be32 mtt_base_addr_l;
66 u32 reserved6[2];
67 __be32 consumer_index;
68 __be32 producer_index;
69 u32 reserved7[4];
70};
71
72#define MLX4_EQ_STATUS_OK ( 0 << 28)
73#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
74#define MLX4_EQ_OWNER_SW ( 0 << 24)
75#define MLX4_EQ_OWNER_HW ( 1 << 24)
76#define MLX4_EQ_FLAG_EC ( 1 << 18)
77#define MLX4_EQ_FLAG_OI ( 1 << 17)
78#define MLX4_EQ_STATE_ARMED ( 9 << 8)
79#define MLX4_EQ_STATE_FIRED (10 << 8)
80#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)
81
82#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \
83 (1ull << MLX4_EVENT_TYPE_COMM_EST) | \
84 (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \
85 (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \
86 (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \
87 (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \
88 (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
89 (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
90 (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
91 (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
92 (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
93 (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
94 (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
95 (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
96 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
97 (1ull << MLX4_EVENT_TYPE_CMD))
98#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
99
100struct mlx4_eqe {
101 u8 reserved1;
102 u8 type;
103 u8 reserved2;
104 u8 subtype;
105 union {
106 u32 raw[6];
107 struct {
108 __be32 cqn;
109 } __attribute__((packed)) comp;
110 struct {
111 u16 reserved1;
112 __be16 token;
113 u32 reserved2;
114 u8 reserved3[3];
115 u8 status;
116 __be64 out_param;
117 } __attribute__((packed)) cmd;
118 struct {
119 __be32 qpn;
120 } __attribute__((packed)) qp;
121 struct {
122 __be32 srqn;
123 } __attribute__((packed)) srq;
124 struct {
125 __be32 cqn;
126 u32 reserved1;
127 u8 reserved2[3];
128 u8 syndrome;
129 } __attribute__((packed)) cq_err;
130 struct {
131 u32 reserved1[2];
132 __be32 port;
133 } __attribute__((packed)) port_change;
134 } event;
135 u8 reserved3[3];
136 u8 owner;
137} __attribute__((packed));
138
139static void eq_set_ci(struct mlx4_eq *eq, int req_not)
140{
141 __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
142 req_not << 31),
143 eq->doorbell);
144 /* We still want ordering, just not swabbing, so add a barrier */
145 mb();
146}
147
148static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
149{
150 unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
151 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
152}
153
154static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
155{
156 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
157 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
158}
159
160static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
161{
162 struct mlx4_eqe *eqe;
163 int cqn;
164 int eqes_found = 0;
165 int set_ci = 0;
166
167 while ((eqe = next_eqe_sw(eq))) {
168 /*
169 * Make sure we read EQ entry contents after we've
170 * checked the ownership bit.
171 */
172 rmb();
173
174 switch (eqe->type) {
175 case MLX4_EVENT_TYPE_COMP:
176 cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
177 mlx4_cq_completion(dev, cqn);
178 break;
179
180 case MLX4_EVENT_TYPE_PATH_MIG:
181 case MLX4_EVENT_TYPE_COMM_EST:
182 case MLX4_EVENT_TYPE_SQ_DRAINED:
183 case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
184 case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
185 case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
186 case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
187 case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
188 mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
189 eqe->type);
190 break;
191
192 case MLX4_EVENT_TYPE_SRQ_LIMIT:
193 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
194 mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
195 eqe->type);
196 break;
197
198 case MLX4_EVENT_TYPE_CMD:
199 mlx4_cmd_event(dev,
200 be16_to_cpu(eqe->event.cmd.token),
201 eqe->event.cmd.status,
202 be64_to_cpu(eqe->event.cmd.out_param));
203 break;
204
205 case MLX4_EVENT_TYPE_PORT_CHANGE:
206 mlx4_dispatch_event(dev, eqe->type, eqe->subtype,
207 be32_to_cpu(eqe->event.port_change.port) >> 28);
208 break;
209
210 case MLX4_EVENT_TYPE_CQ_ERROR:
211 mlx4_warn(dev, "CQ %s on CQN %06x\n",
212 eqe->event.cq_err.syndrome == 1 ?
213 "overrun" : "access violation",
214 be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
215 mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
216 eqe->type);
217 break;
218
219 case MLX4_EVENT_TYPE_EQ_OVERFLOW:
220 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
221 break;
222
223 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
224 case MLX4_EVENT_TYPE_ECC_DETECT:
225 default:
226 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
227 eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
228 break;
229 };
230
231 ++eq->cons_index;
232 eqes_found = 1;
233 ++set_ci;
234
235 /*
236 * The HCA will think the queue has overflowed if we
237 * don't tell it we've been processing events. We
238 * create our EQs with MLX4_NUM_SPARE_EQE extra
239 * entries, so we must update our consumer index at
240 * least that often.
241 */
242 if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
243 /*
244 * Conditional on hca_type is OK here because
245 * this is a rare case, not the fast path.
246 */
247 eq_set_ci(eq, 0);
248 set_ci = 0;
249 }
250 }
251
252 eq_set_ci(eq, 1);
253
254 return eqes_found;
255}
256
257static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
258{
259 struct mlx4_dev *dev = dev_ptr;
260 struct mlx4_priv *priv = mlx4_priv(dev);
261 int work = 0;
262 int i;
263
264 writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
265
266 for (i = 0; i < MLX4_EQ_CATAS; ++i)
267 work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
268
269 return IRQ_RETVAL(work);
270}
271
272static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
273{
274 struct mlx4_eq *eq = eq_ptr;
275 struct mlx4_dev *dev = eq->dev;
276
277 mlx4_eq_int(dev, eq);
278
279 /* MSI-X vectors always belong to us */
280 return IRQ_HANDLED;
281}
282
283static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
284{
285 mlx4_handle_catas_err(dev_ptr);
286
287 /* MSI-X vectors always belong to us */
288 return IRQ_HANDLED;
289}
290
291static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
292 int eq_num)
293{
294 return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
295 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
296}
297
298static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
299 int eq_num)
300{
301 return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
302 MLX4_CMD_TIME_CLASS_A);
303}
304
305static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
306 int eq_num)
307{
308 return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
309 MLX4_CMD_TIME_CLASS_A);
310}
311
312static void __devinit __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev,
313 struct mlx4_eq *eq)
314{
315 struct mlx4_priv *priv = mlx4_priv(dev);
316 int index;
317
318 index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
319
320 if (!priv->eq_table.uar_map[index]) {
321 priv->eq_table.uar_map[index] =
322 ioremap(pci_resource_start(dev->pdev, 2) +
323 ((eq->eqn / 4) << PAGE_SHIFT),
324 PAGE_SIZE);
325 if (!priv->eq_table.uar_map[index]) {
326 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
327 eq->eqn);
328 return NULL;
329 }
330 }
331
332 return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
333}
334
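Each 4 KB UAR page holds four EQ doorbells, 8 bytes apart starting at offset 0x800, which is where the eqn / 4 and eqn % 4 arithmetic above comes from (and why a reserved UAR removes a whole group of four EQs). A standalone illustration of the address math, assuming 4 KB pages and made-up BAR base and reserved-EQ values:

#include <stdio.h>

int main(void)
{
        unsigned long bar2_base = 0xf8000000ul; /* hypothetical BAR 2 start */
        unsigned reserved_eqs = 64;             /* hypothetical, multiple of 4 */
        unsigned eqn;

        for (eqn = reserved_eqs; eqn < reserved_eqs + 6; ++eqn) {
                unsigned page = eqn / 4;                        /* UAR page to map */
                unsigned idx = eqn / 4 - reserved_eqs / 4;      /* uar_map[] slot */
                unsigned long db = bar2_base + ((unsigned long) page << 12) +
                                   0x800 + 8 * (eqn % 4);

                printf("EQ %3u -> uar_map[%u], doorbell at 0x%lx\n", eqn, idx, db);
        }
        return 0;
}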
335static int __devinit mlx4_create_eq(struct mlx4_dev *dev, int nent,
336 u8 intr, struct mlx4_eq *eq)
337{
338 struct mlx4_priv *priv = mlx4_priv(dev);
339 struct mlx4_cmd_mailbox *mailbox;
340 struct mlx4_eq_context *eq_context;
341 int npages;
342 u64 *dma_list = NULL;
343 dma_addr_t t;
344 u64 mtt_addr;
345 int err = -ENOMEM;
346 int i;
347
348 eq->dev = dev;
349 eq->nent = roundup_pow_of_two(max(nent, 2));
350 npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
351
352 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
353 GFP_KERNEL);
354 if (!eq->page_list)
355 goto err_out;
356
357 for (i = 0; i < npages; ++i)
358 eq->page_list[i].buf = NULL;
359
360 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
361 if (!dma_list)
362 goto err_out_free;
363
364 mailbox = mlx4_alloc_cmd_mailbox(dev);
365 if (IS_ERR(mailbox))
366 goto err_out_free;
367 eq_context = mailbox->buf;
368
369 for (i = 0; i < npages; ++i) {
370 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
371 PAGE_SIZE, &t, GFP_KERNEL);
372 if (!eq->page_list[i].buf)
373 goto err_out_free_pages;
374
375 dma_list[i] = t;
376 eq->page_list[i].map = t;
377
378 memset(eq->page_list[i].buf, 0, PAGE_SIZE);
379 }
380
381 eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
382 if (eq->eqn == -1)
383 goto err_out_free_pages;
384
385 eq->doorbell = mlx4_get_eq_uar(dev, eq);
386 if (!eq->doorbell) {
387 err = -ENOMEM;
388 goto err_out_free_eq;
389 }
390
391 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
392 if (err)
393 goto err_out_free_eq;
394
395 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
396 if (err)
397 goto err_out_free_mtt;
398
399 memset(eq_context, 0, sizeof *eq_context);
400 eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
401 MLX4_EQ_STATE_ARMED);
402 eq_context->log_eq_size = ilog2(eq->nent);
403 eq_context->intr = intr;
404 eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;
405
406 mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
407 eq_context->mtt_base_addr_h = mtt_addr >> 32;
408 eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
409
410 err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
411 if (err) {
412 mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
413 goto err_out_free_mtt;
414 }
415
416 kfree(dma_list);
417 mlx4_free_cmd_mailbox(dev, mailbox);
418
419 eq->cons_index = 0;
420
421 return err;
422
423err_out_free_mtt:
424 mlx4_mtt_cleanup(dev, &eq->mtt);
425
426err_out_free_eq:
427 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
428
429err_out_free_pages:
430 for (i = 0; i < npages; ++i)
431 if (eq->page_list[i].buf)
432 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
433 eq->page_list[i].buf,
434 eq->page_list[i].map);
435
436 mlx4_free_cmd_mailbox(dev, mailbox);
437
438err_out_free:
439 kfree(eq->page_list);
440 kfree(dma_list);
441
442err_out:
443 return err;
444}
445
446static void mlx4_free_eq(struct mlx4_dev *dev,
447 struct mlx4_eq *eq)
448{
449 struct mlx4_priv *priv = mlx4_priv(dev);
450 struct mlx4_cmd_mailbox *mailbox;
451 int err;
452 int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
453 int i;
454
455 mailbox = mlx4_alloc_cmd_mailbox(dev);
456 if (IS_ERR(mailbox))
457 return;
458
459 err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
460 if (err)
461 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
462
463 if (0) {
464 mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
465 for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
466 if (i % 4 == 0)
467 printk("[%02x] ", i * 4);
468 printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
469 if ((i + 1) % 4 == 0)
470 printk("\n");
471 }
472 }
473
474 mlx4_mtt_cleanup(dev, &eq->mtt);
475 for (i = 0; i < npages; ++i)
476 pci_free_consistent(dev->pdev, PAGE_SIZE,
477 eq->page_list[i].buf,
478 eq->page_list[i].map);
479
480 kfree(eq->page_list);
481 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
482 mlx4_free_cmd_mailbox(dev, mailbox);
483}
484
485static void mlx4_free_irqs(struct mlx4_dev *dev)
486{
487 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
488 int i;
489
490 if (eq_table->have_irq)
491 free_irq(dev->pdev->irq, dev);
492 for (i = 0; i < MLX4_NUM_EQ; ++i)
493 if (eq_table->eq[i].have_irq)
494 free_irq(eq_table->eq[i].irq, eq_table->eq + i);
495}
496
497static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
498{
499 struct mlx4_priv *priv = mlx4_priv(dev);
500
501 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
502 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
503 if (!priv->clr_base) {
504 mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
505 return -ENOMEM;
506 }
507
508 return 0;
509}
510
511static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
512{
513 struct mlx4_priv *priv = mlx4_priv(dev);
514
515 iounmap(priv->clr_base);
516}
517
518int __devinit mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
519{
520 struct mlx4_priv *priv = mlx4_priv(dev);
521 int ret;
522
523 /*
524 * We assume that mapping one page is enough for the whole EQ
525 * context table. This is fine with all current HCAs, because
526 * we only use 32 EQs and each EQ uses 64 bytes of context
527 * memory, or 1 KB total.
528 */
529 priv->eq_table.icm_virt = icm_virt;
530 priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
531 if (!priv->eq_table.icm_page)
532 return -ENOMEM;
533 priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
534 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
535 if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
536 __free_page(priv->eq_table.icm_page);
537 return -ENOMEM;
538 }
539
540 ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
541 if (ret) {
542 pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
543 PCI_DMA_BIDIRECTIONAL);
544 __free_page(priv->eq_table.icm_page);
545 }
546
547 return ret;
548}
549
550void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
551{
552 struct mlx4_priv *priv = mlx4_priv(dev);
553
554 mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
555 pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
556 PCI_DMA_BIDIRECTIONAL);
557 __free_page(priv->eq_table.icm_page);
558}
559
560int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
561{
562 struct mlx4_priv *priv = mlx4_priv(dev);
563 int err;
564 int i;
565
566 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
567 dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
568 if (err)
569 return err;
570
571 for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
572 priv->eq_table.uar_map[i] = NULL;
573
574 err = mlx4_map_clr_int(dev);
575 if (err)
576 goto err_out_free;
577
578 priv->eq_table.clr_mask =
579 swab32(1 << (priv->eq_table.inta_pin & 31));
580 priv->eq_table.clr_int = priv->clr_base +
581 (priv->eq_table.inta_pin < 32 ? 4 : 0);
582
583 err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
584 (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
585 &priv->eq_table.eq[MLX4_EQ_COMP]);
586 if (err)
587 goto err_out_unmap;
588
589 err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
590 (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
591 &priv->eq_table.eq[MLX4_EQ_ASYNC]);
592 if (err)
593 goto err_out_comp;
594
595 if (dev->flags & MLX4_FLAG_MSI_X) {
596 static const char *eq_name[] = {
597 [MLX4_EQ_COMP] = DRV_NAME " (comp)",
598 [MLX4_EQ_ASYNC] = DRV_NAME " (async)",
599 [MLX4_EQ_CATAS] = DRV_NAME " (catas)"
600 };
601
602 err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
603 &priv->eq_table.eq[MLX4_EQ_CATAS]);
604 if (err)
605 goto err_out_async;
606
607 for (i = 0; i < MLX4_EQ_CATAS; ++i) {
608 err = request_irq(priv->eq_table.eq[i].irq,
609 mlx4_msi_x_interrupt,
610 0, eq_name[i], priv->eq_table.eq + i);
611 if (err)
612 goto err_out_catas;
613
614 priv->eq_table.eq[i].have_irq = 1;
615 }
616
617 err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
618 mlx4_catas_interrupt, 0,
619 eq_name[MLX4_EQ_CATAS], dev);
620 if (err)
621 goto err_out_catas;
622
623 priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
624 } else {
625 err = request_irq(dev->pdev->irq, mlx4_interrupt,
626 IRQF_SHARED, DRV_NAME, dev);
627 if (err)
628 goto err_out_async;
629
630 priv->eq_table.have_irq = 1;
631 }
632
633 err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
634 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
635 if (err)
636 mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
637 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
638
639 for (i = 0; i < MLX4_EQ_CATAS; ++i)
640 eq_set_ci(&priv->eq_table.eq[i], 1);
641
642 if (dev->flags & MLX4_FLAG_MSI_X) {
643 err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
644 priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
645 if (err)
646 mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
647 priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
648 }
649
650 return 0;
651
652err_out_catas:
653 mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
654
655err_out_async:
656 mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
657
658err_out_comp:
659 mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);
660
661err_out_unmap:
662 mlx4_unmap_clr_int(dev);
663 mlx4_free_irqs(dev);
664
665err_out_free:
666 mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
667 return err;
668}
669
670void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
671{
672 struct mlx4_priv *priv = mlx4_priv(dev);
673 int i;
674
675 if (dev->flags & MLX4_FLAG_MSI_X)
676 mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
677 priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
678
679 mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
680 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
681
682 mlx4_free_irqs(dev);
683
684 for (i = 0; i < MLX4_EQ_CATAS; ++i)
685 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
686 if (dev->flags & MLX4_FLAG_MSI_X)
687 mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
688
689 mlx4_unmap_clr_int(dev);
690
691 for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
692 if (priv->eq_table.uar_map[i])
693 iounmap(priv->eq_table.uar_map[i]);
694
695 mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
696}
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
new file mode 100644
index 000000000000..c42717313663
--- /dev/null
+++ b/drivers/net/mlx4/fw.c
@@ -0,0 +1,775 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/mlx4/cmd.h>
36
37#include "fw.h"
38#include "icm.h"
39
40extern void __buggy_use_of_MLX4_GET(void);
41extern void __buggy_use_of_MLX4_PUT(void);
42
43#define MLX4_GET(dest, source, offset) \
44 do { \
45 void *__p = (char *) (source) + (offset); \
46 switch (sizeof (dest)) { \
47 case 1: (dest) = *(u8 *) __p; break; \
48 case 2: (dest) = be16_to_cpup(__p); break; \
49 case 4: (dest) = be32_to_cpup(__p); break; \
50 case 8: (dest) = be64_to_cpup(__p); break; \
51 default: __buggy_use_of_MLX4_GET(); \
52 } \
53 } while (0)
54
55#define MLX4_PUT(dest, source, offset) \
56 do { \
57 void *__d = ((char *) (dest) + (offset)); \
58 switch (sizeof(source)) { \
59 case 1: *(u8 *) __d = (source); break; \
60 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
61 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
62 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
63 default: __buggy_use_of_MLX4_PUT(); \
64 } \
65 } while (0)
66
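MLX4_GET() and MLX4_PUT() pick the byte-swap helper from the width of the destination or source operand, and the default case calls a function that is never defined, so a field of the wrong size fails at link time instead of silently. A rough userspace equivalent of pulling a 1-byte and a big-endian 2-byte field out of a mailbox buffer (offsets and values are made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohs() standing in for be16_to_cpup() */

int main(void)
{
        unsigned char outbox[0x100] = { 0 };
        uint16_t max_desc_sz;
        uint8_t max_sg;

        /* pretend firmware wrote 32 at 0x51 and big-endian 0x0200 at 0x52 */
        outbox[0x51] = 32;
        outbox[0x52] = 0x02;
        outbox[0x53] = 0x00;

        max_sg = outbox[0x51];                  /* 1-byte field: no swap */
        memcpy(&max_desc_sz, outbox + 0x52, 2); /* 2-byte field: swap */
        max_desc_sz = ntohs(max_desc_sz);

        printf("max SQ S/G: %u, max SQ desc size: %u\n", max_sg, max_desc_sz);
        return 0;
}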
67static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
68{
69 static const char *fname[] = {
70 [ 0] = "RC transport",
71 [ 1] = "UC transport",
72 [ 2] = "UD transport",
73 [ 3] = "SRC transport",
74 [ 4] = "reliable multicast",
75 [ 5] = "FCoIB support",
76 [ 6] = "SRQ support",
77 [ 7] = "IPoIB checksum offload",
78 [ 8] = "P_Key violation counter",
79 [ 9] = "Q_Key violation counter",
80 [10] = "VMM",
81 [16] = "MW support",
82 [17] = "APM support",
83 [18] = "Atomic ops support",
84 [19] = "Raw multicast support",
85 [20] = "Address vector port checking support",
86 [21] = "UD multicast support",
87 [24] = "Demand paging support",
88 [25] = "Router support"
89 };
90 int i;
91
92 mlx4_dbg(dev, "DEV_CAP flags:\n");
93 for (i = 0; i < 32; ++i)
94 if (fname[i] && (flags & (1 << i)))
95 mlx4_dbg(dev, " %s\n", fname[i]);
96}
97
98int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
99{
100 struct mlx4_cmd_mailbox *mailbox;
101 u32 *outbox;
102 u8 field;
103 u16 size;
104 u16 stat_rate;
105 int err;
106
107#define QUERY_DEV_CAP_OUT_SIZE 0x100
108#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
109#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
110#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
111#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
112#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
113#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
114#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
115#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
116#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
117#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
118#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
119#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
120#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
121#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
122#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
123#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
124#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
125#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
126#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
127#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
128#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
129#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
130#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
131#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
132#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
133#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
134#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
135#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
136#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
137#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
138#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
139#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
140#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
141#define QUERY_DEV_CAP_BF_OFFSET 0x4c
142#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
143#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
144#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
145#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
146#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
147#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
148#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
149#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
150#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
151#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
152#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
153#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
154#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
155#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
156#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
157#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
158#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
159#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
160#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
161#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
162#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
163#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
164#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x97
165#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
166#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
167
168 mailbox = mlx4_alloc_cmd_mailbox(dev);
169 if (IS_ERR(mailbox))
170 return PTR_ERR(mailbox);
171 outbox = mailbox->buf;
172
173 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
174 MLX4_CMD_TIME_CLASS_A);
175
176 if (err)
177 goto out;
178
179 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
180 dev_cap->reserved_qps = 1 << (field & 0xf);
181 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
182 dev_cap->max_qps = 1 << (field & 0x1f);
183 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
184 dev_cap->reserved_srqs = 1 << (field >> 4);
185 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
186 dev_cap->max_srqs = 1 << (field & 0x1f);
187 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
188 dev_cap->max_cq_sz = 1 << field;
189 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
190 dev_cap->reserved_cqs = 1 << (field & 0xf);
191 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
192 dev_cap->max_cqs = 1 << (field & 0x1f);
193 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
194 dev_cap->max_mpts = 1 << (field & 0x3f);
195 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
196 dev_cap->reserved_eqs = 1 << (field & 0xf);
197 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
198 dev_cap->max_eqs = 1 << (field & 0x7);
199 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
200 dev_cap->reserved_mtts = 1 << (field >> 4);
201 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
202 dev_cap->max_mrw_sz = 1 << field;
203 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
204 dev_cap->reserved_mrws = 1 << (field & 0xf);
205 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
206 dev_cap->max_mtt_seg = 1 << (field & 0x3f);
207 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
208 dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
209 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
210 dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
211 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
212 dev_cap->max_rdma_global = 1 << (field & 0x3f);
213 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
214 dev_cap->local_ca_ack_delay = field & 0x1f;
215 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
216 dev_cap->max_mtu = field >> 4;
217 dev_cap->max_port_width = field & 0xf;
218 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
219 dev_cap->max_vl = field >> 4;
220 dev_cap->num_ports = field & 0xf;
221 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
222 dev_cap->max_gids = 1 << (field & 0xf);
223 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
224 dev_cap->stat_rate_support = stat_rate;
225 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
226 dev_cap->max_pkeys = 1 << (field & 0xf);
227 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
228 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
229 dev_cap->reserved_uars = field >> 4;
230 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
231 dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
232 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
233 dev_cap->min_page_sz = 1 << field;
234
235 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
236 if (field & 0x80) {
237 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
238 dev_cap->bf_reg_size = 1 << (field & 0x1f);
239 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
240 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
241 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
242 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
243 } else {
244 dev_cap->bf_reg_size = 0;
245 mlx4_dbg(dev, "BlueFlame not available\n");
246 }
247
248 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
249 dev_cap->max_sq_sg = field;
250 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
251 dev_cap->max_sq_desc_sz = size;
252
253 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
254 dev_cap->max_qp_per_mcg = 1 << field;
255 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
256 dev_cap->reserved_mgms = field & 0xf;
257 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
258 dev_cap->max_mcgs = 1 << field;
259 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
260 dev_cap->reserved_pds = field >> 4;
261 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
262 dev_cap->max_pds = 1 << (field & 0x3f);
263
264 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
265 dev_cap->rdmarc_entry_sz = size;
266 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
267 dev_cap->qpc_entry_sz = size;
268 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
269 dev_cap->aux_entry_sz = size;
270 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
271 dev_cap->altc_entry_sz = size;
272 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
273 dev_cap->eqc_entry_sz = size;
274 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
275 dev_cap->cqc_entry_sz = size;
276 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
277 dev_cap->srq_entry_sz = size;
278 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
279 dev_cap->cmpt_entry_sz = size;
280 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
281 dev_cap->mtt_entry_sz = size;
282 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
283 dev_cap->dmpt_entry_sz = size;
284
285 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
286 dev_cap->max_srq_sz = 1 << field;
287 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
288 dev_cap->max_qp_sz = 1 << field;
289 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
290 dev_cap->resize_srq = field & 1;
291 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
292 dev_cap->max_rq_sg = field;
293 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
294 dev_cap->max_rq_desc_sz = size;
295
296 MLX4_GET(dev_cap->bmme_flags, outbox,
297 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
298 MLX4_GET(dev_cap->reserved_lkey, outbox,
299 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
300 MLX4_GET(dev_cap->max_icm_sz, outbox,
301 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
302
303 if (dev_cap->bmme_flags & 1)
304 mlx4_dbg(dev, "Base MM extensions: yes "
305 "(flags %d, rsvd L_Key %08x)\n",
306 dev_cap->bmme_flags, dev_cap->reserved_lkey);
307 else
308 mlx4_dbg(dev, "Base MM extensions: no\n");
309
310 /*
311 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
312 * we can't use any EQs whose doorbell falls on that page,
313 * even if the EQ itself isn't reserved.
314 */
315 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
316 dev_cap->reserved_eqs);
317
318 mlx4_dbg(dev, "Max ICM size %lld MB\n",
319 (unsigned long long) dev_cap->max_icm_sz >> 20);
320 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
321 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
322 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
323 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
324 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
325 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
326 mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
327 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
328 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
329 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
330 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
331 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
332 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
333 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
334 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
335 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
336 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
337 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu,
338 dev_cap->max_port_width);
339 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
340 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
341 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
342 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
343
344 dump_dev_cap_flags(dev, dev_cap->flags);
345
346out:
347 mlx4_free_cmd_mailbox(dev, mailbox);
348 return err;
349}
350
351int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
352{
353 struct mlx4_cmd_mailbox *mailbox;
354 struct mlx4_icm_iter iter;
355 __be64 *pages;
356 int lg;
357 int nent = 0;
358 int i;
359 int err = 0;
360 int ts = 0, tc = 0;
361
362 mailbox = mlx4_alloc_cmd_mailbox(dev);
363 if (IS_ERR(mailbox))
364 return PTR_ERR(mailbox);
365 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
366 pages = mailbox->buf;
367
368 for (mlx4_icm_first(icm, &iter);
369 !mlx4_icm_last(&iter);
370 mlx4_icm_next(&iter)) {
371 /*
372 * We have to pass pages that are aligned to their
373 * size, so find the least significant 1 in the
374 * address or size and use that as our log2 size.
375 */
376 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
377 if (lg < MLX4_ICM_PAGE_SHIFT) {
378 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
379 MLX4_ICM_PAGE_SIZE,
380 (unsigned long long) mlx4_icm_addr(&iter),
381 mlx4_icm_size(&iter));
382 err = -EINVAL;
383 goto out;
384 }
385
386 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
387 if (virt != -1) {
388 pages[nent * 2] = cpu_to_be64(virt);
389 virt += 1 << lg;
390 }
391
392 pages[nent * 2 + 1] =
393 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
394 (lg - MLX4_ICM_PAGE_SHIFT));
395 ts += 1 << (lg - 10);
396 ++tc;
397
398 if (++nent == MLX4_MAILBOX_SIZE / 16) {
399 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
400 MLX4_CMD_TIME_CLASS_B);
401 if (err)
402 goto out;
403 nent = 0;
404 }
405 }
406 }
407
408 if (nent)
409 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
410 if (err)
411 goto out;
412
413 switch (op) {
414 case MLX4_CMD_MAP_FA:
415 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
416 break;
417 case MLX4_CMD_MAP_ICM_AUX:
418 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
419 break;
420 case MLX4_CMD_MAP_ICM:
421 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
422 tc, ts, (unsigned long long) virt - (ts << 10));
423 break;
424 }
425
426out:
427 mlx4_free_cmd_mailbox(dev, mailbox);
428 return err;
429}
430
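The lg = ffs(addr | size) - 1 step in mlx4_map_cmd() picks the largest power-of-two page size that both divides the chunk length and matches its alignment, which is what lets a single mailbox entry describe a long run of ICM. A standalone check of that arithmetic with made-up numbers:

#include <stdio.h>
#include <strings.h>            /* ffs() */

int main(void)
{
        unsigned long addr = 0x12340000;        /* hypothetical DMA address */
        unsigned long size = 0x00060000;        /* 384 KB chunk */
        int lg = ffs(addr | size) - 1;

        printf("log2 page size %d -> %d pages of %lu bytes\n",
               lg, (int) (size >> lg), 1UL << lg);
        return 0;
}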
431int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
432{
433 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
434}
435
436int mlx4_UNMAP_FA(struct mlx4_dev *dev)
437{
438 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
439}
440
441
442int mlx4_RUN_FW(struct mlx4_dev *dev)
443{
444 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
445}
446
447int mlx4_QUERY_FW(struct mlx4_dev *dev)
448{
449 struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
450 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
451 struct mlx4_cmd_mailbox *mailbox;
452 u32 *outbox;
453 int err = 0;
454 u64 fw_ver;
455 u8 lg;
456
457#define QUERY_FW_OUT_SIZE 0x100
458#define QUERY_FW_VER_OFFSET 0x00
459#define QUERY_FW_MAX_CMD_OFFSET 0x0f
460#define QUERY_FW_ERR_START_OFFSET 0x30
461#define QUERY_FW_ERR_SIZE_OFFSET 0x38
462#define QUERY_FW_ERR_BAR_OFFSET 0x3c
463
464#define QUERY_FW_SIZE_OFFSET 0x00
465#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
466#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
467
468 mailbox = mlx4_alloc_cmd_mailbox(dev);
469 if (IS_ERR(mailbox))
470 return PTR_ERR(mailbox);
471 outbox = mailbox->buf;
472
473 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
474 MLX4_CMD_TIME_CLASS_A);
475 if (err)
476 goto out;
477
478 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
479 /*
480 * FW subminor version is at more significant bits than minor
481 * version, so swap here.
482 */
483 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
484 ((fw_ver & 0xffff0000ull) >> 16) |
485 ((fw_ver & 0x0000ffffull) << 16);
486
487 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
488 cmd->max_cmds = 1 << lg;
489
490 mlx4_dbg(dev, "FW version %d.%d.%03d, max commands %d\n",
491 (int) (dev->caps.fw_ver >> 32),
492 (int) (dev->caps.fw_ver >> 16) & 0xffff,
493 (int) dev->caps.fw_ver & 0xffff,
494 cmd->max_cmds);
495
496 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
497 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
498 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
499 fw->catas_bar = (fw->catas_bar >> 6) * 2;
500
501 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
502 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
503
504 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
505 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
506 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
507 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
508
509 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
510
511 /*
512 * Round up number of system pages needed in case
513 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
514 */
515 fw->fw_pages =
516 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
517 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
518
519 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
520 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
521
522out:
523 mlx4_free_cmd_mailbox(dev, mailbox);
524 return err;
525}
526
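QUERY_FW reports the subminor field in more significant bits than the minor one, so the driver swaps the low two 16-bit fields before printing major.minor.subminor. A quick standalone check with a made-up 2.0.410 version:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical raw word: major 2, subminor 410, minor 0 */
        uint64_t raw = (2ull << 32) | (410ull << 16) | 0;
        uint64_t ver = (raw & 0xffff00000000ull) |
                       ((raw & 0x00000000ffff0000ull) >> 16) |
                       ((raw & 0x000000000000ffffull) << 16);

        printf("FW version %d.%d.%03d\n",
               (int) (ver >> 32), (int) (ver >> 16) & 0xffff,
               (int) ver & 0xffff);
        return 0;
}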
527static void get_board_id(void *vsd, char *board_id)
528{
529 int i;
530
531#define VSD_OFFSET_SIG1 0x00
532#define VSD_OFFSET_SIG2 0xde
533#define VSD_OFFSET_MLX_BOARD_ID 0xd0
534#define VSD_OFFSET_TS_BOARD_ID 0x20
535
536#define VSD_SIGNATURE_TOPSPIN 0x5ad
537
538 memset(board_id, 0, MLX4_BOARD_ID_LEN);
539
540 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
541 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
542 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
543 } else {
544 /*
545 * The board ID is a string but the firmware byte
546 * swaps each 4-byte word before passing it back to
547 * us. Therefore we need to swab it before printing.
548 */
549 for (i = 0; i < 4; ++i)
550 ((u32 *) board_id)[i] =
551 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
552 }
553}
554
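For boards without the Topspin VSD signature, the firmware returns the board-ID string with each 32-bit word byte-swapped, so swabbing every word restores readable text. A standalone illustration using a made-up board ID:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <byteswap.h>           /* bswap_32() standing in for swab32() */

int main(void)
{
        const char id[17] = "MT_0123456789_A0";         /* made-up board ID */
        char wire[17] = { 0 }, board[17] = { 0 };
        uint32_t w;
        int i;

        /* simulate the firmware: byte-swap each 32-bit word */
        for (i = 0; i < 4; ++i) {
                memcpy(&w, id + i * 4, 4);
                w = bswap_32(w);
                memcpy(wire + i * 4, &w, 4);
        }

        /* the driver's recovery step, as in get_board_id() */
        for (i = 0; i < 4; ++i) {
                memcpy(&w, wire + i * 4, 4);
                w = bswap_32(w);
                memcpy(board + i * 4, &w, 4);
        }

        printf("on the wire: %.16s\nrecovered:   %s\n", wire, board);
        return 0;
}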
555int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
556{
557 struct mlx4_cmd_mailbox *mailbox;
558 u32 *outbox;
559 int err;
560
561#define QUERY_ADAPTER_OUT_SIZE 0x100
562#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00
563#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
564#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
565#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
566#define QUERY_ADAPTER_VSD_OFFSET 0x20
567
568 mailbox = mlx4_alloc_cmd_mailbox(dev);
569 if (IS_ERR(mailbox))
570 return PTR_ERR(mailbox);
571 outbox = mailbox->buf;
572
573 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
574 MLX4_CMD_TIME_CLASS_A);
575 if (err)
576 goto out;
577
578 MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
579 MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
580 MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
581 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
582
583 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
584 adapter->board_id);
585
586out:
587 mlx4_free_cmd_mailbox(dev, mailbox);
588 return err;
589}
590
591int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
592{
593 struct mlx4_cmd_mailbox *mailbox;
594 __be32 *inbox;
595 int err;
596
597#define INIT_HCA_IN_SIZE 0x200
598#define INIT_HCA_VERSION_OFFSET 0x000
599#define INIT_HCA_VERSION 2
600#define INIT_HCA_FLAGS_OFFSET 0x014
601#define INIT_HCA_QPC_OFFSET 0x020
602#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
603#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
604#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
605#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
606#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
607#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
608#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
609#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
610#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
611#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
612#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
613#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
614#define INIT_HCA_MCAST_OFFSET 0x0c0
615#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
616#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
617#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
618#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
619#define INIT_HCA_TPT_OFFSET 0x0f0
620#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
621#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
622#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
623#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
624#define INIT_HCA_UAR_OFFSET 0x120
625#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
626#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
627
628 mailbox = mlx4_alloc_cmd_mailbox(dev);
629 if (IS_ERR(mailbox))
630 return PTR_ERR(mailbox);
631 inbox = mailbox->buf;
632
633 memset(inbox, 0, INIT_HCA_IN_SIZE);
634
635 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
636
637#if defined(__LITTLE_ENDIAN)
638 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
639#elif defined(__BIG_ENDIAN)
640 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
641#else
642#error Host endianness not defined
643#endif
644 /* Check port for UD address vector: */
645 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
646
647 /* QPC/EEC/CQC/EQC/RDMARC attributes */
648
649 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
650 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
651 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
652 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
653 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
654 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
655 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
656 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
657 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
658 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
659 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
660 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
661
662 /* multicast attributes */
663
664 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
665 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
666 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
667 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
668
669 /* TPT attributes */
670
671 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
672 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
673 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
674 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
675
676 /* UAR attributes */
677
678 MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
679 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
680
681 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 1000);
682
683 if (err)
684 mlx4_err(dev, "INIT_HCA returns %d\n", err);
685
686 mlx4_free_cmd_mailbox(dev, mailbox);
687 return err;
688}
689
690int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port)
691{
692 struct mlx4_cmd_mailbox *mailbox;
693 u32 *inbox;
694 int err;
695 u32 flags;
696
697#define INIT_PORT_IN_SIZE 256
698#define INIT_PORT_FLAGS_OFFSET 0x00
699#define INIT_PORT_FLAG_SIG (1 << 18)
700#define INIT_PORT_FLAG_NG (1 << 17)
701#define INIT_PORT_FLAG_G0 (1 << 16)
702#define INIT_PORT_VL_SHIFT 4
703#define INIT_PORT_PORT_WIDTH_SHIFT 8
704#define INIT_PORT_MTU_OFFSET 0x04
705#define INIT_PORT_MAX_GID_OFFSET 0x06
706#define INIT_PORT_MAX_PKEY_OFFSET 0x0a
707#define INIT_PORT_GUID0_OFFSET 0x10
708#define INIT_PORT_NODE_GUID_OFFSET 0x18
709#define INIT_PORT_SI_GUID_OFFSET 0x20
710
711 mailbox = mlx4_alloc_cmd_mailbox(dev);
712 if (IS_ERR(mailbox))
713 return PTR_ERR(mailbox);
714 inbox = mailbox->buf;
715
716 memset(inbox, 0, INIT_PORT_IN_SIZE);
717
718 flags = 0;
719 flags |= param->set_guid0 ? INIT_PORT_FLAG_G0 : 0;
720 flags |= param->set_node_guid ? INIT_PORT_FLAG_NG : 0;
721 flags |= param->set_si_guid ? INIT_PORT_FLAG_SIG : 0;
722 flags |= (param->vl_cap & 0xf) << INIT_PORT_VL_SHIFT;
723 flags |= (param->port_width_cap & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
724 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
725
726 MLX4_PUT(inbox, param->mtu, INIT_PORT_MTU_OFFSET);
727 MLX4_PUT(inbox, param->max_gid, INIT_PORT_MAX_GID_OFFSET);
728 MLX4_PUT(inbox, param->max_pkey, INIT_PORT_MAX_PKEY_OFFSET);
729 MLX4_PUT(inbox, param->guid0, INIT_PORT_GUID0_OFFSET);
730 MLX4_PUT(inbox, param->node_guid, INIT_PORT_NODE_GUID_OFFSET);
731 MLX4_PUT(inbox, param->si_guid, INIT_PORT_SI_GUID_OFFSET);
732
733 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
734 MLX4_CMD_TIME_CLASS_A);
735
736 mlx4_free_cmd_mailbox(dev, mailbox);
737
738 return err;
739}
740EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
741
742int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
743{
744 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
745}
746EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
747
748int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
749{
750 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
751}
752
753int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
754{
755 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
756 MLX4_CMD_SET_ICM_SIZE,
757 MLX4_CMD_TIME_CLASS_A);
758 if (ret)
759 return ret;
760
761 /*
762 * Round up number of system pages needed in case
763 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
764 */
765 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
766 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
767
768 return 0;
769}
770
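Here and in mlx4_QUERY_FW() the count returned by firmware is in 4 KB ICM pages, and the driver rounds it up to whole system pages when PAGE_SIZE is larger. A standalone version of that rounding for a hypothetical 64 KB-page host:

#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long icm_pages = 33;           /* 33 x 4 KB of ICM wanted */
        unsigned long page_size = 65536;        /* hypothetical PAGE_SIZE */
        unsigned long icm_page_size = 4096;     /* MLX4_ICM_PAGE_SIZE */
        unsigned long per_sys_page = page_size / icm_page_size;
        unsigned long sys_pages = ALIGN(icm_pages, per_sys_page) / per_sys_page;

        printf("%lu ICM pages -> %lu system pages\n", icm_pages, sys_pages);
        return 0;
}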
771int mlx4_NOP(struct mlx4_dev *dev)
772{
773 /* Input modifier of 0x1f means "finish as soon as possible." */
774 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
775}
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
new file mode 100644
index 000000000000..2616fa53d4d0
--- /dev/null
+++ b/drivers/net/mlx4/fw.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef MLX4_FW_H
36#define MLX4_FW_H
37
38#include "mlx4.h"
39#include "icm.h"
40
41struct mlx4_dev_cap {
42 int max_srq_sz;
43 int max_qp_sz;
44 int reserved_qps;
45 int max_qps;
46 int reserved_srqs;
47 int max_srqs;
48 int max_cq_sz;
49 int reserved_cqs;
50 int max_cqs;
51 int max_mpts;
52 int reserved_eqs;
53 int max_eqs;
54 int reserved_mtts;
55 int max_mrw_sz;
56 int reserved_mrws;
57 int max_mtt_seg;
58 int max_requester_per_qp;
59 int max_responder_per_qp;
60 int max_rdma_global;
61 int local_ca_ack_delay;
62 int max_mtu;
63 int max_port_width;
64 int max_vl;
65 int num_ports;
66 int max_gids;
67 u16 stat_rate_support;
68 int max_pkeys;
69 u32 flags;
70 int reserved_uars;
71 int uar_size;
72 int min_page_sz;
73 int bf_reg_size;
74 int bf_regs_per_page;
75 int max_sq_sg;
76 int max_sq_desc_sz;
77 int max_rq_sg;
78 int max_rq_desc_sz;
79 int max_qp_per_mcg;
80 int reserved_mgms;
81 int max_mcgs;
82 int reserved_pds;
83 int max_pds;
84 int qpc_entry_sz;
85 int rdmarc_entry_sz;
86 int altc_entry_sz;
87 int aux_entry_sz;
88 int srq_entry_sz;
89 int cqc_entry_sz;
90 int eqc_entry_sz;
91 int dmpt_entry_sz;
92 int cmpt_entry_sz;
93 int mtt_entry_sz;
94 int resize_srq;
95 u8 bmme_flags;
96 u32 reserved_lkey;
97 u64 max_icm_sz;
98};
99
100struct mlx4_adapter {
101 u32 vendor_id;
102 u32 device_id;
103 u32 revision_id;
104 char board_id[MLX4_BOARD_ID_LEN];
105 u8 inta_pin;
106};
107
108struct mlx4_init_hca_param {
109 u64 qpc_base;
110 u64 rdmarc_base;
111 u64 auxc_base;
112 u64 altc_base;
113 u64 srqc_base;
114 u64 cqc_base;
115 u64 eqc_base;
116 u64 mc_base;
117 u64 dmpt_base;
118 u64 cmpt_base;
119 u64 mtt_base;
120 u16 log_mc_entry_sz;
121 u16 log_mc_hash_sz;
122 u8 log_num_qps;
123 u8 log_num_srqs;
124 u8 log_num_cqs;
125 u8 log_num_eqs;
126 u8 log_rd_per_qp;
127 u8 log_mc_table_sz;
128 u8 log_mpt_sz;
129 u8 log_uar_sz;
130};
131
132struct mlx4_init_ib_param {
133 int port_width;
134 int vl_cap;
135 int mtu_cap;
136 u16 gid_cap;
137 u16 pkey_cap;
138 int set_guid0;
139 u64 guid0;
140 int set_node_guid;
141 u64 node_guid;
142 int set_si_guid;
143 u64 si_guid;
144};
145
146struct mlx4_set_ib_param {
147 int set_si_guid;
148 int reset_qkey_viol;
149 u64 si_guid;
150 u32 cap_mask;
151};
152
153int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
154int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
155int mlx4_UNMAP_FA(struct mlx4_dev *dev);
156int mlx4_RUN_FW(struct mlx4_dev *dev);
157int mlx4_QUERY_FW(struct mlx4_dev *dev);
158int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
159int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
160int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
161int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
162int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
163int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
164int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
165int mlx4_NOP(struct mlx4_dev *dev);
166
167#endif /* MLX4_FW_H */
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
new file mode 100644
index 000000000000..e96feaed6ed4
--- /dev/null
+++ b/drivers/net/mlx4/icm.c
@@ -0,0 +1,379 @@
1/*
2 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/init.h>
35#include <linux/errno.h>
36
37#include <linux/mlx4/cmd.h>
38
39#include "mlx4.h"
40#include "icm.h"
41#include "fw.h"
42
43/*
44 * We allocate in as big chunks as we can, up to a maximum of 256 KB
45 * per chunk.
46 */
47enum {
48 MLX4_ICM_ALLOC_SIZE = 1 << 18,
49 MLX4_TABLE_CHUNK_SIZE = 1 << 18
50};
51
52void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
53{
54 struct mlx4_icm_chunk *chunk, *tmp;
55 int i;
56
57 list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
58 if (chunk->nsg > 0)
59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
60 PCI_DMA_BIDIRECTIONAL);
61
62 for (i = 0; i < chunk->npages; ++i)
63 __free_pages(chunk->mem[i].page,
64 get_order(chunk->mem[i].length));
65
66 kfree(chunk);
67 }
68
69 kfree(icm);
70}
71
72struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
73 gfp_t gfp_mask)
74{
75 struct mlx4_icm *icm;
76 struct mlx4_icm_chunk *chunk = NULL;
77 int cur_order;
78
79 icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
80 if (!icm)
81 return icm;
82
83 icm->refcount = 0;
84 INIT_LIST_HEAD(&icm->chunk_list);
85
86 cur_order = get_order(MLX4_ICM_ALLOC_SIZE);
87
88 while (npages > 0) {
89 if (!chunk) {
90 chunk = kmalloc(sizeof *chunk,
91 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
92 if (!chunk)
93 goto fail;
94
95 chunk->npages = 0;
96 chunk->nsg = 0;
97 list_add_tail(&chunk->list, &icm->chunk_list);
98 }
99
100 while (1 << cur_order > npages)
101 --cur_order;
102
103 chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
104 if (chunk->mem[chunk->npages].page) {
105 chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
106 chunk->mem[chunk->npages].offset = 0;
107
108 if (++chunk->npages == MLX4_ICM_CHUNK_LEN) {
109 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
110 chunk->npages,
111 PCI_DMA_BIDIRECTIONAL);
112
113 if (chunk->nsg <= 0)
114 goto fail;
115
116 chunk = NULL;
117 }
118
119 npages -= 1 << cur_order;
120 } else {
121 --cur_order;
122 if (cur_order < 0)
123 goto fail;
124 }
125 }
126
127 if (chunk) {
128 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
129 chunk->npages,
130 PCI_DMA_BIDIRECTIONAL);
131
132 if (chunk->nsg <= 0)
133 goto fail;
134 }
135
136 return icm;
137
138fail:
139 mlx4_free_icm(dev, icm);
140 return NULL;
141}
142
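mlx4_alloc_icm() keeps memory physically contiguous in the largest blocks it can get: it starts at 256 KB (order 6 with 4 KB pages), never grabs more than is still needed, and halves the order whenever an allocation fails. A standalone sketch of just the sizing loop (allocation failures are not simulated):

#include <stdio.h>

int main(void)
{
        int npages = 133;               /* hypothetical: 133 x 4 KB wanted */
        int cur_order = 6;              /* start at 2^6 pages = 256 KB */

        while (npages > 0) {
                while ((1 << cur_order) > npages)
                        --cur_order;
                printf("allocate a 2^%d-page block (%d pages left)\n",
                       cur_order, npages - (1 << cur_order));
                npages -= 1 << cur_order;
        }
        return 0;
}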
143static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
144{
145 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
146}
147
148int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
149{
150 return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
151 MLX4_CMD_TIME_CLASS_B);
152}
153
154int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
155{
156 struct mlx4_cmd_mailbox *mailbox;
157 __be64 *inbox;
158 int err;
159
160 mailbox = mlx4_alloc_cmd_mailbox(dev);
161 if (IS_ERR(mailbox))
162 return PTR_ERR(mailbox);
163 inbox = mailbox->buf;
164
165 inbox[0] = cpu_to_be64(virt);
166 inbox[1] = cpu_to_be64(dma_addr);
167
168 err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
169 MLX4_CMD_TIME_CLASS_B);
170
171 mlx4_free_cmd_mailbox(dev, mailbox);
172
173 if (!err)
174 mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
175 (unsigned long long) dma_addr, (unsigned long long) virt);
176
177 return err;
178}
179
180int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
181{
182 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
183}
184
185int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
186{
187 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
188}
189
190int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
191{
192 int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
193 int ret = 0;
194
195 mutex_lock(&table->mutex);
196
197 if (table->icm[i]) {
198 ++table->icm[i]->refcount;
199 goto out;
200 }
201
202 table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
203 (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
204 __GFP_NOWARN);
205 if (!table->icm[i]) {
206 ret = -ENOMEM;
207 goto out;
208 }
209
210 if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
211 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
212 mlx4_free_icm(dev, table->icm[i]);
213 table->icm[i] = NULL;
214 ret = -ENOMEM;
215 goto out;
216 }
217
218 ++table->icm[i]->refcount;
219
220out:
221 mutex_unlock(&table->mutex);
222 return ret;
223}
224
225void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
226{
227 int i;
228
229 i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
230
231 mutex_lock(&table->mutex);
232
233 if (--table->icm[i]->refcount == 0) {
234 mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
235 MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
236 mlx4_free_icm(dev, table->icm[i]);
237 table->icm[i] = NULL;
238 }
239
240 mutex_unlock(&table->mutex);
241}
242
243void *mlx4_table_find(struct mlx4_icm_table *table, int obj)
244{
245 int idx, offset, i;
246 struct mlx4_icm_chunk *chunk;
247 struct mlx4_icm *icm;
248 struct page *page = NULL;
249
250 if (!table->lowmem)
251 return NULL;
252
253 mutex_lock(&table->mutex);
254
255 idx = obj & (table->num_obj - 1);
256 icm = table->icm[idx / (MLX4_TABLE_CHUNK_SIZE / table->obj_size)];
257 offset = idx % (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
258
259 if (!icm)
260 goto out;
261
262 list_for_each_entry(chunk, &icm->chunk_list, list) {
263 for (i = 0; i < chunk->npages; ++i) {
264 if (chunk->mem[i].length > offset) {
265 page = chunk->mem[i].page;
266 goto out;
267 }
268 offset -= chunk->mem[i].length;
269 }
270 }
271
272out:
273 mutex_unlock(&table->mutex);
274 return page ? lowmem_page_address(page) + offset : NULL;
275}
276
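mlx4_table_get(), mlx4_table_put() and mlx4_table_find() all locate an object by packing fixed-size contexts into 256 KB chunks, so the chunk index and byte offset fall out of plain integer arithmetic. A standalone illustration with made-up sizes:

#include <stdio.h>

int main(void)
{
        int chunk_size = 1 << 18;       /* MLX4_TABLE_CHUNK_SIZE */
        int obj_size = 64;              /* hypothetical context entry size */
        int obj = 5000;                 /* hypothetical object number */
        int per_chunk = chunk_size / obj_size;

        printf("object %d -> chunk %d, offset %d bytes\n",
               obj, obj / per_chunk, (obj % per_chunk) * obj_size);
        return 0;
}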
277int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
278 int start, int end)
279{
280 int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
281 int i, err;
282
283 for (i = start; i <= end; i += inc) {
284 err = mlx4_table_get(dev, table, i);
285 if (err)
286 goto fail;
287 }
288
289 return 0;
290
291fail:
292 while (i > start) {
293 i -= inc;
294 mlx4_table_put(dev, table, i);
295 }
296
297 return err;
298}
299
300void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
301 int start, int end)
302{
303 int i;
304
305 for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
306 mlx4_table_put(dev, table, i);
307}
308
309int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
310 u64 virt, int obj_size, int nobj, int reserved,
311 int use_lowmem)
312{
313 int obj_per_chunk;
314 int num_icm;
315 unsigned chunk_size;
316 int i;
317
318 obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
319 num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
320
321 table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
322 if (!table->icm)
323 return -ENOMEM;
324 table->virt = virt;
325 table->num_icm = num_icm;
326 table->num_obj = nobj;
327 table->obj_size = obj_size;
328 table->lowmem = use_lowmem;
329 mutex_init(&table->mutex);
330
331 for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
332 chunk_size = MLX4_TABLE_CHUNK_SIZE;
333 if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
334 chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
335
336 table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
337 (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
338 __GFP_NOWARN);
339 if (!table->icm[i])
340 goto err;
341 if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
342 mlx4_free_icm(dev, table->icm[i]);
343 table->icm[i] = NULL;
344 goto err;
345 }
346
347 /*
348 * Add a reference to this ICM chunk so that it never
349 * gets freed (since it contains reserved firmware objects).
350 */
351 ++table->icm[i]->refcount;
352 }
353
354 return 0;
355
356err:
357 for (i = 0; i < num_icm; ++i)
358 if (table->icm[i]) {
359 mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
360 MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
361 mlx4_free_icm(dev, table->icm[i]);
362 }
363
364 return -ENOMEM;
365}
366
367void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
368{
369 int i;
370
371 for (i = 0; i < table->num_icm; ++i)
372 if (table->icm[i]) {
373 mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
374 MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
375 mlx4_free_icm(dev, table->icm[i]);
376 }
377
378 kfree(table->icm);
379}
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
new file mode 100644
index 000000000000..bea223d879a5
--- /dev/null
+++ b/drivers/net/mlx4/icm.h
@@ -0,0 +1,135 @@
1/*
2 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef MLX4_ICM_H
35#define MLX4_ICM_H
36
37#include <linux/list.h>
38#include <linux/pci.h>
39#include <linux/mutex.h>
40
41#define MLX4_ICM_CHUNK_LEN \
42 ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
43 (sizeof (struct scatterlist)))
44
45enum {
46 MLX4_ICM_PAGE_SHIFT = 12,
47 MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
48};
49
50struct mlx4_icm_chunk {
51 struct list_head list;
52 int npages;
53 int nsg;
54 struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
55};
56
57struct mlx4_icm {
58 struct list_head chunk_list;
59 int refcount;
60};
61
62struct mlx4_icm_iter {
63 struct mlx4_icm *icm;
64 struct mlx4_icm_chunk *chunk;
65 int page_idx;
66};
67
68struct mlx4_dev;
69
70struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask);
71void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm);
72
73int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
74void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
75int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
76 int start, int end);
77void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
78 int start, int end);
79int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
80 u64 virt, int obj_size, int nobj, int reserved,
81 int use_lowmem);
82void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
83int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
84void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
85void *mlx4_table_find(struct mlx4_icm_table *table, int obj);
86int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
87 int start, int end);
88void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
89 int start, int end);
90
91static inline void mlx4_icm_first(struct mlx4_icm *icm,
92 struct mlx4_icm_iter *iter)
93{
94 iter->icm = icm;
95 iter->chunk = list_empty(&icm->chunk_list) ?
96 NULL : list_entry(icm->chunk_list.next,
97 struct mlx4_icm_chunk, list);
98 iter->page_idx = 0;
99}
100
101static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
102{
103 return !iter->chunk;
104}
105
106static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
107{
108 if (++iter->page_idx >= iter->chunk->nsg) {
109 if (iter->chunk->list.next == &iter->icm->chunk_list) {
110 iter->chunk = NULL;
111 return;
112 }
113
114 iter->chunk = list_entry(iter->chunk->list.next,
115 struct mlx4_icm_chunk, list);
116 iter->page_idx = 0;
117 }
118}
119
120static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
121{
122 return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
123}
124
125static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
126{
127 return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
128}
129
130int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
131int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
132int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
133int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
134
135#endif /* MLX4_ICM_H */
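A minimal sketch (not from the patch) of how the iterator helpers declared above are meant to be used to walk the DMA-mapped scatterlist entries of an ICM allocation; the function name is invented, and mlx4_dbg() comes from the driver's private mlx4.h added later in this patch.

static void example_dump_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	struct mlx4_icm_iter iter;

	/* Visit every DMA-mapped block backing this allocation. */
	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter))
		mlx4_dbg(dev, "ICM block at 0x%llx, %lu bytes\n",
			 (unsigned long long) mlx4_icm_addr(&iter),
			 mlx4_icm_size(&iter));
}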
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
new file mode 100644
index 000000000000..65854f9e9c76
--- /dev/null
+++ b/drivers/net/mlx4/intf.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mlx4/driver.h>
34
35#include "mlx4.h"
36
37struct mlx4_device_context {
38 struct list_head list;
39 struct mlx4_interface *intf;
40 void *context;
41};
42
43static LIST_HEAD(intf_list);
44static LIST_HEAD(dev_list);
45static DEFINE_MUTEX(intf_mutex);
46
47static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
48{
49 struct mlx4_device_context *dev_ctx;
50
51 dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
52 if (!dev_ctx)
53 return;
54
55 dev_ctx->intf = intf;
56 dev_ctx->context = intf->add(&priv->dev);
57
58 if (dev_ctx->context) {
59 spin_lock_irq(&priv->ctx_lock);
60 list_add_tail(&dev_ctx->list, &priv->ctx_list);
61 spin_unlock_irq(&priv->ctx_lock);
62 } else
63 kfree(dev_ctx);
64}
65
66static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
67{
68 struct mlx4_device_context *dev_ctx;
69
70 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
71 if (dev_ctx->intf == intf) {
72 spin_lock_irq(&priv->ctx_lock);
73 list_del(&dev_ctx->list);
74 spin_unlock_irq(&priv->ctx_lock);
75
76 intf->remove(&priv->dev, dev_ctx->context);
77 kfree(dev_ctx);
78 return;
79 }
80}
81
82int mlx4_register_interface(struct mlx4_interface *intf)
83{
84 struct mlx4_priv *priv;
85
86 if (!intf->add || !intf->remove)
87 return -EINVAL;
88
89 mutex_lock(&intf_mutex);
90
91 list_add_tail(&intf->list, &intf_list);
92 list_for_each_entry(priv, &dev_list, dev_list)
93 mlx4_add_device(intf, priv);
94
95 mutex_unlock(&intf_mutex);
96
97 return 0;
98}
99EXPORT_SYMBOL_GPL(mlx4_register_interface);
100
101void mlx4_unregister_interface(struct mlx4_interface *intf)
102{
103 struct mlx4_priv *priv;
104
105 mutex_lock(&intf_mutex);
106
107 list_for_each_entry(priv, &dev_list, dev_list)
108 mlx4_remove_device(intf, priv);
109
110 list_del(&intf->list);
111
112 mutex_unlock(&intf_mutex);
113}
114EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
115
116void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
117 int subtype, int port)
118{
119 struct mlx4_priv *priv = mlx4_priv(dev);
120 struct mlx4_device_context *dev_ctx;
121 unsigned long flags;
122
123 spin_lock_irqsave(&priv->ctx_lock, flags);
124
125 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
126 if (dev_ctx->intf->event)
127 dev_ctx->intf->event(dev, dev_ctx->context, type,
128 subtype, port);
129
130 spin_unlock_irqrestore(&priv->ctx_lock, flags);
131}
132
133int mlx4_register_device(struct mlx4_dev *dev)
134{
135 struct mlx4_priv *priv = mlx4_priv(dev);
136 struct mlx4_interface *intf;
137
138 INIT_LIST_HEAD(&priv->ctx_list);
139 spin_lock_init(&priv->ctx_lock);
140
141 mutex_lock(&intf_mutex);
142
143 list_add_tail(&priv->dev_list, &dev_list);
144 list_for_each_entry(intf, &intf_list, list)
145 mlx4_add_device(intf, priv);
146
147 mutex_unlock(&intf_mutex);
148
149 return 0;
150}
151
152void mlx4_unregister_device(struct mlx4_dev *dev)
153{
154 struct mlx4_priv *priv = mlx4_priv(dev);
155 struct mlx4_interface *intf;
156
157 mutex_lock(&intf_mutex);
158
159 list_for_each_entry(intf, &intf_list, list)
160 mlx4_remove_device(intf, priv);
161
162 list_del(&priv->dev_list);
163
164 mutex_unlock(&intf_mutex);
165}
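For context, a hedged sketch of what a client of this interface layer could look like. The .add/.remove/.event member names are inferred from the intf->add/remove/event calls above; the full definition of struct mlx4_interface lives in <linux/mlx4/driver.h>, which this hunk does not show, and the example_* names are invented.

static void *example_add(struct mlx4_dev *dev)
{
	/* Per-device client state.  Returning NULL means "not interested";
	 * mlx4_add_device() then frees its device context. */
	return kzalloc(sizeof(int), GFP_KERNEL);
}

static void example_remove(struct mlx4_dev *dev, void *context)
{
	kfree(context);
}

static struct mlx4_interface example_intf = {
	.add	= example_add,		/* both hooks are mandatory: */
	.remove	= example_remove,	/* registration fails with -EINVAL otherwise */
	/* .event is optional; mlx4_dispatch_event() skips a NULL hook. */
};

/* In the client's module_init():  mlx4_register_interface(&example_intf);   */
/* In its module_exit():           mlx4_unregister_interface(&example_intf); */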
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
new file mode 100644
index 000000000000..4debb024eaf9
--- /dev/null
+++ b/drivers/net/mlx4/main.c
@@ -0,0 +1,936 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/module.h>
37#include <linux/init.h>
38#include <linux/errno.h>
39#include <linux/pci.h>
40#include <linux/dma-mapping.h>
41
42#include <linux/mlx4/device.h>
43#include <linux/mlx4/doorbell.h>
44
45#include "mlx4.h"
46#include "fw.h"
47#include "icm.h"
48
49MODULE_AUTHOR("Roland Dreier");
50MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
51MODULE_LICENSE("Dual BSD/GPL");
52MODULE_VERSION(DRV_VERSION);
53
54#ifdef CONFIG_MLX4_DEBUG
55
56int mlx4_debug_level = 0;
57module_param_named(debug_level, mlx4_debug_level, int, 0644);
58MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
59
60#endif /* CONFIG_MLX4_DEBUG */
61
62#ifdef CONFIG_PCI_MSI
63
64static int msi_x;
65module_param(msi_x, int, 0444);
66MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
67
68#else /* CONFIG_PCI_MSI */
69
70#define msi_x (0)
71
72#endif /* CONFIG_PCI_MSI */
73
74static const char mlx4_version[] __devinitdata =
75 DRV_NAME ": Mellanox ConnectX core driver v"
76 DRV_VERSION " (" DRV_RELDATE ")\n";
77
78static struct mlx4_profile default_profile = {
79 .num_qp = 1 << 16,
80 .num_srq = 1 << 16,
81 .rdmarc_per_qp = 4,
82 .num_cq = 1 << 16,
83 .num_mcg = 1 << 13,
84 .num_mpt = 1 << 17,
85 .num_mtt = 1 << 20,
86};
87
88static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
89{
90 int err;
91
92 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
93 if (err) {
94 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
95 return err;
96 }
97
98 if (dev_cap->min_page_sz > PAGE_SIZE) {
99 mlx4_err(dev, "HCA minimum page size of %d bigger than "
100 "kernel PAGE_SIZE of %ld, aborting.\n",
101 dev_cap->min_page_sz, PAGE_SIZE);
102 return -ENODEV;
103 }
104 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
105 mlx4_err(dev, "HCA has %d ports, but we only support %d, "
106 "aborting.\n",
107 dev_cap->num_ports, MLX4_MAX_PORTS);
108 return -ENODEV;
109 }
110
111 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
112 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
113 "PCI resource 2 size of 0x%llx, aborting.\n",
114 dev_cap->uar_size,
115 (unsigned long long) pci_resource_len(dev->pdev, 2));
116 return -ENODEV;
117 }
118
119 dev->caps.num_ports = dev_cap->num_ports;
120 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
121 dev->caps.vl_cap = dev_cap->max_vl;
122 dev->caps.mtu_cap = dev_cap->max_mtu;
123 dev->caps.gid_table_len = dev_cap->max_gids;
124 dev->caps.pkey_table_len = dev_cap->max_pkeys;
125 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
126 dev->caps.bf_reg_size = dev_cap->bf_reg_size;
127 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
128 dev->caps.max_sq_sg = dev_cap->max_sq_sg;
129 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
130 dev->caps.max_wqes = dev_cap->max_qp_sz;
131 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
132 dev->caps.reserved_qps = dev_cap->reserved_qps;
133 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
134 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
135 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
136 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
137 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
138 dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
139 /*
140 * Subtract 1 from the limit because we need to allocate a
141 * spare CQE so the HCA HW can tell the difference between an
142 * empty CQ and a full CQ.
143 */
144 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
145 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
146 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
147 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
148 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
149 dev->caps.reserved_uars = dev_cap->reserved_uars;
150 dev->caps.reserved_pds = dev_cap->reserved_pds;
151 dev->caps.port_width_cap = dev_cap->max_port_width;
152 dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
153 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
154 dev->caps.flags = dev_cap->flags;
155 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
156
157 return 0;
158}
159
160static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
161{
162 struct mlx4_priv *priv = mlx4_priv(dev);
163 int err;
164
165 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
166 GFP_HIGHUSER | __GFP_NOWARN);
167 if (!priv->fw.fw_icm) {
168 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
169 return -ENOMEM;
170 }
171
172 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
173 if (err) {
174 mlx4_err(dev, "MAP_FA command failed, aborting.\n");
175 goto err_free;
176 }
177
178 err = mlx4_RUN_FW(dev);
179 if (err) {
180 mlx4_err(dev, "RUN_FW command failed, aborting.\n");
181 goto err_unmap_fa;
182 }
183
184 return 0;
185
186err_unmap_fa:
187 mlx4_UNMAP_FA(dev);
188
189err_free:
190 mlx4_free_icm(dev, priv->fw.fw_icm);
191 return err;
192}
193
194static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
195 int cmpt_entry_sz)
196{
197 struct mlx4_priv *priv = mlx4_priv(dev);
198 int err;
199
200 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
201 cmpt_base +
202 ((u64) (MLX4_CMPT_TYPE_QP *
203 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
204 cmpt_entry_sz, dev->caps.num_qps,
205 dev->caps.reserved_qps, 0);
206 if (err)
207 goto err;
208
209 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
210 cmpt_base +
211 ((u64) (MLX4_CMPT_TYPE_SRQ *
212 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
213 cmpt_entry_sz, dev->caps.num_srqs,
214 dev->caps.reserved_srqs, 0);
215 if (err)
216 goto err_qp;
217
218 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
219 cmpt_base +
220 ((u64) (MLX4_CMPT_TYPE_CQ *
221 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
222 cmpt_entry_sz, dev->caps.num_cqs,
223 dev->caps.reserved_cqs, 0);
224 if (err)
225 goto err_srq;
226
227 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
228 cmpt_base +
229 ((u64) (MLX4_CMPT_TYPE_EQ *
230 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
231 cmpt_entry_sz,
232 roundup_pow_of_two(MLX4_NUM_EQ +
233 dev->caps.reserved_eqs),
234 MLX4_NUM_EQ + dev->caps.reserved_eqs, 0);
235 if (err)
236 goto err_cq;
237
238 return 0;
239
240err_cq:
241 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
242
243err_srq:
244 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
245
246err_qp:
247 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
248
249err:
250 return err;
251}
252
253static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
254 struct mlx4_dev_cap *dev_cap,
255 struct mlx4_init_hca_param *init_hca,
256 u64 icm_size)
257{
258 struct mlx4_priv *priv = mlx4_priv(dev);
259 u64 aux_pages;
260 int err;
261
262 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
263 if (err) {
264 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
265 return err;
266 }
267
268 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
269 (unsigned long long) icm_size >> 10,
270 (unsigned long long) aux_pages << 2);
271
272 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
273 GFP_HIGHUSER | __GFP_NOWARN);
274 if (!priv->fw.aux_icm) {
275 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
276 return -ENOMEM;
277 }
278
279 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
280 if (err) {
281 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
282 goto err_free_aux;
283 }
284
285 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
286 if (err) {
287 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
288 goto err_unmap_aux;
289 }
290
291 err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
292 if (err) {
293 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
294 goto err_unmap_cmpt;
295 }
296
297 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
298 init_hca->mtt_base,
299 dev->caps.mtt_entry_sz,
300 dev->caps.num_mtt_segs,
301 dev->caps.reserved_mtts, 1);
302 if (err) {
303 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
304 goto err_unmap_eq;
305 }
306
307 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
308 init_hca->dmpt_base,
309 dev_cap->dmpt_entry_sz,
310 dev->caps.num_mpts,
311 dev->caps.reserved_mrws, 1);
312 if (err) {
313 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
314 goto err_unmap_mtt;
315 }
316
317 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
318 init_hca->qpc_base,
319 dev_cap->qpc_entry_sz,
320 dev->caps.num_qps,
321 dev->caps.reserved_qps, 0);
322 if (err) {
323 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
324 goto err_unmap_dmpt;
325 }
326
327 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
328 init_hca->auxc_base,
329 dev_cap->aux_entry_sz,
330 dev->caps.num_qps,
331 dev->caps.reserved_qps, 0);
332 if (err) {
333 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
334 goto err_unmap_qp;
335 }
336
337 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
338 init_hca->altc_base,
339 dev_cap->altc_entry_sz,
340 dev->caps.num_qps,
341 dev->caps.reserved_qps, 0);
342 if (err) {
343 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
344 goto err_unmap_auxc;
345 }
346
347 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
348 init_hca->rdmarc_base,
349 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
350 dev->caps.num_qps,
351 dev->caps.reserved_qps, 0);
352 if (err) {
353 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
354 goto err_unmap_altc;
355 }
356
357 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
358 init_hca->cqc_base,
359 dev_cap->cqc_entry_sz,
360 dev->caps.num_cqs,
361 dev->caps.reserved_cqs, 0);
362 if (err) {
363 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
364 goto err_unmap_rdmarc;
365 }
366
367 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
368 init_hca->srqc_base,
369 dev_cap->srq_entry_sz,
370 dev->caps.num_srqs,
371 dev->caps.reserved_srqs, 0);
372 if (err) {
373 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
374 goto err_unmap_cq;
375 }
376
377 /*
378 * It's not strictly required, but for simplicity just map the
379 * whole multicast group table now. The table isn't very big
380 * and it's a lot easier than trying to track ref counts.
381 */
382 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
383 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
384 dev->caps.num_mgms + dev->caps.num_amgms,
385 dev->caps.num_mgms + dev->caps.num_amgms,
386 0);
387 if (err) {
388 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
389 goto err_unmap_srq;
390 }
391
392 return 0;
393
394err_unmap_srq:
395 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
396
397err_unmap_cq:
398 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
399
400err_unmap_rdmarc:
401 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
402
403err_unmap_altc:
404 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
405
406err_unmap_auxc:
407 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
408
409err_unmap_qp:
410 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
411
412err_unmap_dmpt:
413 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
414
415err_unmap_mtt:
416 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
417
418err_unmap_eq:
419 mlx4_unmap_eq_icm(dev);
420
421err_unmap_cmpt:
422 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
423 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
424 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
425 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
426
427err_unmap_aux:
428 mlx4_UNMAP_ICM_AUX(dev);
429
430err_free_aux:
431 mlx4_free_icm(dev, priv->fw.aux_icm);
432
433 return err;
434}
435
436static void mlx4_free_icms(struct mlx4_dev *dev)
437{
438 struct mlx4_priv *priv = mlx4_priv(dev);
439
440 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
441 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
442 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
443 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
444 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
445 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
446 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
447 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
448 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
449 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
450 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
451 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
452 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
453 mlx4_unmap_eq_icm(dev);
454
455 mlx4_UNMAP_ICM_AUX(dev);
456 mlx4_free_icm(dev, priv->fw.aux_icm);
457}
458
459static void mlx4_close_hca(struct mlx4_dev *dev)
460{
461 mlx4_CLOSE_HCA(dev, 0);
462 mlx4_free_icms(dev);
463 mlx4_UNMAP_FA(dev);
464 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm);
465}
466
467static int __devinit mlx4_init_hca(struct mlx4_dev *dev)
468{
469 struct mlx4_priv *priv = mlx4_priv(dev);
470 struct mlx4_adapter adapter;
471 struct mlx4_dev_cap dev_cap;
472 struct mlx4_profile profile;
473 struct mlx4_init_hca_param init_hca;
474 u64 icm_size;
475 int err;
476
477 err = mlx4_QUERY_FW(dev);
478 if (err) {
479 mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
480 return err;
481 }
482
483 err = mlx4_load_fw(dev);
484 if (err) {
485 mlx4_err(dev, "Failed to start FW, aborting.\n");
486 return err;
487 }
488
489 err = mlx4_dev_cap(dev, &dev_cap);
490 if (err) {
491 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
492 goto err_stop_fw;
493 }
494
495 profile = default_profile;
496
497 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
498 if ((long long) icm_size < 0) {
499 err = icm_size;
500 goto err_stop_fw;
501 }
502
503 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
504
505 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
506 if (err)
507 goto err_stop_fw;
508
509 err = mlx4_INIT_HCA(dev, &init_hca);
510 if (err) {
511 mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
512 goto err_free_icm;
513 }
514
515 err = mlx4_QUERY_ADAPTER(dev, &adapter);
516 if (err) {
517 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
518 goto err_close;
519 }
520
521 priv->eq_table.inta_pin = adapter.inta_pin;
522 priv->rev_id = adapter.revision_id;
523 memcpy(priv->board_id, adapter.board_id, sizeof priv->board_id);
524
525 return 0;
526
527err_close:
528 mlx4_close_hca(dev);
529
530err_free_icm:
531 mlx4_free_icms(dev);
532
533err_stop_fw:
534 mlx4_UNMAP_FA(dev);
535 mlx4_free_icm(dev, priv->fw.fw_icm);
536
537 return err;
538}
539
540static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
541{
542 struct mlx4_priv *priv = mlx4_priv(dev);
543 int err;
544
545 MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
546
547 err = mlx4_init_uar_table(dev);
548 if (err) {
549 mlx4_err(dev, "Failed to initialize "
550 "user access region table, aborting.\n");
551 return err;
552 }
553
554 err = mlx4_uar_alloc(dev, &priv->driver_uar);
555 if (err) {
556 mlx4_err(dev, "Failed to allocate driver access region, "
557 "aborting.\n");
558 goto err_uar_table_free;
559 }
560
561 priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
562 if (!priv->kar) {
563 mlx4_err(dev, "Couldn't map kernel access region, "
564 "aborting.\n");
565 err = -ENOMEM;
566 goto err_uar_free;
567 }
568
569 err = mlx4_init_pd_table(dev);
570 if (err) {
571 mlx4_err(dev, "Failed to initialize "
572 "protection domain table, aborting.\n");
573 goto err_kar_unmap;
574 }
575
576 err = mlx4_init_mr_table(dev);
577 if (err) {
578 mlx4_err(dev, "Failed to initialize "
579 "memory region table, aborting.\n");
580 goto err_pd_table_free;
581 }
582
583 mlx4_map_catas_buf(dev);
584
585 err = mlx4_init_eq_table(dev);
586 if (err) {
587 mlx4_err(dev, "Failed to initialize "
588 "event queue table, aborting.\n");
589 goto err_catas_buf;
590 }
591
592 err = mlx4_cmd_use_events(dev);
593 if (err) {
594 mlx4_err(dev, "Failed to switch to event-driven "
595 "firmware commands, aborting.\n");
596 goto err_eq_table_free;
597 }
598
599 err = mlx4_NOP(dev);
600 if (err) {
601 mlx4_err(dev, "NOP command failed to generate interrupt "
602 "(IRQ %d), aborting.\n",
603 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
604 if (dev->flags & MLX4_FLAG_MSI_X)
605 mlx4_err(dev, "Try again with MSI-X disabled.\n");
606 else
607 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
608
609 goto err_cmd_poll;
610 }
611
612 mlx4_dbg(dev, "NOP command IRQ test passed\n");
613
614 err = mlx4_init_cq_table(dev);
615 if (err) {
616 mlx4_err(dev, "Failed to initialize "
617 "completion queue table, aborting.\n");
618 goto err_cmd_poll;
619 }
620
621 err = mlx4_init_srq_table(dev);
622 if (err) {
623 mlx4_err(dev, "Failed to initialize "
624 "shared receive queue table, aborting.\n");
625 goto err_cq_table_free;
626 }
627
628 err = mlx4_init_qp_table(dev);
629 if (err) {
630 mlx4_err(dev, "Failed to initialize "
631 "queue pair table, aborting.\n");
632 goto err_srq_table_free;
633 }
634
635 err = mlx4_init_mcg_table(dev);
636 if (err) {
637 mlx4_err(dev, "Failed to initialize "
638 "multicast group table, aborting.\n");
639 goto err_qp_table_free;
640 }
641
642 return 0;
643
644err_qp_table_free:
645 mlx4_cleanup_qp_table(dev);
646
647err_srq_table_free:
648 mlx4_cleanup_srq_table(dev);
649
650err_cq_table_free:
651 mlx4_cleanup_cq_table(dev);
652
653err_cmd_poll:
654 mlx4_cmd_use_polling(dev);
655
656err_eq_table_free:
657 mlx4_cleanup_eq_table(dev);
658
659err_catas_buf:
660 mlx4_unmap_catas_buf(dev);
661 mlx4_cleanup_mr_table(dev);
662
663err_pd_table_free:
664 mlx4_cleanup_pd_table(dev);
665
666err_kar_unmap:
667 iounmap(priv->kar);
668
669err_uar_free:
670 mlx4_uar_free(dev, &priv->driver_uar);
671
672err_uar_table_free:
673 mlx4_cleanup_uar_table(dev);
674 return err;
675}
676
677static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
678{
679 struct mlx4_priv *priv = mlx4_priv(dev);
680 struct msix_entry entries[MLX4_NUM_EQ];
681 int err;
682 int i;
683
684 if (msi_x) {
685 for (i = 0; i < MLX4_NUM_EQ; ++i)
686 entries[i].entry = i;
687
688 err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
689 if (err) {
690 if (err > 0)
691 mlx4_info(dev, "Only %d MSI-X vectors available, "
692 "not using MSI-X\n", err);
693 goto no_msi;
694 }
695
696 for (i = 0; i < MLX4_NUM_EQ; ++i)
697 priv->eq_table.eq[i].irq = entries[i].vector;
698
699 dev->flags |= MLX4_FLAG_MSI_X;
700 return;
701 }
702
703no_msi:
704 for (i = 0; i < MLX4_NUM_EQ; ++i)
705 priv->eq_table.eq[i].irq = dev->pdev->irq;
706}
707
708static int __devinit mlx4_init_one(struct pci_dev *pdev,
709 const struct pci_device_id *id)
710{
711 static int mlx4_version_printed;
712 struct mlx4_priv *priv;
713 struct mlx4_dev *dev;
714 int err;
715
716 if (!mlx4_version_printed) {
717 printk(KERN_INFO "%s", mlx4_version);
718 ++mlx4_version_printed;
719 }
720
721 printk(KERN_INFO PFX "Initializing %s\n",
722 pci_name(pdev));
723
724 err = pci_enable_device(pdev);
725 if (err) {
726 dev_err(&pdev->dev, "Cannot enable PCI device, "
727 "aborting.\n");
728 return err;
729 }
730
731 /*
732 * Check for BARs. We expect 0: 1MB, 2: 8MB, 4: DDR (may not
733 * be present)
734 */
735 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
736 pci_resource_len(pdev, 0) != 1 << 20) {
737 dev_err(&pdev->dev, "Missing DCS, aborting.\n");
738 err = -ENODEV;
739 goto err_disable_pdev;
740 }
741 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
742 dev_err(&pdev->dev, "Missing UAR, aborting.\n");
743 err = -ENODEV;
744 goto err_disable_pdev;
745 }
746
747 err = pci_request_region(pdev, 0, DRV_NAME);
748 if (err) {
749 dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
750 goto err_disable_pdev;
751 }
752
753 err = pci_request_region(pdev, 2, DRV_NAME);
754 if (err) {
755 dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
756 goto err_release_bar0;
757 }
758
759 pci_set_master(pdev);
760
761 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
762 if (err) {
763 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
764 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
765 if (err) {
766 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
767 goto err_release_bar2;
768 }
769 }
770 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
771 if (err) {
772 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
773 "consistent PCI DMA mask.\n");
774 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
775 if (err) {
776 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
777 "aborting.\n");
778 goto err_release_bar2;
779 }
780 }
781
782 priv = kzalloc(sizeof *priv, GFP_KERNEL);
783 if (!priv) {
784 dev_err(&pdev->dev, "Device struct alloc failed, "
785 "aborting.\n");
786 err = -ENOMEM;
787 goto err_release_bar2;
788 }
789
790 dev = &priv->dev;
791 dev->pdev = pdev;
792
793 /*
794 * Now reset the HCA before we touch the PCI capabilities or
795 * attempt a firmware command, since a boot ROM may have left
796 * the HCA in an undefined state.
797 */
798 err = mlx4_reset(dev);
799 if (err) {
800 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
801 goto err_free_dev;
802 }
803
804 mlx4_enable_msi_x(dev);
805
806 if (mlx4_cmd_init(dev)) {
807 mlx4_err(dev, "Failed to init command interface, aborting.\n");
808 goto err_free_dev;
809 }
810
811 err = mlx4_init_hca(dev);
812 if (err)
813 goto err_cmd;
814
815 err = mlx4_setup_hca(dev);
816 if (err)
817 goto err_close;
818
819 err = mlx4_register_device(dev);
820 if (err)
821 goto err_cleanup;
822
823 pci_set_drvdata(pdev, dev);
824
825 return 0;
826
827err_cleanup:
828 mlx4_cleanup_mcg_table(dev);
829 mlx4_cleanup_qp_table(dev);
830 mlx4_cleanup_srq_table(dev);
831 mlx4_cleanup_cq_table(dev);
832 mlx4_cmd_use_polling(dev);
833 mlx4_cleanup_eq_table(dev);
834
835 mlx4_unmap_catas_buf(dev);
836
837 mlx4_cleanup_mr_table(dev);
838 mlx4_cleanup_pd_table(dev);
839 mlx4_cleanup_uar_table(dev);
840
841err_close:
842 mlx4_close_hca(dev);
843
844err_cmd:
845 mlx4_cmd_cleanup(dev);
846
847err_free_dev:
848 if (dev->flags & MLX4_FLAG_MSI_X)
849 pci_disable_msix(pdev);
850
851 kfree(priv);
852
853err_release_bar2:
854 pci_release_region(pdev, 2);
855
856err_release_bar0:
857 pci_release_region(pdev, 0);
858
859err_disable_pdev:
860 pci_disable_device(pdev);
861 pci_set_drvdata(pdev, NULL);
862 return err;
863}
864
865static void __devexit mlx4_remove_one(struct pci_dev *pdev)
866{
867 struct mlx4_dev *dev = pci_get_drvdata(pdev);
868 struct mlx4_priv *priv = mlx4_priv(dev);
869 int p;
870
871 if (dev) {
872 mlx4_unregister_device(dev);
873
874 for (p = 1; p <= dev->caps.num_ports; ++p)
875 mlx4_CLOSE_PORT(dev, p);
876
877 mlx4_cleanup_mcg_table(dev);
878 mlx4_cleanup_qp_table(dev);
879 mlx4_cleanup_srq_table(dev);
880 mlx4_cleanup_cq_table(dev);
881 mlx4_cmd_use_polling(dev);
882 mlx4_cleanup_eq_table(dev);
883
884 mlx4_unmap_catas_buf(dev);
885
886 mlx4_cleanup_mr_table(dev);
887 mlx4_cleanup_pd_table(dev);
888
889 iounmap(priv->kar);
890 mlx4_uar_free(dev, &priv->driver_uar);
891 mlx4_cleanup_uar_table(dev);
892 mlx4_close_hca(dev);
893 mlx4_cmd_cleanup(dev);
894
895 if (dev->flags & MLX4_FLAG_MSI_X)
896 pci_disable_msix(pdev);
897
898 kfree(priv);
899 pci_release_region(pdev, 2);
900 pci_release_region(pdev, 0);
901 pci_disable_device(pdev);
902 pci_set_drvdata(pdev, NULL);
903 }
904}
905
906static struct pci_device_id mlx4_pci_table[] = {
907 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
908 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
909 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
910 { 0, }
911};
912
913MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
914
915static struct pci_driver mlx4_driver = {
916 .name = DRV_NAME,
917 .id_table = mlx4_pci_table,
918 .probe = mlx4_init_one,
919 .remove = __devexit_p(mlx4_remove_one)
920};
921
922static int __init mlx4_init(void)
923{
924 int ret;
925
926 ret = pci_register_driver(&mlx4_driver);
927 return ret < 0 ? ret : 0;
928}
929
930static void __exit mlx4_cleanup(void)
931{
932 pci_unregister_driver(&mlx4_driver);
933}
934
935module_init(mlx4_init);
936module_exit(mlx4_cleanup);
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
new file mode 100644
index 000000000000..672024a0ee71
--- /dev/null
+++ b/drivers/net/mlx4/mcg.c
@@ -0,0 +1,380 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/init.h>
34#include <linux/string.h>
35#include <linux/slab.h>
36
37#include <linux/mlx4/cmd.h>
38
39#include "mlx4.h"
40
41struct mlx4_mgm {
42 __be32 next_gid_index;
43 __be32 members_count;
44 u32 reserved[2];
45 u8 gid[16];
46 __be32 qp[MLX4_QP_PER_MGM];
47};
48
49static const u8 zero_gid[16]; /* automatically initialized to 0 */
50
51static int mlx4_READ_MCG(struct mlx4_dev *dev, int index,
52 struct mlx4_cmd_mailbox *mailbox)
53{
54 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
55 MLX4_CMD_TIME_CLASS_A);
56}
57
58static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index,
59 struct mlx4_cmd_mailbox *mailbox)
60{
61 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
62 MLX4_CMD_TIME_CLASS_A);
63}
64
65static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
66 u16 *hash)
67{
68 u64 imm;
69 int err;
70
71 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH,
72 MLX4_CMD_TIME_CLASS_A);
73
74 if (!err)
75 *hash = imm;
76
77 return err;
78}
79
80/*
81 * Caller must hold MCG table semaphore. gid and mgm parameters must
82 * be properly aligned for command interface.
83 *
84 * Returns 0 unless a firmware command error occurs.
85 *
86 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
87 * and *mgm holds MGM entry.
88 *
 89 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
90 * previous entry in hash chain and *mgm holds AMGM entry.
91 *
 92 * If no AMGM exists for the given GID, *index = -1, *prev = index of last
93 * entry in hash chain and *mgm holds end of hash chain.
94 */
95static int find_mgm(struct mlx4_dev *dev,
96 u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
97 u16 *hash, int *prev, int *index)
98{
99 struct mlx4_cmd_mailbox *mailbox;
100 struct mlx4_mgm *mgm = mgm_mailbox->buf;
101 u8 *mgid;
102 int err;
103
104 mailbox = mlx4_alloc_cmd_mailbox(dev);
105 if (IS_ERR(mailbox))
106 return -ENOMEM;
107 mgid = mailbox->buf;
108
109 memcpy(mgid, gid, 16);
110
111 err = mlx4_MGID_HASH(dev, mailbox, hash);
112 mlx4_free_cmd_mailbox(dev, mailbox);
113 if (err)
114 return err;
115
116 if (0)
117 mlx4_dbg(dev, "Hash for %04x:%04x:%04x:%04x:"
118 "%04x:%04x:%04x:%04x is %04x\n",
119 be16_to_cpu(((__be16 *) gid)[0]),
120 be16_to_cpu(((__be16 *) gid)[1]),
121 be16_to_cpu(((__be16 *) gid)[2]),
122 be16_to_cpu(((__be16 *) gid)[3]),
123 be16_to_cpu(((__be16 *) gid)[4]),
124 be16_to_cpu(((__be16 *) gid)[5]),
125 be16_to_cpu(((__be16 *) gid)[6]),
126 be16_to_cpu(((__be16 *) gid)[7]),
127 *hash);
128
129 *index = *hash;
130 *prev = -1;
131
132 do {
133 err = mlx4_READ_MCG(dev, *index, mgm_mailbox);
134 if (err)
135 return err;
136
137 if (!memcmp(mgm->gid, zero_gid, 16)) {
138 if (*index != *hash) {
139 mlx4_err(dev, "Found zero MGID in AMGM.\n");
140 err = -EINVAL;
141 }
142 return err;
143 }
144
145 if (!memcmp(mgm->gid, gid, 16))
146 return err;
147
148 *prev = *index;
149 *index = be32_to_cpu(mgm->next_gid_index) >> 6;
150 } while (*index);
151
152 *index = -1;
153 return err;
154}
155
156int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
157{
158 struct mlx4_priv *priv = mlx4_priv(dev);
159 struct mlx4_cmd_mailbox *mailbox;
160 struct mlx4_mgm *mgm;
161 u32 members_count;
162 u16 hash;
163 int index, prev;
164 int link = 0;
165 int i;
166 int err;
167
168 mailbox = mlx4_alloc_cmd_mailbox(dev);
169 if (IS_ERR(mailbox))
170 return PTR_ERR(mailbox);
171 mgm = mailbox->buf;
172
173 mutex_lock(&priv->mcg_table.mutex);
174
175 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
176 if (err)
177 goto out;
178
179 if (index != -1) {
180 if (!memcmp(mgm->gid, zero_gid, 16))
181 memcpy(mgm->gid, gid, 16);
182 } else {
183 link = 1;
184
185 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
186 if (index == -1) {
187 mlx4_err(dev, "No AMGM entries left\n");
188 err = -ENOMEM;
189 goto out;
190 }
191 index += dev->caps.num_mgms;
192
193 err = mlx4_READ_MCG(dev, index, mailbox);
194 if (err)
195 goto out;
196
197 memset(mgm, 0, sizeof *mgm);
198 memcpy(mgm->gid, gid, 16);
199 }
200
201 members_count = be32_to_cpu(mgm->members_count);
202 if (members_count == MLX4_QP_PER_MGM) {
203 mlx4_err(dev, "MGM at index %x is full.\n", index);
204 err = -ENOMEM;
205 goto out;
206 }
207
208 for (i = 0; i < members_count; ++i)
209 if (mgm->qp[i] == cpu_to_be32(qp->qpn)) {
210 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
211 err = 0;
212 goto out;
213 }
214
215 mgm->qp[members_count++] = cpu_to_be32(qp->qpn);
216 mgm->members_count = cpu_to_be32(members_count);
217
218 err = mlx4_WRITE_MCG(dev, index, mailbox);
219 if (err)
220 goto out;
221
222 if (!link)
223 goto out;
224
225 err = mlx4_READ_MCG(dev, prev, mailbox);
226 if (err)
227 goto out;
228
229 mgm->next_gid_index = cpu_to_be32(index << 6);
230
231 err = mlx4_WRITE_MCG(dev, prev, mailbox);
232 if (err)
233 goto out;
234
235out:
236 if (err && link && index != -1) {
237 if (index < dev->caps.num_mgms)
238 mlx4_warn(dev, "Got AMGM index %d < %d",
239 index, dev->caps.num_mgms);
240 else
241 mlx4_bitmap_free(&priv->mcg_table.bitmap,
242 index - dev->caps.num_mgms);
243 }
244 mutex_unlock(&priv->mcg_table.mutex);
245
246 mlx4_free_cmd_mailbox(dev, mailbox);
247 return err;
248}
249EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
250
251int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
252{
253 struct mlx4_priv *priv = mlx4_priv(dev);
254 struct mlx4_cmd_mailbox *mailbox;
255 struct mlx4_mgm *mgm;
256 u32 members_count;
257 u16 hash;
258 int prev, index;
259 int i, loc;
260 int err;
261
262 mailbox = mlx4_alloc_cmd_mailbox(dev);
263 if (IS_ERR(mailbox))
264 return PTR_ERR(mailbox);
265 mgm = mailbox->buf;
266
267 mutex_lock(&priv->mcg_table.mutex);
268
269 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
270 if (err)
271 goto out;
272
273 if (index == -1) {
274 mlx4_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
275 "not found\n",
276 be16_to_cpu(((__be16 *) gid)[0]),
277 be16_to_cpu(((__be16 *) gid)[1]),
278 be16_to_cpu(((__be16 *) gid)[2]),
279 be16_to_cpu(((__be16 *) gid)[3]),
280 be16_to_cpu(((__be16 *) gid)[4]),
281 be16_to_cpu(((__be16 *) gid)[5]),
282 be16_to_cpu(((__be16 *) gid)[6]),
283 be16_to_cpu(((__be16 *) gid)[7]));
284 err = -EINVAL;
285 goto out;
286 }
287
288 members_count = be32_to_cpu(mgm->members_count);
289 for (loc = -1, i = 0; i < members_count; ++i)
290 if (mgm->qp[i] == cpu_to_be32(qp->qpn))
291 loc = i;
292
293 if (loc == -1) {
294 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
295 err = -EINVAL;
296 goto out;
297 }
298
299
300 mgm->members_count = cpu_to_be32(--members_count);
301 mgm->qp[loc] = mgm->qp[i - 1];
302 mgm->qp[i - 1] = 0;
303
304 err = mlx4_WRITE_MCG(dev, index, mailbox);
305 if (err)
306 goto out;
307
308 if (i != 1)
309 goto out;
310
311 if (prev == -1) {
312 /* Remove entry from MGM */
313 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
314 if (amgm_index) {
315 err = mlx4_READ_MCG(dev, amgm_index, mailbox);
316 if (err)
317 goto out;
318 } else
319 memset(mgm->gid, 0, 16);
320
321 err = mlx4_WRITE_MCG(dev, index, mailbox);
322 if (err)
323 goto out;
324
325 if (amgm_index) {
326 if (amgm_index < dev->caps.num_mgms)
327 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
328 index, amgm_index, dev->caps.num_mgms);
329 else
330 mlx4_bitmap_free(&priv->mcg_table.bitmap,
331 amgm_index - dev->caps.num_mgms);
332 }
333 } else {
334 /* Remove entry from AMGM */
335 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
336 err = mlx4_READ_MCG(dev, prev, mailbox);
337 if (err)
338 goto out;
339
340 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
341
342 err = mlx4_WRITE_MCG(dev, prev, mailbox);
343 if (err)
344 goto out;
345
346 if (index < dev->caps.num_mgms)
347 mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
348 prev, index, dev->caps.num_mgms);
349 else
350 mlx4_bitmap_free(&priv->mcg_table.bitmap,
351 index - dev->caps.num_mgms);
352 }
353
354out:
355 mutex_unlock(&priv->mcg_table.mutex);
356
357 mlx4_free_cmd_mailbox(dev, mailbox);
358 return err;
359}
360EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
361
362int __devinit mlx4_init_mcg_table(struct mlx4_dev *dev)
363{
364 struct mlx4_priv *priv = mlx4_priv(dev);
365 int err;
366
367 err = mlx4_bitmap_init(&priv->mcg_table.bitmap,
368 dev->caps.num_amgms, dev->caps.num_amgms - 1, 0);
369 if (err)
370 return err;
371
372 mutex_init(&priv->mcg_table.mutex);
373
374 return 0;
375}
376
377void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
378{
379 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
380}
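One detail worth making explicit: the hash-chain link that the attach/detach paths read and write with ">> 6" and "<< 6" stores the AMGM index of the next entry in the upper bits of next_gid_index (the low six bits appear to be reserved by the hardware format), with zero terminating the chain. A pair of helpers, written here only for illustration and not present in the driver, would look like:

static u32 example_mgm_next(struct mlx4_mgm *mgm)
{
	/* AMGM index of the next entry in the hash chain; 0 ends the chain. */
	return be32_to_cpu(mgm->next_gid_index) >> 6;
}

static void example_mgm_set_next(struct mlx4_mgm *mgm, u32 index)
{
	mgm->next_gid_index = cpu_to_be32(index << 6);
}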
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
new file mode 100644
index 000000000000..9befbae3d196
--- /dev/null
+++ b/drivers/net/mlx4/mlx4.h
@@ -0,0 +1,348 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 */
36
37#ifndef MLX4_H
38#define MLX4_H
39
40#include <linux/radix-tree.h>
41
42#include <linux/mlx4/device.h>
43#include <linux/mlx4/doorbell.h>
44
45#define DRV_NAME "mlx4_core"
46#define PFX DRV_NAME ": "
47#define DRV_VERSION "0.01"
48#define DRV_RELDATE "May 1, 2007"
49
50enum {
51 MLX4_HCR_BASE = 0x80680,
52 MLX4_HCR_SIZE = 0x0001c,
53 MLX4_CLR_INT_SIZE = 0x00008
54};
55
56enum {
57 MLX4_BOARD_ID_LEN = 64
58};
59
60enum {
61 MLX4_MGM_ENTRY_SIZE = 0x40,
62 MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
63 MLX4_MTT_ENTRY_PER_SEG = 8
64};
65
66enum {
67 MLX4_EQ_ASYNC,
68 MLX4_EQ_COMP,
69 MLX4_EQ_CATAS,
70 MLX4_NUM_EQ
71};
72
73enum {
74 MLX4_NUM_PDS = 1 << 15
75};
76
77enum {
78 MLX4_CMPT_TYPE_QP = 0,
79 MLX4_CMPT_TYPE_SRQ = 1,
80 MLX4_CMPT_TYPE_CQ = 2,
81 MLX4_CMPT_TYPE_EQ = 3,
82 MLX4_CMPT_NUM_TYPE
83};
84
85enum {
86 MLX4_CMPT_SHIFT = 24,
87 MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
88};
89
90#ifdef CONFIG_MLX4_DEBUG
91extern int mlx4_debug_level;
92
93#define mlx4_dbg(mdev, format, arg...) \
94 do { \
95 if (mlx4_debug_level) \
96 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
97 } while (0)
98
99#else /* CONFIG_MLX4_DEBUG */
100
101#define mlx4_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
102
103#endif /* CONFIG_MLX4_DEBUG */
104
105#define mlx4_err(mdev, format, arg...) \
106 dev_err(&mdev->pdev->dev, format, ## arg)
107#define mlx4_info(mdev, format, arg...) \
108 dev_info(&mdev->pdev->dev, format, ## arg)
109#define mlx4_warn(mdev, format, arg...) \
110 dev_warn(&mdev->pdev->dev, format, ## arg)
111
112struct mlx4_bitmap {
113 u32 last;
114 u32 top;
115 u32 max;
116 u32 mask;
117 spinlock_t lock;
118 unsigned long *table;
119};
120
121struct mlx4_buddy {
122 unsigned long **bits;
123 int max_order;
124 spinlock_t lock;
125};
126
127struct mlx4_icm;
128
129struct mlx4_icm_table {
130 u64 virt;
131 int num_icm;
132 int num_obj;
133 int obj_size;
134 int lowmem;
135 struct mutex mutex;
136 struct mlx4_icm **icm;
137};
138
139struct mlx4_eq {
140 struct mlx4_dev *dev;
141 void __iomem *doorbell;
142 int eqn;
143 u32 cons_index;
144 u16 irq;
145 u16 have_irq;
146 int nent;
147 struct mlx4_buf_list *page_list;
148 struct mlx4_mtt mtt;
149};
150
151struct mlx4_profile {
152 int num_qp;
153 int rdmarc_per_qp;
154 int num_srq;
155 int num_cq;
156 int num_mcg;
157 int num_mpt;
158 int num_mtt;
159};
160
161struct mlx4_fw {
162 u64 clr_int_base;
163 u64 catas_offset;
164 struct mlx4_icm *fw_icm;
165 struct mlx4_icm *aux_icm;
166 u32 catas_size;
167 u16 fw_pages;
168 u8 clr_int_bar;
169 u8 catas_bar;
170};
171
172struct mlx4_cmd {
173 struct pci_pool *pool;
174 void __iomem *hcr;
175 struct mutex hcr_mutex;
176 struct semaphore poll_sem;
177 struct semaphore event_sem;
178 int max_cmds;
179 spinlock_t context_lock;
180 int free_head;
181 struct mlx4_cmd_context *context;
182 u16 token_mask;
183 u8 use_events;
184 u8 toggle;
185};
186
187struct mlx4_uar_table {
188 struct mlx4_bitmap bitmap;
189};
190
191struct mlx4_mr_table {
192 struct mlx4_bitmap mpt_bitmap;
193 struct mlx4_buddy mtt_buddy;
194 u64 mtt_base;
195 u64 mpt_base;
196 struct mlx4_icm_table mtt_table;
197 struct mlx4_icm_table dmpt_table;
198};
199
200struct mlx4_cq_table {
201 struct mlx4_bitmap bitmap;
202 spinlock_t lock;
203 struct radix_tree_root tree;
204 struct mlx4_icm_table table;
205 struct mlx4_icm_table cmpt_table;
206};
207
208struct mlx4_eq_table {
209 struct mlx4_bitmap bitmap;
210 void __iomem *clr_int;
211 void __iomem *uar_map[(MLX4_NUM_EQ + 6) / 4];
212 u32 clr_mask;
213 struct mlx4_eq eq[MLX4_NUM_EQ];
214 u64 icm_virt;
215 struct page *icm_page;
216 dma_addr_t icm_dma;
217 struct mlx4_icm_table cmpt_table;
218 int have_irq;
219 u8 inta_pin;
220};
221
222struct mlx4_srq_table {
223 struct mlx4_bitmap bitmap;
224 spinlock_t lock;
225 struct radix_tree_root tree;
226 struct mlx4_icm_table table;
227 struct mlx4_icm_table cmpt_table;
228};
229
230struct mlx4_qp_table {
231 struct mlx4_bitmap bitmap;
232 u32 rdmarc_base;
233 int rdmarc_shift;
234 spinlock_t lock;
235 struct mlx4_icm_table qp_table;
236 struct mlx4_icm_table auxc_table;
237 struct mlx4_icm_table altc_table;
238 struct mlx4_icm_table rdmarc_table;
239 struct mlx4_icm_table cmpt_table;
240};
241
242struct mlx4_mcg_table {
243 struct mutex mutex;
244 struct mlx4_bitmap bitmap;
245 struct mlx4_icm_table table;
246};
247
248struct mlx4_catas_err {
249 u32 __iomem *map;
250 int size;
251};
252
253struct mlx4_priv {
254 struct mlx4_dev dev;
255
256 struct list_head dev_list;
257 struct list_head ctx_list;
258 spinlock_t ctx_lock;
259
260 struct mlx4_fw fw;
261 struct mlx4_cmd cmd;
262
263 struct mlx4_bitmap pd_bitmap;
264 struct mlx4_uar_table uar_table;
265 struct mlx4_mr_table mr_table;
266 struct mlx4_cq_table cq_table;
267 struct mlx4_eq_table eq_table;
268 struct mlx4_srq_table srq_table;
269 struct mlx4_qp_table qp_table;
270 struct mlx4_mcg_table mcg_table;
271
272 struct mlx4_catas_err catas_err;
273
274 void __iomem *clr_base;
275
276 struct mlx4_uar driver_uar;
277 void __iomem *kar;
278 MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock)
279
280 u32 rev_id;
281 char board_id[MLX4_BOARD_ID_LEN];
282};
283
284static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
285{
286 return container_of(dev, struct mlx4_priv, dev);
287}
288
289u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
290void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
291int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
292void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
293
294int mlx4_reset(struct mlx4_dev *dev);
295
296int mlx4_init_pd_table(struct mlx4_dev *dev);
297int mlx4_init_uar_table(struct mlx4_dev *dev);
298int mlx4_init_mr_table(struct mlx4_dev *dev);
299int mlx4_init_eq_table(struct mlx4_dev *dev);
300int mlx4_init_cq_table(struct mlx4_dev *dev);
301int mlx4_init_qp_table(struct mlx4_dev *dev);
302int mlx4_init_srq_table(struct mlx4_dev *dev);
303int mlx4_init_mcg_table(struct mlx4_dev *dev);
304
305void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
306void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
307void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
308void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
309void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
310void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
311void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
312void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
313
314void mlx4_map_catas_buf(struct mlx4_dev *dev);
315void mlx4_unmap_catas_buf(struct mlx4_dev *dev);
316
317int mlx4_register_device(struct mlx4_dev *dev);
318void mlx4_unregister_device(struct mlx4_dev *dev);
319void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
320 int subtype, int port);
321
322struct mlx4_dev_cap;
323struct mlx4_init_hca_param;
324
325u64 mlx4_make_profile(struct mlx4_dev *dev,
326 struct mlx4_profile *request,
327 struct mlx4_dev_cap *dev_cap,
328 struct mlx4_init_hca_param *init_hca);
329
330int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
331void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
332
333int mlx4_cmd_init(struct mlx4_dev *dev);
334void mlx4_cmd_cleanup(struct mlx4_dev *dev);
335void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
336int mlx4_cmd_use_events(struct mlx4_dev *dev);
337void mlx4_cmd_use_polling(struct mlx4_dev *dev);
338
339void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
340void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
341
342void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
343
344void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
345
346void mlx4_handle_catas_err(struct mlx4_dev *dev);
347
348#endif /* MLX4_H */
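A short sketch of the bitmap allocator declared above, following the pattern mlx4_init_mcg_table() uses earlier in this patch (num objects, mask = num - 1, no reserved entries). The numbers and the function name are illustrative, and the sketch assumes the driver's private mlx4.h for the declarations.

static int example_use_bitmap(void)
{
	struct mlx4_bitmap bitmap;
	u32 obj;
	int err;

	err = mlx4_bitmap_init(&bitmap, 256, 255, 0);	/* 256 objects */
	if (err)
		return err;

	obj = mlx4_bitmap_alloc(&bitmap);
	if (obj == -1) {				/* -1 signals exhaustion */
		mlx4_bitmap_cleanup(&bitmap);
		return -ENOMEM;
	}

	/* ... use object number 'obj' ... */

	mlx4_bitmap_free(&bitmap, obj);
	mlx4_bitmap_cleanup(&bitmap);
	return 0;
}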
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
new file mode 100644
index 000000000000..b33864dab179
--- /dev/null
+++ b/drivers/net/mlx4/mr.c
@@ -0,0 +1,479 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/errno.h>
37
38#include <linux/mlx4/cmd.h>
39
40#include "mlx4.h"
41#include "icm.h"
42
43/*
44 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
45 */
46struct mlx4_mpt_entry {
47 __be32 flags;
48 __be32 qpn;
49 __be32 key;
50 __be32 pd;
51 __be64 start;
52 __be64 length;
53 __be32 lkey;
54 __be32 win_cnt;
55 u8 reserved1[3];
56 u8 mtt_rep;
57 __be64 mtt_seg;
58 __be32 mtt_sz;
59 __be32 entity_size;
60 __be32 first_byte_offset;
61} __attribute__((packed));
62
63#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
64#define MLX4_MPT_FLAG_MIO (1 << 17)
65#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
66#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
67#define MLX4_MPT_FLAG_REGION (1 << 8)
68
69#define MLX4_MTT_FLAG_PRESENT 1
70
71static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
72{
73 int o;
74 int m;
75 u32 seg;
76
77 spin_lock(&buddy->lock);
78
79 for (o = order; o <= buddy->max_order; ++o) {
80 m = 1 << (buddy->max_order - o);
81 seg = find_first_bit(buddy->bits[o], m);
82 if (seg < m)
83 goto found;
84 }
85
86 spin_unlock(&buddy->lock);
87 return -1;
88
89 found:
90 clear_bit(seg, buddy->bits[o]);
91
92 while (o > order) {
93 --o;
94 seg <<= 1;
95 set_bit(seg ^ 1, buddy->bits[o]);
96 }
97
98 spin_unlock(&buddy->lock);
99
100 seg <<= order;
101
102 return seg;
103}
104
105static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
106{
107 seg >>= order;
108
109 spin_lock(&buddy->lock);
110
111 while (test_bit(seg ^ 1, buddy->bits[order])) {
112 clear_bit(seg ^ 1, buddy->bits[order]);
113 seg >>= 1;
114 ++order;
115 }
116
117 set_bit(seg, buddy->bits[order]);
118
119 spin_unlock(&buddy->lock);
120}
121
122static int __devinit mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
123{
124 int i, s;
125
126 buddy->max_order = max_order;
127 spin_lock_init(&buddy->lock);
128
129 buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
130 GFP_KERNEL);
131 if (!buddy->bits)
132 goto err_out;
133
134 for (i = 0; i <= buddy->max_order; ++i) {
135 s = BITS_TO_LONGS(1 << (buddy->max_order - i));
136 buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
137 if (!buddy->bits[i])
138 goto err_out_free;
139 bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
140 }
141
142 set_bit(0, buddy->bits[buddy->max_order]);
143
144 return 0;
145
146err_out_free:
147 for (i = 0; i <= buddy->max_order; ++i)
148 kfree(buddy->bits[i]);
149
150 kfree(buddy->bits);
151
152err_out:
153 return -ENOMEM;
154}
155
156static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
157{
158 int i;
159
160 for (i = 0; i <= buddy->max_order; ++i)
161 kfree(buddy->bits[i]);
162
163 kfree(buddy->bits);
164}
165
166static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
167{
168 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
169 u32 seg;
170
171 seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
172 if (seg == -1)
173 return -1;
174
175 if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
176 seg + (1 << order) - 1)) {
177 mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
178 return -1;
179 }
180
181 return seg;
182}
183
184int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
185 struct mlx4_mtt *mtt)
186{
187 int i;
188
189 if (!npages) {
190 mtt->order = -1;
191 mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
192 return 0;
193 } else
194 mtt->page_shift = page_shift;
195
196 for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
197 ++mtt->order;
198
199 mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
200 if (mtt->first_seg == -1)
201 return -ENOMEM;
202
203 return 0;
204}
205EXPORT_SYMBOL_GPL(mlx4_mtt_init);
206
207void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
208{
209 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
210
211 if (mtt->order < 0)
212 return;
213
214 mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
215 mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
216 mtt->first_seg + (1 << mtt->order) - 1);
217}
218EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
219
220u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
221{
222 return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
223}
224EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
225
226static u32 hw_index_to_key(u32 ind)
227{
228 return (ind >> 24) | (ind << 8);
229}
230
231static u32 key_to_hw_index(u32 key)
232{
233 return (key << 24) | (key >> 8);
234}
235
236static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
237 int mpt_index)
238{
239 return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
240 MLX4_CMD_TIME_CLASS_B);
241}
242
243static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
244 int mpt_index)
245{
246 return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
247 !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
248}
249
250int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
251 int npages, int page_shift, struct mlx4_mr *mr)
252{
253 struct mlx4_priv *priv = mlx4_priv(dev);
254 u32 index;
255 int err;
256
257 index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
258 if (index == -1) {
259 err = -ENOMEM;
260 goto err;
261 }
262
263 mr->iova = iova;
264 mr->size = size;
265 mr->pd = pd;
266 mr->access = access;
267 mr->enabled = 0;
268 mr->key = hw_index_to_key(index);
269
270 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
271 if (err)
272 goto err_index;
273
274 return 0;
275
276err_index:
277 mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
278
279err:
280 kfree(mr);
281 return err;
282}
283EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
284
285void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
286{
287 struct mlx4_priv *priv = mlx4_priv(dev);
288 int err;
289
290 if (mr->enabled) {
291 err = mlx4_HW2SW_MPT(dev, NULL,
292 key_to_hw_index(mr->key) &
293 (dev->caps.num_mpts - 1));
294 if (err)
295 mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
296 }
297
298 mlx4_mtt_cleanup(dev, &mr->mtt);
299 mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
300}
301EXPORT_SYMBOL_GPL(mlx4_mr_free);
302
303int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
304{
305 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
306 struct mlx4_cmd_mailbox *mailbox;
307 struct mlx4_mpt_entry *mpt_entry;
308 int err;
309
310 err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
311 if (err)
312 return err;
313
314 mailbox = mlx4_alloc_cmd_mailbox(dev);
315 if (IS_ERR(mailbox)) {
316 err = PTR_ERR(mailbox);
317 goto err_table;
318 }
319 mpt_entry = mailbox->buf;
320
321 memset(mpt_entry, 0, sizeof *mpt_entry);
322
323 mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS |
324 MLX4_MPT_FLAG_MIO |
325 MLX4_MPT_FLAG_REGION |
326 mr->access);
327 if (mr->mtt.order < 0)
328 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
329
330 mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
331 mpt_entry->pd = cpu_to_be32(mr->pd);
332 mpt_entry->start = cpu_to_be64(mr->iova);
333 mpt_entry->length = cpu_to_be64(mr->size);
334 mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
335 mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
336
337 err = mlx4_SW2HW_MPT(dev, mailbox,
338 key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
339 if (err) {
340 mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
341 goto err_cmd;
342 }
343
344 mr->enabled = 1;
345
346 mlx4_free_cmd_mailbox(dev, mailbox);
347
348 return 0;
349
350err_cmd:
351 mlx4_free_cmd_mailbox(dev, mailbox);
352
353err_table:
354 mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
355 return err;
356}
357EXPORT_SYMBOL_GPL(mlx4_mr_enable);
358
359static int mlx4_WRITE_MTT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
360 int num_mtt)
361{
362 return mlx4_cmd(dev, mailbox->dma, num_mtt, 0, MLX4_CMD_WRITE_MTT,
363 MLX4_CMD_TIME_CLASS_B);
364}
365
366int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
367 int start_index, int npages, u64 *page_list)
368{
369 struct mlx4_cmd_mailbox *mailbox;
370 __be64 *mtt_entry;
371 int i;
372 int err = 0;
373
374 if (mtt->order < 0)
375 return -EINVAL;
376
377 mailbox = mlx4_alloc_cmd_mailbox(dev);
378 if (IS_ERR(mailbox))
379 return PTR_ERR(mailbox);
380
381 mtt_entry = mailbox->buf;
382
383 while (npages > 0) {
384 mtt_entry[0] = cpu_to_be64(mlx4_mtt_addr(dev, mtt) + start_index * 8);
385 mtt_entry[1] = 0;
386
387 for (i = 0; i < npages && i < MLX4_MAILBOX_SIZE / 8 - 2; ++i)
388 mtt_entry[i + 2] = cpu_to_be64(page_list[i] |
389 MLX4_MTT_FLAG_PRESENT);
390
391 /*
392 * If we have an odd number of entries to write, add
393 * one more dummy entry for firmware efficiency.
394 */
395 if (i & 1)
396 mtt_entry[i + 2] = 0;
397
398 err = mlx4_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
399 if (err)
400 goto out;
401
402 npages -= i;
403 start_index += i;
404 page_list += i;
405 }
406
407out:
408 mlx4_free_cmd_mailbox(dev, mailbox);
409
410 return err;
411}
412EXPORT_SYMBOL_GPL(mlx4_write_mtt);
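
The loop above streams MTT entries to the firmware in mailbox-sized chunks: each chunk is prefixed with its destination offset in the MTT table and padded to an even entry count. A minimal userspace sketch of that batching logic follows; the chunk size, the write_chunk() stub and all constants are illustrative assumptions, not the driver's real mailbox interface.

/* Sketch of the batching done by mlx4_write_mtt(); not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define ENTRIES_PER_CHUNK 6     /* stands in for MLX4_MAILBOX_SIZE / 8 - 2 */
#define MTT_FLAG_PRESENT  1

static void write_chunk(uint64_t dest, const uint64_t *e, int n)
{
    /* the driver issues a WRITE_MTT firmware command here */
    printf("WRITE_MTT dest=0x%llx first=0x%llx entries=%d\n",
           (unsigned long long)dest, (unsigned long long)e[0], n);
}

static void write_mtt(uint64_t mtt_base, int start_index,
                      int npages, const uint64_t *page_list)
{
    uint64_t chunk[ENTRIES_PER_CHUNK];

    while (npages > 0) {
        int i;

        for (i = 0; i < npages && i < ENTRIES_PER_CHUNK; ++i)
            chunk[i] = page_list[i] | MTT_FLAG_PRESENT;

        /* pad odd chunks with one dummy entry, as the driver does */
        int n = (i + 1) & ~1;
        if (i & 1)
            chunk[i] = 0;

        write_chunk(mtt_base + start_index * 8, chunk, n);

        npages      -= i;
        start_index += i;
        page_list   += i;
    }
}

int main(void)
{
    uint64_t pages[7] = { 0x1000, 0x2000, 0x3000, 0x4000,
                          0x5000, 0x6000, 0x7000 };
    write_mtt(0x100000, 0, 7, pages);   /* two chunks: 6 entries, then 1 + padding */
    return 0;
}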
413
414int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
415 struct mlx4_buf *buf)
416{
417 u64 *page_list;
418 int err;
419 int i;
420
421 page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
422 if (!page_list)
423 return -ENOMEM;
424
425 for (i = 0; i < buf->npages; ++i)
426 if (buf->nbufs == 1)
427 page_list[i] = buf->u.direct.map + (i << buf->page_shift);
428 else
429 page_list[i] = buf->u.page_list[i].map;
430
431 err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
432
433 kfree(page_list);
434 return err;
435}
436EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
437
438int __devinit mlx4_init_mr_table(struct mlx4_dev *dev)
439{
440 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
441 int err;
442
443 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
444 ~0, dev->caps.reserved_mrws);
445 if (err)
446 return err;
447
448 err = mlx4_buddy_init(&mr_table->mtt_buddy,
449 ilog2(dev->caps.num_mtt_segs));
450 if (err)
451 goto err_buddy;
452
453 if (dev->caps.reserved_mtts) {
454 if (mlx4_alloc_mtt_range(dev, ilog2(dev->caps.reserved_mtts)) == -1) {
455 mlx4_warn(dev, "MTT table of order %d is too small.\n",
456 mr_table->mtt_buddy.max_order);
457 err = -ENOMEM;
458 goto err_reserve_mtts;
459 }
460 }
461
462 return 0;
463
464err_reserve_mtts:
465 mlx4_buddy_cleanup(&mr_table->mtt_buddy);
466
467err_buddy:
468 mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
469
470 return err;
471}
472
473void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
474{
475 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
476
477 mlx4_buddy_cleanup(&mr_table->mtt_buddy);
478 mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
479}
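
mlx4_buddy_alloc()/mlx4_buddy_free() above implement a classic binary buddy allocator over per-order bitmaps: allocation takes the smallest free block of sufficient order and splits it down, marking the right-hand buddies free; freeing coalesces a block with its buddy as long as the buddy is also free. Below is a self-contained userspace sketch of the same idea, assuming a tiny fixed MAX_ORDER and byte-per-bit arrays instead of the kernel bitmap helpers; all names are made up and nothing here is the driver's API.

/* Minimal userspace model of the MTT buddy allocator. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ORDER 4                        /* manages 2^MAX_ORDER segments */

static unsigned char *bits[MAX_ORDER + 1]; /* bits[o][s]: free block of 2^o segs at s<<o */

static int buddy_init(void)
{
    for (int o = 0; o <= MAX_ORDER; ++o) {
        bits[o] = calloc(1 << (MAX_ORDER - o), 1);
        if (!bits[o])
            return -1;
    }
    bits[MAX_ORDER][0] = 1;                /* start with one maximal free block */
    return 0;
}

static int buddy_alloc(int order)
{
    int o, seg = -1;

    for (o = order; o <= MAX_ORDER; ++o) { /* smallest free block of order >= order */
        for (int s = 0; s < (1 << (MAX_ORDER - o)); ++s)
            if (bits[o][s]) { seg = s; break; }
        if (seg >= 0)
            break;
    }
    if (seg < 0)
        return -1;

    bits[o][seg] = 0;
    while (o > order) {                    /* split, leaving the right buddy free */
        --o;
        seg <<= 1;
        bits[o][seg ^ 1] = 1;
    }
    return seg << order;                   /* first segment index of the block */
}

static void buddy_free(int seg, int order)
{
    seg >>= order;
    while (order < MAX_ORDER && bits[order][seg ^ 1]) {   /* coalesce with free buddy */
        bits[order][seg ^ 1] = 0;
        seg >>= 1;
        ++order;
    }
    bits[order][seg] = 1;
}

int main(void)
{
    if (buddy_init())
        return 1;
    int a = buddy_alloc(1);                /* 2 contiguous segments */
    int b = buddy_alloc(0);                /* 1 segment */
    printf("a=%d b=%d\n", a, b);
    buddy_free(a, 1);
    buddy_free(b, 0);                      /* everything coalesces back to one block */
    return 0;
}

In the driver the same logic runs under buddy->lock and the segment number is then turned into an ICM range via mlx4_table_get_range().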
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
new file mode 100644
index 000000000000..23dea1ee7750
--- /dev/null
+++ b/drivers/net/mlx4/pd.c
@@ -0,0 +1,102 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/init.h>
35#include <linux/errno.h>
36
37#include <asm/page.h>
38
39#include "mlx4.h"
40#include "icm.h"
41
42int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
43{
44 struct mlx4_priv *priv = mlx4_priv(dev);
45
46 *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
47 if (*pdn == -1)
48 return -ENOMEM;
49
50 return 0;
51}
52EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
53
54void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
55{
56 mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
57}
58EXPORT_SYMBOL_GPL(mlx4_pd_free);
59
60int __devinit mlx4_init_pd_table(struct mlx4_dev *dev)
61{
62 struct mlx4_priv *priv = mlx4_priv(dev);
63
64 return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
65 (1 << 24) - 1, dev->caps.reserved_pds);
66}
67
68void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
69{
70 mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
71}
72
73
74int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
75{
76 uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
77 if (uar->index == -1)
78 return -ENOMEM;
79
80 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
81
82 return 0;
83}
84EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
85
86void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
87{
88 mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
89}
90EXPORT_SYMBOL_GPL(mlx4_uar_free);
91
92int mlx4_init_uar_table(struct mlx4_dev *dev)
93{
94 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
95 dev->caps.num_uars, dev->caps.num_uars - 1,
96 max(128, dev->caps.reserved_uars));
97}
98
99void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
100{
101 mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
102}
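
PD and UAR numbers are handed out from plain number allocators created with mlx4_bitmap_init(num, mask, reserved). The sketch below models only the externally visible behaviour assumed from this file: a pool with a reserved prefix that returns -1 when exhausted. It is a linear scan, not the driver's round-robin bitmap, and every name in it is hypothetical.

/* Toy model of a numbered-resource pool with reserved entries. */
#include <stdio.h>
#include <stdlib.h>

struct num_alloc {
    int num;                  /* total entries */
    int reserved;             /* indices below this are never handed out */
    unsigned char *used;
};

static int num_alloc_init(struct num_alloc *a, int num, int reserved)
{
    a->num = num;
    a->reserved = reserved;
    a->used = calloc(num, 1);
    return a->used ? 0 : -1;
}

static int num_alloc_get(struct num_alloc *a)
{
    for (int i = a->reserved; i < a->num; ++i)
        if (!a->used[i]) {
            a->used[i] = 1;
            return i;
        }
    return -1;                /* mirrors mlx4_bitmap_alloc() when the pool is empty */
}

static void num_alloc_put(struct num_alloc *a, int i)
{
    a->used[i] = 0;
}

int main(void)
{
    struct num_alloc pd;
    if (num_alloc_init(&pd, 16, 2))   /* e.g. 16 PDs, 2 reserved for firmware */
        return 1;
    int pdn = num_alloc_get(&pd);
    printf("allocated pdn %d\n", pdn);
    num_alloc_put(&pd, pdn);
    free(pd.used);
    return 0;
}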
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
new file mode 100644
index 000000000000..9ca42b213d54
--- /dev/null
+++ b/drivers/net/mlx4/profile.c
@@ -0,0 +1,238 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36
37#include "mlx4.h"
38#include "fw.h"
39
40enum {
41 MLX4_RES_QP,
42 MLX4_RES_RDMARC,
43 MLX4_RES_ALTC,
44 MLX4_RES_AUXC,
45 MLX4_RES_SRQ,
46 MLX4_RES_CQ,
47 MLX4_RES_EQ,
48 MLX4_RES_DMPT,
49 MLX4_RES_CMPT,
50 MLX4_RES_MTT,
51 MLX4_RES_MCG,
52 MLX4_RES_NUM
53};
54
55static const char *res_name[] = {
56 [MLX4_RES_QP] = "QP",
57 [MLX4_RES_RDMARC] = "RDMARC",
58 [MLX4_RES_ALTC] = "ALTC",
59 [MLX4_RES_AUXC] = "AUXC",
60 [MLX4_RES_SRQ] = "SRQ",
61 [MLX4_RES_CQ] = "CQ",
62 [MLX4_RES_EQ] = "EQ",
63 [MLX4_RES_DMPT] = "DMPT",
64 [MLX4_RES_CMPT] = "CMPT",
65 [MLX4_RES_MTT] = "MTT",
66 [MLX4_RES_MCG] = "MCG",
67};
68
69u64 mlx4_make_profile(struct mlx4_dev *dev,
70 struct mlx4_profile *request,
71 struct mlx4_dev_cap *dev_cap,
72 struct mlx4_init_hca_param *init_hca)
73{
74 struct mlx4_priv *priv = mlx4_priv(dev);
75 struct mlx4_resource {
76 u64 size;
77 u64 start;
78 int type;
79 int num;
80 int log_num;
81 };
82
83 u64 total_size = 0;
84 struct mlx4_resource *profile;
85 struct mlx4_resource tmp;
86 int i, j;
87
88 profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL);
89 if (!profile)
90 return -ENOMEM;
91
92 profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
93 profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
94 profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz;
95 profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz;
96 profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz;
97 profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz;
98 profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
99 profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
100 profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
101 profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
102 profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
103
104 profile[MLX4_RES_QP].num = request->num_qp;
105 profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
106 profile[MLX4_RES_ALTC].num = request->num_qp;
107 profile[MLX4_RES_AUXC].num = request->num_qp;
108 profile[MLX4_RES_SRQ].num = request->num_srq;
109 profile[MLX4_RES_CQ].num = request->num_cq;
110 profile[MLX4_RES_EQ].num = MLX4_NUM_EQ + dev_cap->reserved_eqs;
111 profile[MLX4_RES_DMPT].num = request->num_mpt;
112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
113 profile[MLX4_RES_MTT].num = request->num_mtt;
114 profile[MLX4_RES_MCG].num = request->num_mcg;
115
116 for (i = 0; i < MLX4_RES_NUM; ++i) {
117 profile[i].type = i;
118 profile[i].num = roundup_pow_of_two(profile[i].num);
119 profile[i].log_num = ilog2(profile[i].num);
120 profile[i].size *= profile[i].num;
121 profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
122 }
123
124 /*
125 * Sort the resources in decreasing order of size. Since they
126 * all have sizes that are powers of 2, we'll be able to keep
127 * resources aligned to their size and pack them without gaps
128 * using the sorted order.
129 */
130 for (i = MLX4_RES_NUM; i > 0; --i)
131 for (j = 1; j < i; ++j) {
132 if (profile[j].size > profile[j - 1].size) {
133 tmp = profile[j];
134 profile[j] = profile[j - 1];
135 profile[j - 1] = tmp;
136 }
137 }
138
139 for (i = 0; i < MLX4_RES_NUM; ++i) {
140 if (profile[i].size) {
141 profile[i].start = total_size;
142 total_size += profile[i].size;
143 }
144
145 if (total_size > dev_cap->max_icm_sz) {
146 mlx4_err(dev, "Profile requires 0x%llx bytes; "
147 "won't fit in 0x%llx bytes of context memory.\n",
148 (unsigned long long) total_size,
149 (unsigned long long) dev_cap->max_icm_sz);
150 kfree(profile);
151 return -ENOMEM;
152 }
153
154 if (profile[i].size)
155 mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
156 "size 0x%10llx\n",
157 i, res_name[profile[i].type], profile[i].log_num,
158 (unsigned long long) profile[i].start,
159 (unsigned long long) profile[i].size);
160 }
161
162 mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
163 (int) (total_size >> 10));
164
165 for (i = 0; i < MLX4_RES_NUM; ++i) {
166 switch (profile[i].type) {
167 case MLX4_RES_QP:
168 dev->caps.num_qps = profile[i].num;
169 init_hca->qpc_base = profile[i].start;
170 init_hca->log_num_qps = profile[i].log_num;
171 break;
172 case MLX4_RES_RDMARC:
173 for (priv->qp_table.rdmarc_shift = 0;
174 request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
175 ++priv->qp_table.rdmarc_shift)
176 ; /* nothing */
177 dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
178 priv->qp_table.rdmarc_base = (u32) profile[i].start;
179 init_hca->rdmarc_base = profile[i].start;
180 init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift;
181 break;
182 case MLX4_RES_ALTC:
183 init_hca->altc_base = profile[i].start;
184 break;
185 case MLX4_RES_AUXC:
186 init_hca->auxc_base = profile[i].start;
187 break;
188 case MLX4_RES_SRQ:
189 dev->caps.num_srqs = profile[i].num;
190 init_hca->srqc_base = profile[i].start;
191 init_hca->log_num_srqs = profile[i].log_num;
192 break;
193 case MLX4_RES_CQ:
194 dev->caps.num_cqs = profile[i].num;
195 init_hca->cqc_base = profile[i].start;
196 init_hca->log_num_cqs = profile[i].log_num;
197 break;
198 case MLX4_RES_EQ:
199 dev->caps.num_eqs = profile[i].num;
200 init_hca->eqc_base = profile[i].start;
201 init_hca->log_num_eqs = profile[i].log_num;
202 break;
203 case MLX4_RES_DMPT:
204 dev->caps.num_mpts = profile[i].num;
205 priv->mr_table.mpt_base = profile[i].start;
206 init_hca->dmpt_base = profile[i].start;
207 init_hca->log_mpt_sz = profile[i].log_num;
208 break;
209 case MLX4_RES_CMPT:
210 init_hca->cmpt_base = profile[i].start;
211 break;
212 case MLX4_RES_MTT:
213 dev->caps.num_mtt_segs = profile[i].num;
214 priv->mr_table.mtt_base = profile[i].start;
215 init_hca->mtt_base = profile[i].start;
216 break;
217 case MLX4_RES_MCG:
218 dev->caps.num_mgms = profile[i].num >> 1;
219 dev->caps.num_amgms = profile[i].num >> 1;
220 init_hca->mc_base = profile[i].start;
221 init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
222 init_hca->log_mc_table_sz = profile[i].log_num;
223 init_hca->log_mc_hash_sz = profile[i].log_num - 1;
224 break;
225 default:
226 break;
227 }
228 }
229
230 /*
231 * PDs don't take any HCA memory, but we assign them as part
232 * of the HCA profile anyway.
233 */
234 dev->caps.num_pds = MLX4_NUM_PDS;
235
236 kfree(profile);
237 return total_size;
238}
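
mlx4_make_profile() sizes each ICM resource, rounds it up to a power of two, sorts the resources by size (largest first) and packs them back to back, which keeps every area naturally aligned to its own size. A toy model of that layout computation is shown below; the resource names and byte counts are invented for illustration only.

/* Toy model of the power-of-two sort-and-pack layout used for ICM. */
#include <stdio.h>
#include <stdlib.h>

struct res { const char *name; unsigned long long size, start; };

static int cmp_desc(const void *a, const void *b)
{
    const struct res *x = a, *y = b;
    return (y->size > x->size) - (y->size < x->size);   /* biggest first */
}

static unsigned long long roundup_pow2(unsigned long long v)
{
    unsigned long long p = 1;
    while (p < v)
        p <<= 1;
    return p;
}

int main(void)
{
    struct res r[] = {
        { "QP",  3000, 0 },
        { "CQ",   900, 0 },
        { "MTT", 6000, 0 },
        { "MCG",  100, 0 },
    };
    int n = sizeof(r) / sizeof(r[0]);
    unsigned long long total = 0;

    for (int i = 0; i < n; ++i)
        r[i].size = roundup_pow2(r[i].size);

    qsort(r, n, sizeof(r[0]), cmp_desc);

    for (int i = 0; i < n; ++i) {
        r[i].start = total;            /* aligned to r[i].size by construction */
        total += r[i].size;
        printf("%-4s start=0x%llx size=0x%llx\n", r[i].name, r[i].start, r[i].size);
    }
    printf("total=0x%llx\n", total);
    return 0;
}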
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
new file mode 100644
index 000000000000..7f8b7d55b6e1
--- /dev/null
+++ b/drivers/net/mlx4/qp.c
@@ -0,0 +1,280 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/init.h>
37
38#include <linux/mlx4/cmd.h>
39#include <linux/mlx4/qp.h>
40
41#include "mlx4.h"
42#include "icm.h"
43
44void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
45{
46 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
47 struct mlx4_qp *qp;
48
49 spin_lock(&qp_table->lock);
50
51 qp = __mlx4_qp_lookup(dev, qpn);
52 if (qp)
53 atomic_inc(&qp->refcount);
54
55 spin_unlock(&qp_table->lock);
56
57 if (!qp) {
58 mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
59 return;
60 }
61
62 qp->event(qp, event_type);
63
64 if (atomic_dec_and_test(&qp->refcount))
65 complete(&qp->free);
66}
67
68int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
69 enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
70 struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
71 int sqd_event, struct mlx4_qp *qp)
72{
73 static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
74 [MLX4_QP_STATE_RST] = {
75 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
76 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
77 [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP,
78 },
79 [MLX4_QP_STATE_INIT] = {
80 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
81 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
82 [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP,
83 [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP,
84 },
85 [MLX4_QP_STATE_RTR] = {
86 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
87 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
88 [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP,
89 },
90 [MLX4_QP_STATE_RTS] = {
91 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
92 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
93 [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP,
94 [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP,
95 },
96 [MLX4_QP_STATE_SQD] = {
97 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
98 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
99 [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP,
100 [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP,
101 },
102 [MLX4_QP_STATE_SQER] = {
103 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
104 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
105 [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP,
106 },
107 [MLX4_QP_STATE_ERR] = {
108 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
109 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
110 }
111 };
112
113 struct mlx4_cmd_mailbox *mailbox;
114 int ret = 0;
115
116 if (cur_state < 0 || cur_state >= MLX4_QP_NUM_STATE ||
117 new_state < 0 || new_state >= MLX4_QP_NUM_STATE ||
118 !op[cur_state][new_state])
119 return -EINVAL;
120
121 if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
122 return mlx4_cmd(dev, 0, qp->qpn, 2,
123 MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
124
125 mailbox = mlx4_alloc_cmd_mailbox(dev);
126 if (IS_ERR(mailbox))
127 return PTR_ERR(mailbox);
128
129 if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
130 u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
131 context->mtt_base_addr_h = mtt_addr >> 32;
132 context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
133 context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
134 }
135
136 *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
137 memcpy(mailbox->buf + 8, context, sizeof *context);
138
139 ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
140 cpu_to_be32(qp->qpn);
141
142 ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
143 new_state == MLX4_QP_STATE_RST ? 2 : 0,
144 op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
145
146 mlx4_free_cmd_mailbox(dev, mailbox);
147 return ret;
148}
149EXPORT_SYMBOL_GPL(mlx4_qp_modify);
150
151int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
152{
153 struct mlx4_priv *priv = mlx4_priv(dev);
154 struct mlx4_qp_table *qp_table = &priv->qp_table;
155 int err;
156
157 if (sqpn)
158 qp->qpn = sqpn;
159 else {
160 qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
161 if (qp->qpn == -1)
162 return -ENOMEM;
163 }
164
165 err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
166 if (err)
167 goto err_out;
168
169 err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
170 if (err)
171 goto err_put_qp;
172
173 err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
174 if (err)
175 goto err_put_auxc;
176
177 err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
178 if (err)
179 goto err_put_altc;
180
181 err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
182 if (err)
183 goto err_put_rdmarc;
184
185 spin_lock_irq(&qp_table->lock);
186 err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
187 spin_unlock_irq(&qp_table->lock);
188 if (err)
189 goto err_put_cmpt;
190
191 atomic_set(&qp->refcount, 1);
192 init_completion(&qp->free);
193
194 return 0;
195
196err_put_cmpt:
197 mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
198
199err_put_rdmarc:
200 mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
201
202err_put_altc:
203 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
204
205err_put_auxc:
206 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
207
208err_put_qp:
209 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
210
211err_out:
212 if (!sqpn)
213 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
214
215 return err;
216}
217EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
218
219void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
220{
221 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
222 unsigned long flags;
223
224 spin_lock_irqsave(&qp_table->lock, flags);
225 radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
226 spin_unlock_irqrestore(&qp_table->lock, flags);
227}
228EXPORT_SYMBOL_GPL(mlx4_qp_remove);
229
230void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
231{
232 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
233
234 if (atomic_dec_and_test(&qp->refcount))
235 complete(&qp->free);
236 wait_for_completion(&qp->free);
237
238 mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
239 mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
240 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
241 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
242 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
243
244 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
245}
246EXPORT_SYMBOL_GPL(mlx4_qp_free);
247
248static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
249{
250 return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
251 MLX4_CMD_TIME_CLASS_B);
252}
253
254int __devinit mlx4_init_qp_table(struct mlx4_dev *dev)
255{
256 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
257 int err;
258
259 spin_lock_init(&qp_table->lock);
260 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
261
262 /*
263 * We reserve 2 extra QPs per port for the special QPs. The
264 * block of special QPs must be aligned to a multiple of 8, so
265 * round up.
266 */
267 dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
268 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
269 (1 << 24) - 1, dev->caps.sqp_start + 8);
270 if (err)
271 return err;
272
273 return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
274}
275
276void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
277{
278 mlx4_CONF_SPECIAL_QP(dev, 0);
279 mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
280}
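
mlx4_qp_modify() validates and dispatches state transitions through a two-dimensional opcode table in which a zero entry marks an illegal transition. A compact sketch of that lookup pattern follows, with a reduced state set and arbitrary opcode values; it is a model of the table technique, not the driver's command interface.

/* Sketch of table-driven QP state transition validation. */
#include <stdio.h>

enum qp_state { RST, INIT, RTR, RTS, NUM_STATE };

static const unsigned short op[NUM_STATE][NUM_STATE] = {
    [RST]  = { [INIT] = 0x16 },                 /* RST  -> INIT */
    [INIT] = { [INIT] = 0x17, [RTR] = 0x18 },   /* INIT -> INIT, INIT -> RTR */
    [RTR]  = { [RTS]  = 0x19 },                 /* RTR  -> RTS */
    [RTS]  = { [RTS]  = 0x1a },                 /* RTS  -> RTS */
};

static int qp_modify(enum qp_state cur, enum qp_state next)
{
    if (cur < 0 || cur >= NUM_STATE || next < 0 || next >= NUM_STATE ||
        !op[cur][next])
        return -1;                              /* -EINVAL in the driver */
    printf("issue opcode 0x%x\n", op[cur][next]);
    return 0;
}

int main(void)
{
    qp_modify(RST, INIT);            /* valid transition */
    if (qp_modify(RST, RTS))         /* invalid: must pass through INIT and RTR */
        printf("RST->RTS rejected\n");
    return 0;
}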
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
new file mode 100644
index 000000000000..51eef8492e93
--- /dev/null
+++ b/drivers/net/mlx4/reset.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/init.h>
34#include <linux/errno.h>
35#include <linux/pci.h>
36#include <linux/delay.h>
37#include <linux/slab.h>
38
39#include "mlx4.h"
40
41int mlx4_reset(struct mlx4_dev *dev)
42{
43 void __iomem *reset;
44 u32 *hca_header = NULL;
45 int pcie_cap;
46 u16 devctl;
47 u16 linkctl;
48 u16 vendor;
49 unsigned long end;
50 u32 sem;
51 int i;
52 int err = 0;
53
54#define MLX4_RESET_BASE 0xf0000
55#define MLX4_RESET_SIZE 0x400
56#define MLX4_SEM_OFFSET 0x3fc
57#define MLX4_RESET_OFFSET 0x10
58#define MLX4_RESET_VALUE swab32(1)
59
60#define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ)
61#define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ)
62
63 /*
64 * Reset the chip. This is somewhat ugly because we have to
65 * save off the PCI header before reset and then restore it
66 * after the chip reboots. We skip config space offsets 22
67 * and 23 since those have a special meaning.
68 */
69
70 /* Do we need to save off the full 4K PCI Express header?? */
71 hca_header = kmalloc(256, GFP_KERNEL);
72 if (!hca_header) {
73 err = -ENOMEM;
74 mlx4_err(dev, "Couldn't allocate memory to save HCA "
75 "PCI header, aborting.\n");
76 goto out;
77 }
78
79 pcie_cap = pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
80
81 for (i = 0; i < 64; ++i) {
82 if (i == 22 || i == 23)
83 continue;
84 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
85 err = -ENODEV;
86 mlx4_err(dev, "Couldn't save HCA "
87 "PCI header, aborting.\n");
88 goto out;
89 }
90 }
91
92 reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
93 MLX4_RESET_SIZE);
94 if (!reset) {
95 err = -ENOMEM;
96 mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
97 goto out;
98 }
99
100 /* grab HW semaphore to lock out flash updates */
101 end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
102 do {
103 sem = readl(reset + MLX4_SEM_OFFSET);
104 if (!sem)
105 break;
106
107 msleep(1);
108 } while (time_before(jiffies, end));
109
110 if (sem) {
111 mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n");
112 err = -EAGAIN;
113 iounmap(reset);
114 goto out;
115 }
116
117 /* actually hit reset */
118 writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
119 iounmap(reset);
120
121 end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
122 do {
123 if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
124 vendor != 0xffff)
125 break;
126
127 msleep(1);
128 } while (time_before(jiffies, end));
129
130 if (vendor == 0xffff) {
131 err = -ENODEV;
132 mlx4_err(dev, "PCI device did not come back after reset, "
133 "aborting.\n");
134 goto out;
135 }
136
137 /* Now restore the PCI headers */
138 if (pcie_cap) {
139 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
140 if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
141 devctl)) {
142 err = -ENODEV;
143 mlx4_err(dev, "Couldn't restore HCA PCI Express "
144 "Device Control register, aborting.\n");
145 goto out;
146 }
147 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
148 if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
149 linkctl)) {
150 err = -ENODEV;
151 mlx4_err(dev, "Couldn't restore HCA PCI Express "
152 "Link control register, aborting.\n");
153 goto out;
154 }
155 }
156
157 for (i = 0; i < 16; ++i) {
158 if (i * 4 == PCI_COMMAND)
159 continue;
160
161 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
162 err = -ENODEV;
163 mlx4_err(dev, "Couldn't restore HCA reg %x, "
164 "aborting.\n", i);
165 goto out;
166 }
167 }
168
169 if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
170 hca_header[PCI_COMMAND / 4])) {
171 err = -ENODEV;
172 mlx4_err(dev, "Couldn't restore HCA COMMAND, "
173 "aborting.\n");
174 goto out;
175 }
176
177out:
178 kfree(hca_header);
179
180 return err;
181}
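
mlx4_reset() uses the same "poll a register until it clears or a timeout expires" pattern twice: once to grab the flash semaphore before hitting reset, and once to wait for the device to reappear on the bus afterwards. A userspace sketch of that pattern is below, using time() and usleep() in place of jiffies and msleep(), and a fake read_sem() that stands in for the memory-mapped semaphore register.

/* Sketch of poll-until-clear-or-timeout, as used around the HCA reset. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static unsigned int read_sem(void)
{
    static int countdown = 3;          /* pretend hardware: busy for 3 polls */
    return countdown-- > 0;
}

static int acquire_hw_sem(int timeout_sec)
{
    time_t end = time(NULL) + timeout_sec;
    unsigned int sem;

    do {
        sem = read_sem();
        if (!sem)
            return 0;                  /* semaphore free: we now own it */
        usleep(1000);                  /* msleep(1) in the driver */
    } while (time(NULL) < end);

    return -1;                         /* -EAGAIN in the driver */
}

int main(void)
{
    if (acquire_hw_sem(10))
        fprintf(stderr, "failed to obtain HW semaphore\n");
    else
        printf("semaphore acquired, safe to hit reset\n");
    return 0;
}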
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
new file mode 100644
index 000000000000..2134f83aed87
--- /dev/null
+++ b/drivers/net/mlx4/srq.c
@@ -0,0 +1,227 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/init.h>
34
35#include <linux/mlx4/cmd.h>
36
37#include "mlx4.h"
38#include "icm.h"
39
40struct mlx4_srq_context {
41 __be32 state_logsize_srqn;
42 u8 logstride;
43 u8 reserved1[3];
44 u8 pg_offset;
45 u8 reserved2[3];
46 u32 reserved3;
47 u8 log_page_size;
48 u8 reserved4[2];
49 u8 mtt_base_addr_h;
50 __be32 mtt_base_addr_l;
51 __be32 pd;
52 __be16 limit_watermark;
53 __be16 wqe_cnt;
54 u16 reserved5;
55 __be16 wqe_counter;
56 u32 reserved6;
57 __be64 db_rec_addr;
58};
59
60void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
61{
62 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
63 struct mlx4_srq *srq;
64
65 spin_lock(&srq_table->lock);
66
67 srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
68 if (srq)
69 atomic_inc(&srq->refcount);
70
71 spin_unlock(&srq_table->lock);
72
73 if (!srq) {
74 mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
75 return;
76 }
77
78 srq->event(srq, event_type);
79
80 if (atomic_dec_and_test(&srq->refcount))
81 complete(&srq->free);
82}
83
84static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
85 int srq_num)
86{
87 return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
88 MLX4_CMD_TIME_CLASS_A);
89}
90
91static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
92 int srq_num)
93{
94 return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
95 mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
96 MLX4_CMD_TIME_CLASS_A);
97}
98
99static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
100{
101 return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
102 MLX4_CMD_TIME_CLASS_B);
103}
104
105int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
106 u64 db_rec, struct mlx4_srq *srq)
107{
108 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
109 struct mlx4_cmd_mailbox *mailbox;
110 struct mlx4_srq_context *srq_context;
111 u64 mtt_addr;
112 int err;
113
114 srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
115 if (srq->srqn == -1)
116 return -ENOMEM;
117
118 err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
119 if (err)
120 goto err_out;
121
122 err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
123 if (err)
124 goto err_put;
125
126 spin_lock_irq(&srq_table->lock);
127 err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
128 spin_unlock_irq(&srq_table->lock);
129 if (err)
130 goto err_cmpt_put;
131
132 mailbox = mlx4_alloc_cmd_mailbox(dev);
133 if (IS_ERR(mailbox)) {
134 err = PTR_ERR(mailbox);
135 goto err_radix;
136 }
137
138 srq_context = mailbox->buf;
139 memset(srq_context, 0, sizeof *srq_context);
140
141 srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
142 srq->srqn);
143 srq_context->logstride = srq->wqe_shift - 4;
144 srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
145
146 mtt_addr = mlx4_mtt_addr(dev, mtt);
147 srq_context->mtt_base_addr_h = mtt_addr >> 32;
148 srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
149 srq_context->pd = cpu_to_be32(pdn);
150 srq_context->db_rec_addr = cpu_to_be64(db_rec);
151
152 err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
153 mlx4_free_cmd_mailbox(dev, mailbox);
154 if (err)
155 goto err_radix;
156
157 atomic_set(&srq->refcount, 1);
158 init_completion(&srq->free);
159
160 return 0;
161
162err_radix:
163 spin_lock_irq(&srq_table->lock);
164 radix_tree_delete(&srq_table->tree, srq->srqn);
165 spin_unlock_irq(&srq_table->lock);
166
167err_cmpt_put:
168 mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
169
170err_put:
171 mlx4_table_put(dev, &srq_table->table, srq->srqn);
172
173err_out:
174 mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
175
176 return err;
177}
178EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
179
180void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
181{
182 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
183 int err;
184
185 err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
186 if (err)
187 mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);
188
189 spin_lock_irq(&srq_table->lock);
190 radix_tree_delete(&srq_table->tree, srq->srqn);
191 spin_unlock_irq(&srq_table->lock);
192
193 if (atomic_dec_and_test(&srq->refcount))
194 complete(&srq->free);
195 wait_for_completion(&srq->free);
196
197 mlx4_table_put(dev, &srq_table->table, srq->srqn);
198 mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
199}
200EXPORT_SYMBOL_GPL(mlx4_srq_free);
201
202int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
203{
204 return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
205}
206EXPORT_SYMBOL_GPL(mlx4_srq_arm);
207
208int __devinit mlx4_init_srq_table(struct mlx4_dev *dev)
209{
210 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
211 int err;
212
213 spin_lock_init(&srq_table->lock);
214 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
215
216 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
217 dev->caps.num_srqs - 1, dev->caps.reserved_srqs);
218 if (err)
219 return err;
220
221 return 0;
222}
223
224void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
225{
226 mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
227}
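
Both the SRQ and QP event paths rely on the same teardown protocol: look the object up under the table lock, take a reference before calling its event handler, and have the free path drop the initial reference and wait on a completion until every in-flight event has finished. A single-threaded sketch of that protocol using C11 atomics in place of atomic_t and struct completion; the names are illustrative and the busy-wait stands in for wait_for_completion().

/* Sketch of the refcount-plus-completion teardown protocol. */
#include <stdatomic.h>
#include <stdio.h>

struct obj {
    atomic_int refcount;
    atomic_int freed;                   /* models complete(&obj->free) */
};

static void obj_init(struct obj *o)
{
    atomic_init(&o->refcount, 1);       /* initial reference held by the owner */
    atomic_init(&o->freed, 0);
}

static void obj_event(struct obj *o)
{
    atomic_fetch_add(&o->refcount, 1);  /* taken under the table lock in the driver */
    /* ... deliver the event callback here ... */
    if (atomic_fetch_sub(&o->refcount, 1) == 1)
        atomic_store(&o->freed, 1);     /* last user signals completion */
}

static void obj_free(struct obj *o)
{
    if (atomic_fetch_sub(&o->refcount, 1) == 1)
        atomic_store(&o->freed, 1);
    while (!atomic_load(&o->freed))
        ;                               /* wait_for_completion() in the driver */
    /* now safe to return ICM pages and the object number */
}

int main(void)
{
    struct obj o;
    obj_init(&o);
    obj_event(&o);
    obj_free(&o);
    printf("object torn down safely\n");
    return 0;
}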
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 223e0e6264ba..4cf0d3fcb519 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -131,7 +131,6 @@ static const char version[] __devinitdata =
131 KERN_INFO DRV_NAME " dp8381x driver, version " 131 KERN_INFO DRV_NAME " dp8381x driver, version "
132 DRV_VERSION ", " DRV_RELDATE "\n" 132 DRV_VERSION ", " DRV_RELDATE "\n"
133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n" 133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
134 KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
135 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; 134 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
136 135
137MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 136MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 589785d1e762..995c0a5d4066 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -63,8 +63,7 @@ static int options[MAX_UNITS];
63 63
64/* These identify the driver base version and may not be removed. */ 64/* These identify the driver base version and may not be removed. */
65static char version[] __devinitdata = 65static char version[] __devinitdata =
66KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n" 66KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n";
67KERN_INFO " http://www.scyld.com/network/ne2k-pci.html\n";
68 67
69#if defined(__powerpc__) 68#if defined(__powerpc__)
70#define inl_le(addr) le32_to_cpu(inl(addr)) 69#define inl_le(addr) le32_to_cpu(inl(addr))
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 1060154ae750..4ecb8ca5a992 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -189,16 +189,20 @@ static void ibmtr_detach(struct pcmcia_device *link)
189{ 189{
190 struct ibmtr_dev_t *info = link->priv; 190 struct ibmtr_dev_t *info = link->priv;
191 struct net_device *dev = info->dev; 191 struct net_device *dev = info->dev;
192 struct tok_info *ti = netdev_priv(dev);
192 193
193 DEBUG(0, "ibmtr_detach(0x%p)\n", link); 194 DEBUG(0, "ibmtr_detach(0x%p)\n", link);
195
196 /*
197 * When the card removal interrupt hits tok_interrupt(),
198 * bail out early, so we don't crash the machine
199 */
200 ti->sram_phys |= 1;
194 201
195 if (link->dev_node) 202 if (link->dev_node)
196 unregister_netdev(dev); 203 unregister_netdev(dev);
197 204
198 { 205 del_timer_sync(&(ti->tr_timer));
199 struct tok_info *ti = netdev_priv(dev);
200 del_timer_sync(&(ti->tr_timer));
201 }
202 206
203 ibmtr_release(link); 207 ibmtr_release(link);
204 208
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index eed433d6056a..f71dab347667 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -662,10 +662,10 @@ int phy_stop_interrupts(struct phy_device *phydev)
662 phy_error(phydev); 662 phy_error(phydev);
663 663
664 /* 664 /*
665 * Finish any pending work; we might have been scheduled 665 * Finish any pending work; we might have been scheduled to be called
666 * to be called from keventd ourselves, though. 666 * from keventd ourselves, but cancel_work_sync() handles that.
667 */ 667 */
668 run_scheduled_work(&phydev->phy_queue); 668 cancel_work_sync(&phydev->phy_queue);
669 669
670 free_irq(phydev->irq, phydev); 670 free_irq(phydev->irq, phydev);
671 671
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b07da1054add..e0489578945d 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3594,7 +3594,9 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3594 skge->duplex = -1; 3594 skge->duplex = -1;
3595 skge->speed = -1; 3595 skge->speed = -1;
3596 skge->advertising = skge_supported_modes(hw); 3596 skge->advertising = skge_supported_modes(hw);
3597 skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0; 3597
3598 if (pci_wake_enabled(hw->pdev))
3599 skge->wol = wol_supported(hw) & WAKE_MAGIC;
3598 3600
3599 hw->dev[port] = dev; 3601 hw->dev[port] = dev;
3600 3602
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index f51ba31970aa..e1f912d04043 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -110,8 +110,7 @@ static char *media[MAX_UNITS];
110 110
111/* These identify the driver base version and may not be removed. */ 111/* These identify the driver base version and may not be removed. */
112static char version[] = 112static char version[] =
113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" 113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
114KERN_INFO " http://www.scyld.com/network/sundance.html\n";
115 114
116MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 115MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117MODULE_DESCRIPTION("Sundance Alta Ethernet driver"); 116MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e5e901ecd808..923b9c725cc3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3716,10 +3716,8 @@ static void tg3_reset_task(struct work_struct *work)
3716 unsigned int restart_timer; 3716 unsigned int restart_timer;
3717 3717
3718 tg3_full_lock(tp, 0); 3718 tg3_full_lock(tp, 0);
3719 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3720 3719
3721 if (!netif_running(tp->dev)) { 3720 if (!netif_running(tp->dev)) {
3722 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3723 tg3_full_unlock(tp); 3721 tg3_full_unlock(tp);
3724 return; 3722 return;
3725 } 3723 }
@@ -3750,8 +3748,6 @@ static void tg3_reset_task(struct work_struct *work)
3750 mod_timer(&tp->timer, jiffies + 1); 3748 mod_timer(&tp->timer, jiffies + 1);
3751 3749
3752out: 3750out:
3753 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3754
3755 tg3_full_unlock(tp); 3751 tg3_full_unlock(tp);
3756} 3752}
3757 3753
@@ -7390,12 +7386,7 @@ static int tg3_close(struct net_device *dev)
7390{ 7386{
7391 struct tg3 *tp = netdev_priv(dev); 7387 struct tg3 *tp = netdev_priv(dev);
7392 7388
7393 /* Calling flush_scheduled_work() may deadlock because 7389 cancel_work_sync(&tp->reset_task);
7394 * linkwatch_event() may be on the workqueue and it will try to get
7395 * the rtnl_lock which we are holding.
7396 */
7397 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7398 msleep(1);
7399 7390
7400 netif_stop_queue(dev); 7391 netif_stop_queue(dev);
7401 7392
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4d334cf5a243..bd9f4f428e5b 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2228,7 +2228,7 @@ struct tg3 {
2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
2229#define TG3_FLAG_10_100_ONLY 0x01000000 2229#define TG3_FLAG_10_100_ONLY 0x01000000
2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000 2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
2231#define TG3_FLAG_IN_RESET_TASK 0x04000000 2231
2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000 2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
2234#define TG3_FLAG_SUPPORT_MSI 0x20000000 2234#define TG3_FLAG_SUPPORT_MSI 0x20000000
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 9b08afbd1f65..ea896777bcaf 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -269,7 +269,7 @@ done:
269 This would turn on IM for devices that is not contributing 269 This would turn on IM for devices that is not contributing
270 to backlog congestion with unnecessary latency. 270 to backlog congestion with unnecessary latency.
271 271
272 We monitor the the device RX-ring and have: 272 We monitor the device RX-ring and have:
273 273
274 HW Interrupt Mitigation either ON or OFF. 274 HW Interrupt Mitigation either ON or OFF.
275 275
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index fa440706fb4a..38f3b99716b8 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1021,7 +1021,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1021 np->tx_ring[entry].length |= DescEndRing; 1021 np->tx_ring[entry].length |= DescEndRing;
1022 1022
1023 /* Now acquire the irq spinlock. 1023 /* Now acquire the irq spinlock.
1024 * The difficult race is the the ordering between 1024 * The difficult race is the ordering between
1025 * increasing np->cur_tx and setting DescOwned: 1025 * increasing np->cur_tx and setting DescOwned:
1026 * - if np->cur_tx is increased first the interrupt 1026 * - if np->cur_tx is increased first the interrupt
1027 * handler could consider the packet as transmitted 1027 * handler could consider the packet as transmitted
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 985a1810ca59..2470b1ee33c0 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1043,7 +1043,7 @@ static int enable_promisc(struct xircom_private *card)
1043 1043
1044 1044
1045/* 1045/*
1046link_status() checks the the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what. 1046link_status() checks the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what.
1047 1047
1048Must be called in locked state with interrupts disabled 1048Must be called in locked state with interrupts disabled
1049*/ 1049*/
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index f2dd7763cd0b..f72573594121 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -639,7 +639,7 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
639 639
640 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd); 640 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
641 641
642 /* "I feel a presence... another warrior is on the the mesa." 642 /* "I feel a presence... another warrior is on the mesa."
643 */ 643 */
644 wmb(); 644 wmb();
645 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); 645 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
new file mode 100644
index 000000000000..3de564b23147
--- /dev/null
+++ b/drivers/net/usb/Kconfig
@@ -0,0 +1,338 @@
1#
2# USB Network devices configuration
3#
4comment "Networking support is needed for USB Network Adapter support"
5 depends on USB && !NET
6
7menu "USB Network Adapters"
8 depends on USB && NET
9
10config USB_CATC
11 tristate "USB CATC NetMate-based Ethernet device support (EXPERIMENTAL)"
12 depends on EXPERIMENTAL
13 select CRC32
14 ---help---
15 Say Y if you want to use one of the following 10Mbps USB Ethernet
16 devices based on the EL1210A chip. Supported devices are:
17 Belkin F5U011
18 Belkin F5U111
19 CATC NetMate
20 CATC NetMate II
21 smartBridges smartNIC
22
23 This driver makes the adapter appear as a normal Ethernet interface,
24 typically on eth0, if it is the only ethernet device, or perhaps on
25 eth1, if you have a PCI or ISA ethernet card installed.
26
27 To compile this driver as a module, choose M here: the
28 module will be called catc.
29
30config USB_KAWETH
31 tristate "USB KLSI KL5USB101-based ethernet device support"
32 ---help---
33 Say Y here if you want to use one of the following 10Mbps only
34 USB Ethernet adapters based on the KLSI KL5KUSB101B chipset:
35 3Com 3C19250
36 ADS USB-10BT
37 ATEN USB Ethernet
38 ASANTE USB To Ethernet Adapter
39 AOX Endpoints USB Ethernet
40 Correga K.K.
41 D-Link DSB-650C and DU-E10
42 Entrega / Portgear E45
43 I-O DATA USB-ET/T
44 Jaton USB Ethernet Device Adapter
45 Kingston Technology USB Ethernet Adapter
46 Linksys USB10T
47 Mobility USB-Ethernet Adapter
48 NetGear EA-101
49 Peracom Enet and Enet2
50 Portsmith Express Ethernet Adapter
51 Shark Pocket Adapter
52 SMC 2202USB
53 Sony Vaio port extender
54
55 This driver is likely to work with most 10Mbps only USB Ethernet
56 adapters, including some "no brand" devices. It does NOT work on
57 SmartBridges smartNIC or on Belkin F5U111 devices - you should use
58 the CATC NetMate driver for those. If you are not sure which one
59 you need, select both, and the correct one should be selected for
60 you.
61
62 This driver makes the adapter appear as a normal Ethernet interface,
63 typically on eth0, if it is the only ethernet device, or perhaps on
64 eth1, if you have a PCI or ISA ethernet card installed.
65
66 To compile this driver as a module, choose M here: the
67 module will be called kaweth.
68
69config USB_PEGASUS
70 tristate "USB Pegasus/Pegasus-II based ethernet device support"
71 select MII
72 ---help---
73 Say Y here if you know you have a Pegasus or Pegasus-II based adapter.
74 If in doubt then look at <file:drivers/usb/net/pegasus.h> for the
75 complete list of supported devices.
76
77 If your particular adapter is not in the list and you are _sure_ it
78 is Pegasus or Pegasus II based, then send me
79 <petkan@users.sourceforge.net> the vendor and device IDs.
80
81 To compile this driver as a module, choose M here: the
82 module will be called pegasus.
83
84config USB_RTL8150
85 tristate "USB RTL8150 based ethernet device support (EXPERIMENTAL)"
86 depends on EXPERIMENTAL
87 select MII
88 help
89 Say Y here if you have an RTL8150 based USB Ethernet adapter.
90 Send me <petkan@users.sourceforge.net> any comments you may have.
91 You can also check for updates at <http://pegasus2.sourceforge.net/>.
92
93 To compile this driver as a module, choose M here: the
94 module will be called rtl8150.
95
96config USB_USBNET_MII
97 tristate
98 default n
99
100config USB_USBNET
101 tristate "Multi-purpose USB Networking Framework"
102 select MII if USB_USBNET_MII != n
103 ---help---
104 This driver supports several kinds of network links over USB,
105 with "minidrivers" built around a common network driver core
106 that supports deep queues for efficient transfers. (This gives
107 better performance with small packets and at high speeds).
108
109 The USB host runs "usbnet", and the other end of the link might be:
110
111 - Another USB host, when using USB "network" or "data transfer"
112 cables. These are often used to network laptops to PCs, like
113 "Laplink" parallel cables or some motherboards. These rely
114 on specialized chips from many suppliers.
115
116 - An intelligent USB gadget, perhaps embedding a Linux system.
117 These include PDAs running Linux (iPaq, Yopy, Zaurus, and
118 others), and devices that interoperate using the standard
119 CDC-Ethernet specification (including many cable modems).
120
121 - Network adapter hardware (like those for 10/100 Ethernet) which
122 uses this driver framework.
123
124 The link will appear with a name like "usb0", when the link is
125 a two-node link, or "eth0" for most CDC-Ethernet devices. Those
126 two-node links are most easily managed with Ethernet Bridging
127 (CONFIG_BRIDGE) instead of routing.
128
129 For more information see <http://www.linux-usb.org/usbnet/>.
130
131 To compile this driver as a module, choose M here: the
132 module will be called usbnet.
133
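 For reference, a minidriver hooks into this framework with a
 driver_info description plus a usb_driver whose probe/disconnect
 (and suspend/resume) entry points are the generic usbnet ones; the
 asix driver later in this patch is a full example. A minimal sketch,
 with hypothetical vendor/product IDs and names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/usb.h>
#include "usbnet.h"

static const struct driver_info example_info = {
	.description	= "Example usbnet minidriver",
	.flags		= FLAG_ETHER,
};

static const struct usb_device_id example_ids[] = {
	/* hypothetical vendor/product ID */
	{ USB_DEVICE(0x1234, 0x5678),
	  .driver_info = (unsigned long) &example_info },
	{ }
};
MODULE_DEVICE_TABLE(usb, example_ids);

static struct usb_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= usbnet_probe,
	.suspend	= usbnet_suspend,
	.resume		= usbnet_resume,
	.disconnect	= usbnet_disconnect,
};

static int __init example_init(void)
{
	return usb_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	usb_deregister(&example_driver);
}
module_exit(example_exit);

MODULE_DESCRIPTION("Example usbnet minidriver (sketch)");
MODULE_LICENSE("GPL");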
134config USB_NET_AX8817X
135 tristate "ASIX AX88xxx Based USB 2.0 Ethernet Adapters"
136 depends on USB_USBNET && NET_ETHERNET
137 select CRC32
138 select USB_USBNET_MII
139 default y
140 help
141 This option adds support for ASIX AX88xxx based USB 2.0
142 10/100 Ethernet adapters.
143
144 This driver should work with at least the following devices:
145 * Aten UC210T
146 * ASIX AX88172
147 * Billionton Systems, USB2AR
148 * Buffalo LUA-U2-KTX
149 * Corega FEther USB2-TX
150 * D-Link DUB-E100
151 * Hawking UF200
152 * Linksys USB200M
153 * Netgear FA120
154 * Sitecom LN-029
155 * Intellinet USB 2.0 Ethernet
156 * ST Lab USB 2.0 Ethernet
157 * TrendNet TU2-ET100
158
159 This driver creates an interface named "ethX", where X depends on
160 what other networking devices you have in use.
161
162
163config USB_NET_CDCETHER
164 tristate "CDC Ethernet support (smart devices such as cable modems)"
165 depends on USB_USBNET
166 default y
167 help
168 This option supports devices conforming to the Communication Device
169 Class (CDC) Ethernet Control Model, a specification that's easy to
170 implement in device firmware. The CDC specifications are available
171 from <http://www.usb.org/>.
172
173 CDC Ethernet is an implementation option for DOCSIS cable modems
174 that support USB connectivity, used for non-Microsoft USB hosts.
175 The Linux-USB CDC Ethernet Gadget driver is an open implementation.
176 This driver should work with at least the following devices:
177
178 * Ericsson PipeRider (all variants)
179 * Motorola (DM100 and SB4100)
180 * Broadcom Cable Modem (reference design)
181 * Toshiba PCX1100U
182 * ...
183
184 This driver creates an interface named "ethX", where X depends on
185 what other networking devices you have in use. However, if the
186 IEEE 802 "local assignment" bit is set in the address, a "usbX"
187 name is used instead.
188
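 The "local assignment" test above is simply the locally administered
 bit of the Ethernet address, i.e. bit 1 of the first octet. A sketch
 of the check (hypothetical helper name):

#include <linux/types.h>

/* Sketch: nonzero when the IEEE 802 "locally administered" bit is set,
 * meaning the address was assigned locally rather than burned in by the
 * manufacturer; such interfaces get a "usbX" rather than "ethX" name. */
static inline int eth_addr_locally_assigned(const u8 *addr)
{
	return (addr[0] & 0x02) != 0;
}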
189config USB_NET_DM9601
190 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
191 depends on USB_USBNET
192 select CRC32
193 select USB_USBNET_MII
194 help
195 This option adds support for Davicom DM9601 based USB 1.1
196 10/100 Ethernet adapters.
197
198config USB_NET_GL620A
199 tristate "GeneSys GL620USB-A based cables"
200 depends on USB_USBNET
201 help
202 Choose this option if you're using a host-to-host cable,
203 or PC2PC motherboard, with this chip.
204
205 Note that the half-duplex "GL620USB" is not supported.
206
207config USB_NET_NET1080
208 tristate "NetChip 1080 based cables (Laplink, ...)"
209 default y
210 depends on USB_USBNET
211 help
212 Choose this option if you're using a host-to-host cable based
213 on this design: one NetChip 1080 chip and supporting logic,
214 optionally with LEDs that indicate traffic.
215
216config USB_NET_PLUSB
217 tristate "Prolific PL-2301/2302 based cables"
218 # if the handshake/init/reset problems, from original 'plusb',
219 # are ever resolved ... then remove "experimental"
220 depends on USB_USBNET && EXPERIMENTAL
221 help
222 Choose this option if you're using a host-to-host cable
223 with one of these chips.
224
225config USB_NET_MCS7830
226 tristate "MosChip MCS7830 based Ethernet adapters"
227 depends on USB_USBNET
228 select USB_USBNET_MII
229 help
230 Choose this option if you're using a 10/100 Ethernet USB2
231 adapter based on the MosChip 7830 controller. This includes
232 adapters marketed under the DeLOCK brand.
233
234config USB_NET_RNDIS_HOST
235 tristate "Host for RNDIS and ActiveSync devices (EXPERIMENTAL)"
236 depends on USB_USBNET && EXPERIMENTAL
237 select USB_NET_CDCETHER
238 help
239 This option enables hosting "Remote NDIS" USB networking links,
240 as encouraged by Microsoft (instead of CDC Ethernet!) for use in
241 various devices that may only support this protocol. A variant
242 of this protocol (with even less public documentation) seems to
243 be at the root of Microsoft's "ActiveSync" too.
244
245 Avoid using this protocol unless you have no better options.
246 The protocol specification is incomplete, and is controlled by
247 (and for) Microsoft; it isn't an "Open" ecosystem or market.
248
249config USB_NET_CDC_SUBSET
250 tristate "Simple USB Network Links (CDC Ethernet subset)"
251 depends on USB_USBNET
252 default y
253 help
254 This driver module supports USB network devices that can work
255 without any device-specific information. Select it if you have
256 one of these devices.
257
258 Note that while many USB host-to-host cables can work in this mode,
259 doing so may mean not being able to talk to Win32 systems, or, more
260 commonly, not handling certain events (like replugging the host on
261 the other end) very well. Also, these devices will
262 not generally have permanently assigned Ethernet addresses.
263
264config USB_ALI_M5632
265 boolean "ALi M5632 based 'USB 2.0 Data Link' cables"
266 depends on USB_NET_CDC_SUBSET
267 help
268 Choose this option if you're using a host-to-host cable
269 based on this design, which supports USB 2.0 high speed.
270
271config USB_AN2720
272 boolean "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
273 depends on USB_NET_CDC_SUBSET
274 help
275 Choose this option if you're using a host-to-host cable
276 based on this design. Note that AnchorChips is now a
277 Cypress brand.
278
279config USB_BELKIN
280 boolean "eTEK based host-to-host cables (Advance, Belkin, ...)"
281 depends on USB_NET_CDC_SUBSET
282 default y
283 help
284 Choose this option if you're using a host-to-host cable
285 based on this design: two NetChip 2890 chips and an Atmel
286 microcontroller, with LEDs that indicate traffic.
287
288config USB_ARMLINUX
289 boolean "Embedded ARM Linux links (iPaq, ...)"
290 depends on USB_NET_CDC_SUBSET
291 default y
292 help
293 Choose this option to support the "usb-eth" networking driver
294 used by most of the ARM Linux community with device controllers
295 such as the SA-11x0 and PXA-25x UDCs, or the tftp capabilities
296 in some PXA versions of the "blob" boot loader.
297
298 Linux-based "Gumstix" PXA-25x based systems use this protocol
299 to talk with other Linux systems.
300
301 Although the ROMs shipped with Sharp Zaurus products use a
302 different link level framing protocol, you can have them use
303 this simpler protocol by installing a different kernel.
304
305config USB_EPSON2888
306 boolean "Epson 2888 based firmware (DEVELOPMENT)"
307 depends on USB_NET_CDC_SUBSET
308 help
309 Choose this option to support the usb networking links used
310 by some sample firmware from Epson.
311
312config USB_KC2190
313 boolean "KT Technology KC2190 based cables (InstaNet)"
314 depends on USB_NET_CDC_SUBSET && EXPERIMENTAL
315 help
316 Choose this option if you're using a host-to-host cable
317 with one of these chips.
318
319config USB_NET_ZAURUS
320 tristate "Sharp Zaurus (stock ROMs) and compatible"
321 depends on USB_USBNET
322 select USB_NET_CDCETHER
323 select CRC32
324 default y
325 help
326 Choose this option to support the usb networking links used by
327 Zaurus models like the SL-5000D, SL-5500, SL-5600, A-300, B-500.
328 This also supports some related device firmware, as used in some
329 PDAs from Olympus and some cell phones from Motorola.
330
331 If you install an alternate image, such as the Linux 2.6 based
332 versions of OpenZaurus, you should no longer need to support this
333 protocol. Only the "eth-fd" or "net_fd" drivers in these devices
334 really need this non-conformant variant of the CDC Ethernet (or, in
335 some cases, CDC MDLM) protocol; the "g_ether" driver does not.
336
337
338endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
new file mode 100644
index 000000000000..595a539f8384
--- /dev/null
+++ b/drivers/net/usb/Makefile
@@ -0,0 +1,23 @@
1#
2# Makefile for USB Network drivers
3#
4
5obj-$(CONFIG_USB_CATC) += catc.o
6obj-$(CONFIG_USB_KAWETH) += kaweth.o
7obj-$(CONFIG_USB_PEGASUS) += pegasus.o
8obj-$(CONFIG_USB_RTL8150) += rtl8150.o
9obj-$(CONFIG_USB_NET_AX8817X) += asix.o
10obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
11obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
12obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
13obj-$(CONFIG_USB_NET_NET1080) += net1080.o
14obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
15obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
16obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o
17obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
18obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
19obj-$(CONFIG_USB_USBNET) += usbnet.o
20
21ifeq ($(CONFIG_USB_DEBUG),y)
22EXTRA_CFLAGS += -DDEBUG
23endif
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
new file mode 100644
index 000000000000..d5ef97bc4d01
--- /dev/null
+++ b/drivers/net/usb/asix.c
@@ -0,0 +1,1490 @@
1/*
2 * ASIX AX8817X based USB 2.0 Ethernet Devices
3 * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
4 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
5 * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
6 * Copyright (c) 2002-2003 TiVo Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23// #define DEBUG // error path messages, extra info
24// #define VERBOSE // more; success messages
25
26#include <linux/module.h>
27#include <linux/kmod.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/ethtool.h>
32#include <linux/workqueue.h>
33#include <linux/mii.h>
34#include <linux/usb.h>
35#include <linux/crc32.h>
36
37#include "usbnet.h"
38
39#define DRIVER_VERSION "14-Jun-2006"
40static const char driver_name [] = "asix";
41
42/* ASIX AX8817X based USB 2.0 Ethernet Devices */
43
44#define AX_CMD_SET_SW_MII 0x06
45#define AX_CMD_READ_MII_REG 0x07
46#define AX_CMD_WRITE_MII_REG 0x08
47#define AX_CMD_SET_HW_MII 0x0a
48#define AX_CMD_READ_EEPROM 0x0b
49#define AX_CMD_WRITE_EEPROM 0x0c
50#define AX_CMD_WRITE_ENABLE 0x0d
51#define AX_CMD_WRITE_DISABLE 0x0e
52#define AX_CMD_READ_RX_CTL 0x0f
53#define AX_CMD_WRITE_RX_CTL 0x10
54#define AX_CMD_READ_IPG012 0x11
55#define AX_CMD_WRITE_IPG0 0x12
56#define AX_CMD_WRITE_IPG1 0x13
57#define AX_CMD_READ_NODE_ID 0x13
58#define AX_CMD_WRITE_IPG2 0x14
59#define AX_CMD_WRITE_MULTI_FILTER 0x16
60#define AX88172_CMD_READ_NODE_ID 0x17
61#define AX_CMD_READ_PHY_ID 0x19
62#define AX_CMD_READ_MEDIUM_STATUS 0x1a
63#define AX_CMD_WRITE_MEDIUM_MODE 0x1b
64#define AX_CMD_READ_MONITOR_MODE 0x1c
65#define AX_CMD_WRITE_MONITOR_MODE 0x1d
66#define AX_CMD_READ_GPIOS 0x1e
67#define AX_CMD_WRITE_GPIOS 0x1f
68#define AX_CMD_SW_RESET 0x20
69#define AX_CMD_SW_PHY_STATUS 0x21
70#define AX_CMD_SW_PHY_SELECT 0x22
71
72#define AX_MONITOR_MODE 0x01
73#define AX_MONITOR_LINK 0x02
74#define AX_MONITOR_MAGIC 0x04
75#define AX_MONITOR_HSFS 0x10
76
77/* AX88172 Medium Status Register values */
78#define AX88172_MEDIUM_FD 0x02
79#define AX88172_MEDIUM_TX 0x04
80#define AX88172_MEDIUM_FC 0x10
81#define AX88172_MEDIUM_DEFAULT \
82 ( AX88172_MEDIUM_FD | AX88172_MEDIUM_TX | AX88172_MEDIUM_FC )
83
84#define AX_MCAST_FILTER_SIZE 8
85#define AX_MAX_MCAST 64
86
87#define AX_SWRESET_CLEAR 0x00
88#define AX_SWRESET_RR 0x01
89#define AX_SWRESET_RT 0x02
90#define AX_SWRESET_PRTE 0x04
91#define AX_SWRESET_PRL 0x08
92#define AX_SWRESET_BZ 0x10
93#define AX_SWRESET_IPRL 0x20
94#define AX_SWRESET_IPPD 0x40
95
96#define AX88772_IPG0_DEFAULT 0x15
97#define AX88772_IPG1_DEFAULT 0x0c
98#define AX88772_IPG2_DEFAULT 0x12
99
100/* AX88772 & AX88178 Medium Mode Register */
101#define AX_MEDIUM_PF 0x0080
102#define AX_MEDIUM_JFE 0x0040
103#define AX_MEDIUM_TFC 0x0020
104#define AX_MEDIUM_RFC 0x0010
105#define AX_MEDIUM_ENCK 0x0008
106#define AX_MEDIUM_AC 0x0004
107#define AX_MEDIUM_FD 0x0002
108#define AX_MEDIUM_GM 0x0001
109#define AX_MEDIUM_SM 0x1000
110#define AX_MEDIUM_SBP 0x0800
111#define AX_MEDIUM_PS 0x0200
112#define AX_MEDIUM_RE 0x0100
113
114#define AX88178_MEDIUM_DEFAULT \
115 (AX_MEDIUM_PS | AX_MEDIUM_FD | AX_MEDIUM_AC | \
116 AX_MEDIUM_RFC | AX_MEDIUM_TFC | AX_MEDIUM_JFE | \
117 AX_MEDIUM_RE )
118
119#define AX88772_MEDIUM_DEFAULT \
120 (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
121 AX_MEDIUM_TFC | AX_MEDIUM_PS | \
122 AX_MEDIUM_AC | AX_MEDIUM_RE )
123
124/* AX88772 & AX88178 RX_CTL values */
125#define AX_RX_CTL_SO 0x0080
126#define AX_RX_CTL_AP 0x0020
127#define AX_RX_CTL_AM 0x0010
128#define AX_RX_CTL_AB 0x0008
129#define AX_RX_CTL_SEP 0x0004
130#define AX_RX_CTL_AMALL 0x0002
131#define AX_RX_CTL_PRO 0x0001
132#define AX_RX_CTL_MFB_2048 0x0000
133#define AX_RX_CTL_MFB_4096 0x0100
134#define AX_RX_CTL_MFB_8192 0x0200
135#define AX_RX_CTL_MFB_16384 0x0300
136
137#define AX_DEFAULT_RX_CTL \
138 (AX_RX_CTL_SO | AX_RX_CTL_AB )
139
140/* GPIO 0 .. 2 toggles */
141#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
142#define AX_GPIO_GPO_0 0x02 /* GPIO0 Output value */
143#define AX_GPIO_GPO1EN 0x04 /* GPIO1 Output enable */
144#define AX_GPIO_GPO_1 0x08 /* GPIO1 Output value */
145#define AX_GPIO_GPO2EN 0x10 /* GPIO2 Output enable */
146#define AX_GPIO_GPO_2 0x20 /* GPIO2 Output value */
147#define AX_GPIO_RESERVED 0x40 /* Reserved */
148#define AX_GPIO_RSE 0x80 /* Reload serial EEPROM */
149
150#define AX_EEPROM_MAGIC 0xdeadbeef
151#define AX88172_EEPROM_LEN 0x40
152#define AX88772_EEPROM_LEN 0xff
153
154#define PHY_MODE_MARVELL 0x0000
155#define MII_MARVELL_LED_CTRL 0x0018
156#define MII_MARVELL_STATUS 0x001b
157#define MII_MARVELL_CTRL 0x0014
158
159#define MARVELL_LED_MANUAL 0x0019
160
161#define MARVELL_STATUS_HWCFG 0x0004
162
163#define MARVELL_CTRL_TXDELAY 0x0002
164#define MARVELL_CTRL_RXDELAY 0x0080
165
166/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
167struct asix_data {
168 u8 multi_filter[AX_MCAST_FILTER_SIZE];
169 u8 phymode;
170 u8 ledmode;
171 u8 eeprom_len;
172};
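/*
 * The 20-byte limit exists because asix_data is overlaid on the
 * dev->data[] scratch area of struct usbnet (an unsigned long [5]
 * array).  A compile-time guard along these lines could enforce it
 * (illustrative sketch only, not part of this patch):
 *
 *	BUILD_BUG_ON(sizeof(struct asix_data) > sizeof(unsigned long [5]));
 */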
173
174struct ax88172_int_data {
175 u16 res1;
176 u8 link;
177 u16 res2;
178 u8 status;
179 u16 res3;
180} __attribute__ ((packed));
181
182static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
183 u16 size, void *data)
184{
185 devdbg(dev,"asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d",
186 cmd, value, index, size);
187 return usb_control_msg(
188 dev->udev,
189 usb_rcvctrlpipe(dev->udev, 0),
190 cmd,
191 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
192 value,
193 index,
194 data,
195 size,
196 USB_CTRL_GET_TIMEOUT);
197}
198
199static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
200 u16 size, void *data)
201{
202 devdbg(dev,"asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d",
203 cmd, value, index, size);
204 return usb_control_msg(
205 dev->udev,
206 usb_sndctrlpipe(dev->udev, 0),
207 cmd,
208 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
209 value,
210 index,
211 data,
212 size,
213 USB_CTRL_SET_TIMEOUT);
214}
215
216static void asix_async_cmd_callback(struct urb *urb)
217{
218 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
219
220 if (urb->status < 0)
221 printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
222 urb->status);
223
224 kfree(req);
225 usb_free_urb(urb);
226}
227
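/*
 * Asynchronous variant of asix_write_cmd() for contexts that must not
 * sleep (e.g. the set_multicast_list paths below): the control request
 * is submitted with GFP_ATOMIC, and both the request and the URB are
 * released in asix_async_cmd_callback() above.
 */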
228static void
229asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
230 u16 size, void *data)
231{
232 struct usb_ctrlrequest *req;
233 int status;
234 struct urb *urb;
235
236 devdbg(dev,"asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d",
237 cmd, value, index, size);
238 if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
239 deverr(dev, "Error allocating URB in write_cmd_async!");
240 return;
241 }
242
243 if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
244 deverr(dev, "Failed to allocate memory for control request");
245 usb_free_urb(urb);
246 return;
247 }
248
249 req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
250 req->bRequest = cmd;
251 req->wValue = cpu_to_le16(value);
252 req->wIndex = cpu_to_le16(index);
253 req->wLength = cpu_to_le16(size);
254
255 usb_fill_control_urb(urb, dev->udev,
256 usb_sndctrlpipe(dev->udev, 0),
257 (void *)req, data, size,
258 asix_async_cmd_callback, req);
259
260 if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
261 deverr(dev, "Error submitting the control message: status=%d",
262 status);
263 kfree(req);
264 usb_free_urb(urb);
265 }
266}
267
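/*
 * ASIX framing: each Ethernet frame inside a bulk-in transfer is preceded
 * by a 32-bit little-endian header whose low 16 bits are the frame length
 * and whose high 16 bits are the bitwise complement of that length (used
 * as a sanity check below).  Frames are padded to an even number of bytes
 * before the next header.
 */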
268static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
269{
270 u8 *head;
271 u32 header;
272 char *packet;
273 struct sk_buff *ax_skb;
274 u16 size;
275
276 head = (u8 *) skb->data;
277 memcpy(&header, head, sizeof(header));
278 le32_to_cpus(&header);
279 packet = head + sizeof(header);
280
281 skb_pull(skb, 4);
282
283 while (skb->len > 0) {
284 if ((short)(header & 0x0000ffff) !=
285 ~((short)((header & 0xffff0000) >> 16))) {
286 deverr(dev,"asix_rx_fixup() Bad Header Length");
287 }
288 /* get the packet length */
289 size = (u16) (header & 0x0000ffff);
290
291 if ((skb->len) - ((size + 1) & 0xfffe) == 0)
292 return 2;
293 if (size > ETH_FRAME_LEN) {
294 deverr(dev,"asix_rx_fixup() Bad RX Length %d", size);
295 return 0;
296 }
297 ax_skb = skb_clone(skb, GFP_ATOMIC);
298 if (ax_skb) {
299 ax_skb->len = size;
300 ax_skb->data = packet;
301 skb_set_tail_pointer(ax_skb, size);
302 usbnet_skb_return(dev, ax_skb);
303 } else {
304 return 0;
305 }
306
307 skb_pull(skb, (size + 1) & 0xfffe);
308
309 if (skb->len == 0)
310 break;
311
312 head = (u8 *) skb->data;
313 memcpy(&header, head, sizeof(header));
314 le32_to_cpus(&header);
315 packet = head + sizeof(header);
316 skb_pull(skb, 4);
317 }
318
319 if (skb->len < 0) {
320 deverr(dev,"asix_rx_fixup() Bad SKB Length %d", skb->len);
321 return 0;
322 }
323 return 1;
324}
325
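/*
 * On transmit the same 32-bit length/~length header is prepended.  If the
 * resulting URB length works out to an exact multiple of 512 bytes (the
 * high-speed bulk packet size), a 4-byte 0xffff0000 marker is appended so
 * the transfer does not have to end in a bare zero-length packet.
 */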
326static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
327 gfp_t flags)
328{
329 int padlen;
330 int headroom = skb_headroom(skb);
331 int tailroom = skb_tailroom(skb);
332 u32 packet_len;
333 u32 padbytes = 0xffff0000;
334
335 padlen = ((skb->len + 4) % 512) ? 0 : 4;
336
337 if ((!skb_cloned(skb))
338 && ((headroom + tailroom) >= (4 + padlen))) {
339 if ((headroom < 4) || (tailroom < padlen)) {
340 skb->data = memmove(skb->head + 4, skb->data, skb->len);
341 skb_set_tail_pointer(skb, skb->len);
342 }
343 } else {
344 struct sk_buff *skb2;
345 skb2 = skb_copy_expand(skb, 4, padlen, flags);
346 dev_kfree_skb_any(skb);
347 skb = skb2;
348 if (!skb)
349 return NULL;
350 }
351
352 skb_push(skb, 4);
353 packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
354 cpu_to_le32s(&packet_len);
355 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
356
357 if ((skb->len % 512) == 0) {
358 cpu_to_le32s(&padbytes);
359 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
360 skb_put(skb, sizeof(padbytes));
361 }
362 return skb;
363}
364
365static void asix_status(struct usbnet *dev, struct urb *urb)
366{
367 struct ax88172_int_data *event;
368 int link;
369
370 if (urb->actual_length < 8)
371 return;
372
373 event = urb->transfer_buffer;
374 link = event->link & 0x01;
375 if (netif_carrier_ok(dev->net) != link) {
376 if (link) {
377 netif_carrier_on(dev->net);
378 usbnet_defer_kevent (dev, EVENT_LINK_RESET );
379 } else
380 netif_carrier_off(dev->net);
381 devdbg(dev, "Link Status is: %d", link);
382 }
383}
384
385static inline int asix_set_sw_mii(struct usbnet *dev)
386{
387 int ret;
388 ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
389 if (ret < 0)
390 deverr(dev, "Failed to enable software MII access");
391 return ret;
392}
393
394static inline int asix_set_hw_mii(struct usbnet *dev)
395{
396 int ret;
397 ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
398 if (ret < 0)
399 deverr(dev, "Failed to enable hardware MII access");
400 return ret;
401}
402
403static inline int asix_get_phy_addr(struct usbnet *dev)
404{
405 int ret = 0;
406 void *buf;
407
408 devdbg(dev, "asix_get_phy_addr()");
409
410 buf = kmalloc(2, GFP_KERNEL);
411 if (!buf)
412 goto out1;
413
414 if ((ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID,
415 0, 0, 2, buf)) < 2) {
416 deverr(dev, "Error reading PHYID register: %02x", ret);
417 goto out2;
418 }
419 devdbg(dev, "asix_get_phy_addr() returning 0x%04x", *((u16 *)buf));
420 ret = *((u8 *)buf + 1);
421out2:
422 kfree(buf);
423out1:
424 return ret;
425}
426
427static int asix_sw_reset(struct usbnet *dev, u8 flags)
428{
429 int ret;
430
431 ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
432 if (ret < 0)
433 deverr(dev,"Failed to send software reset: %02x", ret);
434
435 return ret;
436}
437
438static u16 asix_read_rx_ctl(struct usbnet *dev)
439{
440 u16 ret = 0;
441 void *buf;
442
443 buf = kmalloc(2, GFP_KERNEL);
444 if (!buf)
445 goto out1;
446
447 if ((ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL,
448 0, 0, 2, buf)) < 2) {
449 deverr(dev, "Error reading RX_CTL register: %02x", ret);
450 goto out2;
451 }
452 ret = le16_to_cpu(*((u16 *)buf));
453out2:
454 kfree(buf);
455out1:
456 return ret;
457}
458
459static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
460{
461 int ret;
462
463 devdbg(dev,"asix_write_rx_ctl() - mode = 0x%04x", mode);
464 ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
465 if (ret < 0)
466 deverr(dev, "Failed to write RX_CTL mode to 0x%04x: %02x",
467 mode, ret);
468
469 return ret;
470}
471
472static u16 asix_read_medium_status(struct usbnet *dev)
473{
474 u16 ret = 0;
475 void *buf;
476
477 buf = kmalloc(2, GFP_KERNEL);
478 if (!buf)
479 goto out1;
480
481 if ((ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS,
482 0, 0, 2, buf)) < 2) {
483 deverr(dev, "Error reading Medium Status register: %02x", ret);
484 goto out2;
485 }
486 ret = le16_to_cpu(*((u16 *)buf));
487out2:
488 kfree(buf);
489out1:
490 return ret;
491}
492
493static int asix_write_medium_mode(struct usbnet *dev, u16 mode)
494{
495 int ret;
496
497 devdbg(dev,"asix_write_medium_mode() - mode = 0x%04x", mode);
498 ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
499 if (ret < 0)
500 deverr(dev, "Failed to write Medium Mode mode to 0x%04x: %02x",
501 mode, ret);
502
503 return ret;
504}
505
506static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
507{
508 int ret;
509
510 devdbg(dev,"asix_write_gpio() - value = 0x%04x", value);
511 ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
512 if (ret < 0)
513 deverr(dev, "Failed to write GPIO value 0x%04x: %02x",
514 value, ret);
515
516 if (sleep)
517 msleep(sleep);
518
519 return ret;
520}
521
522/*
523 * AX88772 & AX88178 have a 16-bit RX_CTL value
524 */
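/*
 * Multicast filtering below uses a 64-bit hash: the top six bits of the
 * CRC-32 of each multicast address select one bit in the 8-byte
 * multi_filter[] table (bits 5..3 pick the byte, bits 2..0 the bit).
 */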
525static void asix_set_multicast(struct net_device *net)
526{
527 struct usbnet *dev = netdev_priv(net);
528 struct asix_data *data = (struct asix_data *)&dev->data;
529 u16 rx_ctl = AX_DEFAULT_RX_CTL;
530
531 if (net->flags & IFF_PROMISC) {
532 rx_ctl |= AX_RX_CTL_PRO;
533 } else if (net->flags & IFF_ALLMULTI
534 || net->mc_count > AX_MAX_MCAST) {
535 rx_ctl |= AX_RX_CTL_AMALL;
536 } else if (net->mc_count == 0) {
537 /* just broadcast and directed */
538 } else {
539 /* We use the 20 byte dev->data
540 * for our 8 byte filter buffer
541 * to avoid allocating memory that
542 * is tricky to free later */
543 struct dev_mc_list *mc_list = net->mc_list;
544 u32 crc_bits;
545 int i;
546
547 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
548
549 /* Build the multicast hash filter. */
550 for (i = 0; i < net->mc_count; i++) {
551 crc_bits =
552 ether_crc(ETH_ALEN,
553 mc_list->dmi_addr) >> 26;
554 data->multi_filter[crc_bits >> 3] |=
555 1 << (crc_bits & 7);
556 mc_list = mc_list->next;
557 }
558
559 asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
560 AX_MCAST_FILTER_SIZE, data->multi_filter);
561
562 rx_ctl |= AX_RX_CTL_AM;
563 }
564
565 asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
566}
567
568static int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
569{
570 struct usbnet *dev = netdev_priv(netdev);
571 u16 res;
572
573 mutex_lock(&dev->phy_mutex);
574 asix_set_sw_mii(dev);
575 asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
576 (__u16)loc, 2, (u16 *)&res);
577 asix_set_hw_mii(dev);
578 mutex_unlock(&dev->phy_mutex);
579
580 devdbg(dev, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x", phy_id, loc, le16_to_cpu(res & 0xffff));
581
582 return le16_to_cpu(res & 0xffff);
583}
584
585static void
586asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
587{
588 struct usbnet *dev = netdev_priv(netdev);
589 u16 res = cpu_to_le16(val);
590
591 devdbg(dev, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x", phy_id, loc, val);
592 mutex_lock(&dev->phy_mutex);
593 asix_set_sw_mii(dev);
594 asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
595 (__u16)loc, 2, (u16 *)&res);
596 asix_set_hw_mii(dev);
597 mutex_unlock(&dev->phy_mutex);
598}
599
600/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
601static u32 asix_get_phyid(struct usbnet *dev)
602{
603 int phy_reg;
604 u32 phy_id;
605
606 phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
607 if (phy_reg < 0)
608 return 0;
609
610 phy_id = (phy_reg & 0xffff) << 16;
611
612 phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
613 if (phy_reg < 0)
614 return 0;
615
616 phy_id |= (phy_reg & 0xffff);
617
618 return phy_id;
619}
620
621static void
622asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
623{
624 struct usbnet *dev = netdev_priv(net);
625 u8 opt;
626
627 if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
628 wolinfo->supported = 0;
629 wolinfo->wolopts = 0;
630 return;
631 }
632 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
633 wolinfo->wolopts = 0;
634 if (opt & AX_MONITOR_MODE) {
635 if (opt & AX_MONITOR_LINK)
636 wolinfo->wolopts |= WAKE_PHY;
637 if (opt & AX_MONITOR_MAGIC)
638 wolinfo->wolopts |= WAKE_MAGIC;
639 }
640}
641
642static int
643asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
644{
645 struct usbnet *dev = netdev_priv(net);
646 u8 opt = 0;
647 u8 buf[1];
648
649 if (wolinfo->wolopts & WAKE_PHY)
650 opt |= AX_MONITOR_LINK;
651 if (wolinfo->wolopts & WAKE_MAGIC)
652 opt |= AX_MONITOR_MAGIC;
653 if (opt != 0)
654 opt |= AX_MONITOR_MODE;
655
656 if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
657 opt, 0, 0, &buf) < 0)
658 return -EINVAL;
659
660 return 0;
661}
662
663static int asix_get_eeprom_len(struct net_device *net)
664{
665 struct usbnet *dev = netdev_priv(net);
666 struct asix_data *data = (struct asix_data *)&dev->data;
667
668 return data->eeprom_len;
669}
670
671static int asix_get_eeprom(struct net_device *net,
672 struct ethtool_eeprom *eeprom, u8 *data)
673{
674 struct usbnet *dev = netdev_priv(net);
675 u16 *ebuf = (u16 *)data;
676 int i;
677
678 /* Crude hack to ensure that we don't overwrite memory
679 * if an odd length is supplied
680 */
681 if (eeprom->len % 2)
682 return -EINVAL;
683
684 eeprom->magic = AX_EEPROM_MAGIC;
685
686 /* ax8817x returns 2 bytes from eeprom on read */
687 for (i=0; i < eeprom->len / 2; i++) {
688 if (asix_read_cmd(dev, AX_CMD_READ_EEPROM,
689 eeprom->offset + i, 0, 2, &ebuf[i]) < 0)
690 return -EINVAL;
691 }
692 return 0;
693}
694
695static void asix_get_drvinfo (struct net_device *net,
696 struct ethtool_drvinfo *info)
697{
698 struct usbnet *dev = netdev_priv(net);
699 struct asix_data *data = (struct asix_data *)&dev->data;
700
701 /* Inherit standard device info */
702 usbnet_get_drvinfo(net, info);
703 strncpy (info->driver, driver_name, sizeof info->driver);
704 strncpy (info->version, DRIVER_VERSION, sizeof info->version);
705 info->eedump_len = data->eeprom_len;
706}
707
708static u32 asix_get_link(struct net_device *net)
709{
710 struct usbnet *dev = netdev_priv(net);
711
712 return mii_link_ok(&dev->mii);
713}
714
715static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
716{
717 struct usbnet *dev = netdev_priv(net);
718
719 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
720}
721
722/* We need to override some ethtool_ops, so we use our own
723 structure to avoid interfering with other usbnet devices
724 that may be connected at the same time. */
725static struct ethtool_ops ax88172_ethtool_ops = {
726 .get_drvinfo = asix_get_drvinfo,
727 .get_link = asix_get_link,
728 .get_msglevel = usbnet_get_msglevel,
729 .set_msglevel = usbnet_set_msglevel,
730 .get_wol = asix_get_wol,
731 .set_wol = asix_set_wol,
732 .get_eeprom_len = asix_get_eeprom_len,
733 .get_eeprom = asix_get_eeprom,
734 .get_settings = usbnet_get_settings,
735 .set_settings = usbnet_set_settings,
736 .nway_reset = usbnet_nway_reset,
737};
738
739static void ax88172_set_multicast(struct net_device *net)
740{
741 struct usbnet *dev = netdev_priv(net);
742 struct asix_data *data = (struct asix_data *)&dev->data;
743 u8 rx_ctl = 0x8c;
744
745 if (net->flags & IFF_PROMISC) {
746 rx_ctl |= 0x01;
747 } else if (net->flags & IFF_ALLMULTI
748 || net->mc_count > AX_MAX_MCAST) {
749 rx_ctl |= 0x02;
750 } else if (net->mc_count == 0) {
751 /* just broadcast and directed */
752 } else {
753 /* We use the 20 byte dev->data
754 * for our 8 byte filter buffer
755 * to avoid allocating memory that
756 * is tricky to free later */
757 struct dev_mc_list *mc_list = net->mc_list;
758 u32 crc_bits;
759 int i;
760
761 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
762
763 /* Build the multicast hash filter. */
764 for (i = 0; i < net->mc_count; i++) {
765 crc_bits =
766 ether_crc(ETH_ALEN,
767 mc_list->dmi_addr) >> 26;
768 data->multi_filter[crc_bits >> 3] |=
769 1 << (crc_bits & 7);
770 mc_list = mc_list->next;
771 }
772
773 asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
774 AX_MCAST_FILTER_SIZE, data->multi_filter);
775
776 rx_ctl |= 0x10;
777 }
778
779 asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
780}
781
782static int ax88172_link_reset(struct usbnet *dev)
783{
784 u8 mode;
785 struct ethtool_cmd ecmd;
786
787 mii_check_media(&dev->mii, 1, 1);
788 mii_ethtool_gset(&dev->mii, &ecmd);
789 mode = AX88172_MEDIUM_DEFAULT;
790
791 if (ecmd.duplex != DUPLEX_FULL)
792 mode &= ~AX88172_MEDIUM_FD;
793
794 devdbg(dev, "ax88172_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode);
795
796 asix_write_medium_mode(dev, mode);
797
798 return 0;
799}
800
801static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
802{
803 int ret = 0;
804 void *buf;
805 int i;
806 unsigned long gpio_bits = dev->driver_info->data;
807 struct asix_data *data = (struct asix_data *)&dev->data;
808
809 data->eeprom_len = AX88172_EEPROM_LEN;
810
811 usbnet_get_endpoints(dev,intf);
812
813 buf = kmalloc(ETH_ALEN, GFP_KERNEL);
814 if(!buf) {
815 ret = -ENOMEM;
816 goto out1;
817 }
818
819 /* Toggle the GPIOs in a manufacturer/model specific way */
820 for (i = 2; i >= 0; i--) {
821 if ((ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS,
822 (gpio_bits >> (i * 8)) & 0xff, 0, 0,
823 buf)) < 0)
824 goto out2;
825 msleep(5);
826 }
827
828 if ((ret = asix_write_rx_ctl(dev, 0x80)) < 0)
829 goto out2;
830
831 /* Get the MAC address */
832 memset(buf, 0, ETH_ALEN);
833 if ((ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID,
834 0, 0, 6, buf)) < 0) {
835 dbg("read AX_CMD_READ_NODE_ID failed: %d", ret);
836 goto out2;
837 }
838 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
839
840 /* Initialize MII structure */
841 dev->mii.dev = dev->net;
842 dev->mii.mdio_read = asix_mdio_read;
843 dev->mii.mdio_write = asix_mdio_write;
844 dev->mii.phy_id_mask = 0x3f;
845 dev->mii.reg_num_mask = 0x1f;
846 dev->mii.phy_id = asix_get_phy_addr(dev);
847 dev->net->do_ioctl = asix_ioctl;
848
849 dev->net->set_multicast_list = ax88172_set_multicast;
850 dev->net->ethtool_ops = &ax88172_ethtool_ops;
851
852 asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
853 asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
854 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
855 mii_nway_restart(&dev->mii);
856
857 return 0;
858out2:
859 kfree(buf);
860out1:
861 return ret;
862}
863
864static struct ethtool_ops ax88772_ethtool_ops = {
865 .get_drvinfo = asix_get_drvinfo,
866 .get_link = asix_get_link,
867 .get_msglevel = usbnet_get_msglevel,
868 .set_msglevel = usbnet_set_msglevel,
869 .get_wol = asix_get_wol,
870 .set_wol = asix_set_wol,
871 .get_eeprom_len = asix_get_eeprom_len,
872 .get_eeprom = asix_get_eeprom,
873 .get_settings = usbnet_get_settings,
874 .set_settings = usbnet_set_settings,
875 .nway_reset = usbnet_nway_reset,
876};
877
878static int ax88772_link_reset(struct usbnet *dev)
879{
880 u16 mode;
881 struct ethtool_cmd ecmd;
882
883 mii_check_media(&dev->mii, 1, 1);
884 mii_ethtool_gset(&dev->mii, &ecmd);
885 mode = AX88772_MEDIUM_DEFAULT;
886
887 if (ecmd.speed != SPEED_100)
888 mode &= ~AX_MEDIUM_PS;
889
890 if (ecmd.duplex != DUPLEX_FULL)
891 mode &= ~AX_MEDIUM_FD;
892
893 devdbg(dev, "ax88772_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode);
894
895 asix_write_medium_mode(dev, mode);
896
897 return 0;
898}
899
900static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
901{
902 int ret, embd_phy;
903 void *buf;
904 u16 rx_ctl;
905 struct asix_data *data = (struct asix_data *)&dev->data;
906 u32 phyid;
907
908 data->eeprom_len = AX88772_EEPROM_LEN;
909
910 usbnet_get_endpoints(dev,intf);
911
912 buf = kmalloc(6, GFP_KERNEL);
913 if(!buf) {
914 dbg ("Cannot allocate memory for buffer");
915 ret = -ENOMEM;
916 goto out1;
917 }
918
919 if ((ret = asix_write_gpio(dev,
920 AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5)) < 0)
921 goto out2;
922
923 /* 0x10 is the phy id of the embedded 10/100 ethernet phy */
924 embd_phy = ((asix_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
925 if ((ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT,
926 embd_phy, 0, 0, buf)) < 0) {
927 dbg("Select PHY #1 failed: %d", ret);
928 goto out2;
929 }
930
931 if ((ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL)) < 0)
932 goto out2;
933
934 msleep(150);
935 if ((ret = asix_sw_reset(dev, AX_SWRESET_CLEAR)) < 0)
936 goto out2;
937
938 msleep(150);
939 if (embd_phy) {
940 if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL)) < 0)
941 goto out2;
942 }
943 else {
944 if ((ret = asix_sw_reset(dev, AX_SWRESET_PRTE)) < 0)
945 goto out2;
946 }
947
948 msleep(150);
949 rx_ctl = asix_read_rx_ctl(dev);
950 dbg("RX_CTL is 0x%04x after software reset", rx_ctl);
951 if ((ret = asix_write_rx_ctl(dev, 0x0000)) < 0)
952 goto out2;
953
954 rx_ctl = asix_read_rx_ctl(dev);
955 dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl);
956
957 /* Get the MAC address */
958 memset(buf, 0, ETH_ALEN);
959 if ((ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
960 0, 0, ETH_ALEN, buf)) < 0) {
961 dbg("Failed to read MAC address: %d", ret);
962 goto out2;
963 }
964 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
965
966 /* Initialize MII structure */
967 dev->mii.dev = dev->net;
968 dev->mii.mdio_read = asix_mdio_read;
969 dev->mii.mdio_write = asix_mdio_write;
970 dev->mii.phy_id_mask = 0x1f;
971 dev->mii.reg_num_mask = 0x1f;
972 dev->net->do_ioctl = asix_ioctl;
973 dev->mii.phy_id = asix_get_phy_addr(dev);
974
975 phyid = asix_get_phyid(dev);
976 dbg("PHYID=0x%08x", phyid);
977
978 if ((ret = asix_sw_reset(dev, AX_SWRESET_PRL)) < 0)
979 goto out2;
980
981 msleep(150);
982
983 if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL)) < 0)
984 goto out2;
985
986 msleep(150);
987
988 dev->net->set_multicast_list = asix_set_multicast;
989 dev->net->ethtool_ops = &ax88772_ethtool_ops;
990
991 asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
992 asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
993 ADVERTISE_ALL | ADVERTISE_CSMA);
994 mii_nway_restart(&dev->mii);
995
996 if ((ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT)) < 0)
997 goto out2;
998
999 if ((ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
1000 AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
1001 AX88772_IPG2_DEFAULT, 0, buf)) < 0) {
1002 dbg("Write IPG,IPG1,IPG2 failed: %d", ret);
1003 goto out2;
1004 }
1005
1006 /* Set RX_CTL to default values with 2k buffer, and enable cactus */
1007 if ((ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL)) < 0)
1008 goto out2;
1009
1010 rx_ctl = asix_read_rx_ctl(dev);
1011 dbg("RX_CTL is 0x%04x after all initializations", rx_ctl);
1012
1013 rx_ctl = asix_read_medium_status(dev);
1014 dbg("Medium Status is 0x%04x after all initializations", rx_ctl);
1015
1016 kfree(buf);
1017
1018 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
1019 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
1020 /* hard_mtu is still the default - the device does not support
1021 jumbo eth frames */
1022 dev->rx_urb_size = 2048;
1023 }
1024
1025 return 0;
1026
1027out2:
1028 kfree(buf);
1029out1:
1030 return ret;
1031}
1032
1033static struct ethtool_ops ax88178_ethtool_ops = {
1034 .get_drvinfo = asix_get_drvinfo,
1035 .get_link = asix_get_link,
1036 .get_msglevel = usbnet_get_msglevel,
1037 .set_msglevel = usbnet_set_msglevel,
1038 .get_wol = asix_get_wol,
1039 .set_wol = asix_set_wol,
1040 .get_eeprom_len = asix_get_eeprom_len,
1041 .get_eeprom = asix_get_eeprom,
1042 .get_settings = usbnet_get_settings,
1043 .set_settings = usbnet_set_settings,
1044 .nway_reset = usbnet_nway_reset,
1045};
1046
1047static int marvell_phy_init(struct usbnet *dev)
1048{
1049 struct asix_data *data = (struct asix_data *)&dev->data;
1050 u16 reg;
1051
1052 devdbg(dev,"marvell_phy_init()");
1053
1054 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_MARVELL_STATUS);
1055 devdbg(dev,"MII_MARVELL_STATUS = 0x%04x", reg);
1056
1057 asix_mdio_write(dev->net, dev->mii.phy_id, MII_MARVELL_CTRL,
1058 MARVELL_CTRL_RXDELAY | MARVELL_CTRL_TXDELAY);
1059
1060 if (data->ledmode) {
1061 reg = asix_mdio_read(dev->net, dev->mii.phy_id,
1062 MII_MARVELL_LED_CTRL);
1063 devdbg(dev,"MII_MARVELL_LED_CTRL (1) = 0x%04x", reg);
1064
1065 reg &= 0xf8ff;
1066 reg |= (1 + 0x0100);
1067 asix_mdio_write(dev->net, dev->mii.phy_id,
1068 MII_MARVELL_LED_CTRL, reg);
1069
1070 reg = asix_mdio_read(dev->net, dev->mii.phy_id,
1071 MII_MARVELL_LED_CTRL);
1072 devdbg(dev,"MII_MARVELL_LED_CTRL (2) = 0x%04x", reg);
1073 reg &= 0xfc0f;
1074 }
1075
1076 return 0;
1077}
1078
1079static int marvell_led_status(struct usbnet *dev, u16 speed)
1080{
1081 u16 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL);
1082
1083 devdbg(dev, "marvell_led_status() read 0x%04x", reg);
1084
1085 /* Clear out the center LED bits - 0x03F0 */
1086 reg &= 0xfc0f;
1087
1088 switch (speed) {
1089 case SPEED_1000:
1090 reg |= 0x03e0;
1091 break;
1092 case SPEED_100:
1093 reg |= 0x03b0;
1094 break;
1095 default:
1096 reg |= 0x02f0;
1097 }
1098
1099 devdbg(dev, "marvell_led_status() writing 0x%04x", reg);
1100 asix_mdio_write(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL, reg);
1101
1102 return 0;
1103}
1104
1105static int ax88178_link_reset(struct usbnet *dev)
1106{
1107 u16 mode;
1108 struct ethtool_cmd ecmd;
1109 struct asix_data *data = (struct asix_data *)&dev->data;
1110
1111 devdbg(dev,"ax88178_link_reset()");
1112
1113 mii_check_media(&dev->mii, 1, 1);
1114 mii_ethtool_gset(&dev->mii, &ecmd);
1115 mode = AX88178_MEDIUM_DEFAULT;
1116
1117 if (ecmd.speed == SPEED_1000)
1118 mode |= AX_MEDIUM_GM | AX_MEDIUM_ENCK;
1119 else if (ecmd.speed == SPEED_100)
1120 mode |= AX_MEDIUM_PS;
1121 else
1122 mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
1123
1124 if (ecmd.duplex == DUPLEX_FULL)
1125 mode |= AX_MEDIUM_FD;
1126 else
1127 mode &= ~AX_MEDIUM_FD;
1128
1129 devdbg(dev, "ax88178_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode);
1130
1131 asix_write_medium_mode(dev, mode);
1132
1133 if (data->phymode == PHY_MODE_MARVELL && data->ledmode)
1134 marvell_led_status(dev, ecmd.speed);
1135
1136 return 0;
1137}
1138
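/*
 * Pick the smallest RX bulk buffer size (2K/4K/8K/16K, encoded in the
 * MFB field of RX_CTL) that still holds the current hard_mtu, and set
 * the jumbo-frame enable bit in the medium mode register to match the
 * configured MTU.
 */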
1139static void ax88178_set_mfb(struct usbnet *dev)
1140{
1141 u16 mfb = AX_RX_CTL_MFB_16384;
1142 u16 rxctl;
1143 u16 medium;
1144 int old_rx_urb_size = dev->rx_urb_size;
1145
1146 if (dev->hard_mtu < 2048) {
1147 dev->rx_urb_size = 2048;
1148 mfb = AX_RX_CTL_MFB_2048;
1149 } else if (dev->hard_mtu < 4096) {
1150 dev->rx_urb_size = 4096;
1151 mfb = AX_RX_CTL_MFB_4096;
1152 } else if (dev->hard_mtu < 8192) {
1153 dev->rx_urb_size = 8192;
1154 mfb = AX_RX_CTL_MFB_8192;
1155 } else if (dev->hard_mtu < 16384) {
1156 dev->rx_urb_size = 16384;
1157 mfb = AX_RX_CTL_MFB_16384;
1158 }
1159
1160 rxctl = asix_read_rx_ctl(dev);
1161 asix_write_rx_ctl(dev, (rxctl & ~AX_RX_CTL_MFB_16384) | mfb);
1162
1163 medium = asix_read_medium_status(dev);
1164 if (dev->net->mtu > 1500)
1165 medium |= AX_MEDIUM_JFE;
1166 else
1167 medium &= ~AX_MEDIUM_JFE;
1168 asix_write_medium_mode(dev, medium);
1169
1170 if (dev->rx_urb_size > old_rx_urb_size)
1171 usbnet_unlink_rx_urbs(dev);
1172}
1173
1174static int ax88178_change_mtu(struct net_device *net, int new_mtu)
1175{
1176 struct usbnet *dev = netdev_priv(net);
1177 int ll_mtu = new_mtu + net->hard_header_len + 4;
1178
1179 devdbg(dev, "ax88178_change_mtu() new_mtu=%d", new_mtu);
1180
1181 if (new_mtu <= 0 || ll_mtu > 16384)
1182 return -EINVAL;
1183
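	/* Reject frame sizes that are an exact multiple of the bulk
	 * endpoint's max packet size; such transfers would need an
	 * extra zero-length packet to terminate. */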
1184 if ((ll_mtu % dev->maxpacket) == 0)
1185 return -EDOM;
1186
1187 net->mtu = new_mtu;
1188 dev->hard_mtu = net->mtu + net->hard_header_len;
1189 ax88178_set_mfb(dev);
1190
1191 return 0;
1192}
1193
1194static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
1195{
1196 struct asix_data *data = (struct asix_data *)&dev->data;
1197 int ret;
1198 void *buf;
1199 u16 eeprom;
1200 int gpio0 = 0;
1201 u32 phyid;
1202
1203 usbnet_get_endpoints(dev,intf);
1204
1205 buf = kmalloc(6, GFP_KERNEL);
1206 if(!buf) {
1207 dbg ("Cannot allocate memory for buffer");
1208 ret = -ENOMEM;
1209 goto out1;
1210 }
1211
1212 eeprom = 0;
1213 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &eeprom);
1214 dbg("GPIO Status: 0x%04x", eeprom);
1215
1216 asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
1217 asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
1218 asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
1219
1220 dbg("EEPROM index 0x17 is 0x%04x", eeprom);
1221
1222 if (eeprom == 0xffff) {
1223 data->phymode = PHY_MODE_MARVELL;
1224 data->ledmode = 0;
1225 gpio0 = 1;
1226 } else {
1227 data->phymode = eeprom & 7;
1228 data->ledmode = eeprom >> 8;
1229 gpio0 = (eeprom & 0x80) ? 0 : 1;
1230 }
1231 dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
1232
1233 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
1234 if ((eeprom >> 8) != 1) {
1235 asix_write_gpio(dev, 0x003c, 30);
1236 asix_write_gpio(dev, 0x001c, 300);
1237 asix_write_gpio(dev, 0x003c, 30);
1238 } else {
1239 dbg("gpio phymode == 1 path");
1240 asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
1241 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
1242 }
1243
1244 asix_sw_reset(dev, 0);
1245 msleep(150);
1246
1247 asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
1248 msleep(150);
1249
1250 asix_write_rx_ctl(dev, 0);
1251
1252 /* Get the MAC address */
1253 memset(buf, 0, ETH_ALEN);
1254 if ((ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
1255 0, 0, ETH_ALEN, buf)) < 0) {
1256 dbg("Failed to read MAC address: %d", ret);
1257 goto out2;
1258 }
1259 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
1260
1261 /* Initialize MII structure */
1262 dev->mii.dev = dev->net;
1263 dev->mii.mdio_read = asix_mdio_read;
1264 dev->mii.mdio_write = asix_mdio_write;
1265 dev->mii.phy_id_mask = 0x1f;
1266 dev->mii.reg_num_mask = 0xff;
1267 dev->mii.supports_gmii = 1;
1268 dev->net->do_ioctl = asix_ioctl;
1269 dev->mii.phy_id = asix_get_phy_addr(dev);
1270 dev->net->set_multicast_list = asix_set_multicast;
1271 dev->net->ethtool_ops = &ax88178_ethtool_ops;
1272 dev->net->change_mtu = &ax88178_change_mtu;
1273
1274 phyid = asix_get_phyid(dev);
1275 dbg("PHYID=0x%08x", phyid);
1276
1277 if (data->phymode == PHY_MODE_MARVELL) {
1278 marvell_phy_init(dev);
1279 msleep(60);
1280 }
1281
1282 asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR,
1283 BMCR_RESET | BMCR_ANENABLE);
1284 asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
1285 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1286 asix_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
1287 ADVERTISE_1000FULL);
1288
1289 mii_nway_restart(&dev->mii);
1290
1291 if ((ret = asix_write_medium_mode(dev, AX88178_MEDIUM_DEFAULT)) < 0)
1292 goto out2;
1293
1294 if ((ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL)) < 0)
1295 goto out2;
1296
1297 kfree(buf);
1298
1299 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
1300 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
1301 /* hard_mtu is still the default - the device does not support
1302 jumbo eth frames */
1303 dev->rx_urb_size = 2048;
1304 }
1305
1306 return 0;
1307
1308out2:
1309 kfree(buf);
1310out1:
1311 return ret;
1312}
1313
1314static const struct driver_info ax8817x_info = {
1315 .description = "ASIX AX8817x USB 2.0 Ethernet",
1316 .bind = ax88172_bind,
1317 .status = asix_status,
1318 .link_reset = ax88172_link_reset,
1319 .reset = ax88172_link_reset,
1320 .flags = FLAG_ETHER,
1321 .data = 0x00130103,
1322};
1323
1324static const struct driver_info dlink_dub_e100_info = {
1325 .description = "DLink DUB-E100 USB Ethernet",
1326 .bind = ax88172_bind,
1327 .status = asix_status,
1328 .link_reset = ax88172_link_reset,
1329 .reset = ax88172_link_reset,
1330 .flags = FLAG_ETHER,
1331 .data = 0x009f9d9f,
1332};
1333
1334static const struct driver_info netgear_fa120_info = {
1335 .description = "Netgear FA-120 USB Ethernet",
1336 .bind = ax88172_bind,
1337 .status = asix_status,
1338 .link_reset = ax88172_link_reset,
1339 .reset = ax88172_link_reset,
1340 .flags = FLAG_ETHER,
1341 .data = 0x00130103,
1342};
1343
1344static const struct driver_info hawking_uf200_info = {
1345 .description = "Hawking UF200 USB Ethernet",
1346 .bind = ax88172_bind,
1347 .status = asix_status,
1348 .link_reset = ax88172_link_reset,
1349 .reset = ax88172_link_reset,
1350 .flags = FLAG_ETHER,
1351 .data = 0x001f1d1f,
1352};
1353
1354static const struct driver_info ax88772_info = {
1355 .description = "ASIX AX88772 USB 2.0 Ethernet",
1356 .bind = ax88772_bind,
1357 .status = asix_status,
1358 .link_reset = ax88772_link_reset,
1359 .reset = ax88772_link_reset,
1360 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1361 .rx_fixup = asix_rx_fixup,
1362 .tx_fixup = asix_tx_fixup,
1363};
1364
1365static const struct driver_info ax88178_info = {
1366 .description = "ASIX AX88178 USB 2.0 Ethernet",
1367 .bind = ax88178_bind,
1368 .status = asix_status,
1369 .link_reset = ax88178_link_reset,
1370 .reset = ax88178_link_reset,
1371 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1372 .rx_fixup = asix_rx_fixup,
1373 .tx_fixup = asix_tx_fixup,
1374};
1375
1376static const struct usb_device_id products [] = {
1377{
1378 // Linksys USB200M
1379 USB_DEVICE (0x077b, 0x2226),
1380 .driver_info = (unsigned long) &ax8817x_info,
1381}, {
1382 // Netgear FA120
1383 USB_DEVICE (0x0846, 0x1040),
1384 .driver_info = (unsigned long) &netgear_fa120_info,
1385}, {
1386 // DLink DUB-E100
1387 USB_DEVICE (0x2001, 0x1a00),
1388 .driver_info = (unsigned long) &dlink_dub_e100_info,
1389}, {
1390 // Intellinet, ST Lab USB Ethernet
1391 USB_DEVICE (0x0b95, 0x1720),
1392 .driver_info = (unsigned long) &ax8817x_info,
1393}, {
1394 // Hawking UF200, TrendNet TU2-ET100
1395 USB_DEVICE (0x07b8, 0x420a),
1396 .driver_info = (unsigned long) &hawking_uf200_info,
1397}, {
1398 // Billionton Systems, USB2AR
1399 USB_DEVICE (0x08dd, 0x90ff),
1400 .driver_info = (unsigned long) &ax8817x_info,
1401}, {
1402 // ATEN UC210T
1403 USB_DEVICE (0x0557, 0x2009),
1404 .driver_info = (unsigned long) &ax8817x_info,
1405}, {
1406 // Buffalo LUA-U2-KTX
1407 USB_DEVICE (0x0411, 0x003d),
1408 .driver_info = (unsigned long) &ax8817x_info,
1409}, {
1410 // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
1411 USB_DEVICE (0x6189, 0x182d),
1412 .driver_info = (unsigned long) &ax8817x_info,
1413}, {
1414 // corega FEther USB2-TX
1415 USB_DEVICE (0x07aa, 0x0017),
1416 .driver_info = (unsigned long) &ax8817x_info,
1417}, {
1418 // Surecom EP-1427X-2
1419 USB_DEVICE (0x1189, 0x0893),
1420 .driver_info = (unsigned long) &ax8817x_info,
1421}, {
1422 // goodway corp usb gwusb2e
1423 USB_DEVICE (0x1631, 0x6200),
1424 .driver_info = (unsigned long) &ax8817x_info,
1425}, {
1426 // JVC MP-PRX1 Port Replicator
1427 USB_DEVICE (0x04f1, 0x3008),
1428 .driver_info = (unsigned long) &ax8817x_info,
1429}, {
1430 // ASIX AX88772 10/100
1431 USB_DEVICE (0x0b95, 0x7720),
1432 .driver_info = (unsigned long) &ax88772_info,
1433}, {
1434 // ASIX AX88178 10/100/1000
1435 USB_DEVICE (0x0b95, 0x1780),
1436 .driver_info = (unsigned long) &ax88178_info,
1437}, {
1438 // Linksys USB200M Rev 2
1439 USB_DEVICE (0x13b1, 0x0018),
1440 .driver_info = (unsigned long) &ax88772_info,
1441}, {
1442 // 0Q0 cable ethernet
1443 USB_DEVICE (0x1557, 0x7720),
1444 .driver_info = (unsigned long) &ax88772_info,
1445}, {
1446 // DLink DUB-E100 H/W Ver B1
1447 USB_DEVICE (0x07d1, 0x3c05),
1448 .driver_info = (unsigned long) &ax88772_info,
1449}, {
1450 // DLink DUB-E100 H/W Ver B1 Alternate
1451 USB_DEVICE (0x2001, 0x3c05),
1452 .driver_info = (unsigned long) &ax88772_info,
1453}, {
1454 // Linksys USB1000
1455 USB_DEVICE (0x1737, 0x0039),
1456 .driver_info = (unsigned long) &ax88178_info,
1457}, {
1458 // IO-DATA ETG-US2
1459 USB_DEVICE (0x04bb, 0x0930),
1460 .driver_info = (unsigned long) &ax88178_info,
1461},
1462 { }, // END
1463};
1464MODULE_DEVICE_TABLE(usb, products);
1465
1466static struct usb_driver asix_driver = {
1467 .name = "asix",
1468 .id_table = products,
1469 .probe = usbnet_probe,
1470 .suspend = usbnet_suspend,
1471 .resume = usbnet_resume,
1472 .disconnect = usbnet_disconnect,
1473};
1474
1475static int __init asix_init(void)
1476{
1477 return usb_register(&asix_driver);
1478}
1479module_init(asix_init);
1480
1481static void __exit asix_exit(void)
1482{
1483 usb_deregister(&asix_driver);
1484}
1485module_exit(asix_exit);
1486
1487MODULE_AUTHOR("David Hollis");
1488MODULE_DESCRIPTION("ASIX AX8817X based USB 2.0 Ethernet Devices");
1489MODULE_LICENSE("GPL");
1490
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
new file mode 100644
index 000000000000..86e90c59d551
--- /dev/null
+++ b/drivers/net/usb/catc.c
@@ -0,0 +1,963 @@
1/*
2 * Copyright (c) 2001 Vojtech Pavlik
3 *
4 * CATC EL1210A NetMate USB Ethernet driver
5 *
6 * Sponsored by SuSE
7 *
8 * Based on the work of
9 * Donald Becker
10 *
11 * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002
12 * - adds support for Belkin F5U011
13 */
14
15/*
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 *
30 * Should you need to contact me, the author, you can do so either by
31 * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
32 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
33 */
34
35#include <linux/init.h>
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/string.h>
39#include <linux/slab.h>
40#include <linux/netdevice.h>
41#include <linux/etherdevice.h>
42#include <linux/skbuff.h>
43#include <linux/spinlock.h>
44#include <linux/ethtool.h>
45#include <linux/crc32.h>
46#include <linux/bitops.h>
47#include <asm/uaccess.h>
48
49#undef DEBUG
50
51#include <linux/usb.h>
52
53/*
54 * Version information.
55 */
56
57#define DRIVER_VERSION "v2.8"
58#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>"
59#define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver"
60#define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet"
61
62MODULE_AUTHOR(DRIVER_AUTHOR);
63MODULE_DESCRIPTION(DRIVER_DESC);
64MODULE_LICENSE("GPL");
65
66static const char driver_name[] = "catc";
67
68/*
69 * Some defines.
70 */
71
72#define STATS_UPDATE (HZ) /* Time between stats updates */
73#define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */
74#define PKT_SZ 1536 /* Max Ethernet packet size */
75#define RX_MAX_BURST 15 /* Max packets per rx buffer (> 0, < 16) */
76#define TX_MAX_BURST 15 /* Max full sized packets per tx buffer (> 0) */
77#define CTRL_QUEUE 16 /* Max control requests in flight (power of two) */
78#define RX_PKT_SZ 1600 /* Max size of receive packet for F5U011 */
79
80/*
81 * Control requests.
82 */
83
84enum control_requests {
85 ReadMem = 0xf1,
86 GetMac = 0xf2,
87 Reset = 0xf4,
88 SetMac = 0xf5,
89 SetRxMode = 0xf5, /* F5U011 only */
90 WriteROM = 0xf8,
91 SetReg = 0xfa,
92 GetReg = 0xfb,
93 WriteMem = 0xfc,
94 ReadROM = 0xfd,
95};
96
97/*
98 * Registers.
99 */
100
101enum register_offsets {
102 TxBufCount = 0x20,
103 RxBufCount = 0x21,
104 OpModes = 0x22,
105 TxQed = 0x23,
106 RxQed = 0x24,
107 MaxBurst = 0x25,
108 RxUnit = 0x60,
109 EthStatus = 0x61,
110 StationAddr0 = 0x67,
111 EthStats = 0x69,
112 LEDCtrl = 0x81,
113};
114
115enum eth_stats {
116 TxSingleColl = 0x00,
117 TxMultiColl = 0x02,
118 TxExcessColl = 0x04,
119 RxFramErr = 0x06,
120};
121
122enum op_mode_bits {
123 Op3MemWaits = 0x03,
124 OpLenInclude = 0x08,
125 OpRxMerge = 0x10,
126 OpTxMerge = 0x20,
127 OpWin95bugfix = 0x40,
128 OpLoopback = 0x80,
129};
130
131enum rx_filter_bits {
132 RxEnable = 0x01,
133 RxPolarity = 0x02,
134 RxForceOK = 0x04,
135 RxMultiCast = 0x08,
136 RxPromisc = 0x10,
137 AltRxPromisc = 0x20, /* F5U011 uses different bit */
138};
139
140enum led_values {
141 LEDFast = 0x01,
142 LEDSlow = 0x02,
143 LEDFlash = 0x03,
144 LEDPulse = 0x04,
145 LEDLink = 0x08,
146};
147
148enum link_status {
149 LinkNoChange = 0,
150 LinkGood = 1,
151 LinkBad = 2
152};
153
154/*
155 * The catc struct.
156 */
157
158#define CTRL_RUNNING 0
159#define RX_RUNNING 1
160#define TX_RUNNING 2
161
162struct catc {
163 struct net_device *netdev;
164 struct usb_device *usbdev;
165
166 struct net_device_stats stats;
167 unsigned long flags;
168
169 unsigned int tx_ptr, tx_idx;
170 unsigned int ctrl_head, ctrl_tail;
171 spinlock_t tx_lock, ctrl_lock;
172
173 u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)];
174 u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)];
175 u8 irq_buf[2];
176 u8 ctrl_buf[64];
177 struct usb_ctrlrequest ctrl_dr;
178
179 struct timer_list timer;
180 u8 stats_buf[8];
181 u16 stats_vals[4];
182 unsigned long last_stats;
183
184 u8 multicast[64];
185
186 struct ctrl_queue {
187 u8 dir;
188 u8 request;
189 u16 value;
190 u16 index;
191 void *buf;
192 int len;
193 void (*callback)(struct catc *catc, struct ctrl_queue *q);
194 } ctrl_queue[CTRL_QUEUE];
195
196 struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb;
197
198 u8 is_f5u011; /* Set if device is an F5U011 */
199 u8 rxmode[2]; /* Used for F5U011 */
200 atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */
201};
202
203/*
204 * Useful macros.
205 */
206
207#define catc_get_mac(catc, mac) catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6)
208#define catc_reset(catc) catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0)
209#define catc_set_reg(catc, reg, val) catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0)
210#define catc_get_reg(catc, reg, buf) catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1)
211#define catc_write_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size)
212#define catc_read_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size)
213
214#define f5u011_rxmode(catc, rxmode) catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2)
215#define f5u011_rxmode_async(catc, rxmode) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL)
216#define f5u011_mchash_async(catc, hash) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL)
217
218#define catc_set_reg_async(catc, reg, val) catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL)
219#define catc_get_reg_async(catc, reg, cb) catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 1, cb)
220#define catc_write_mem_async(catc, addr, buf, size) catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL)
221
222/*
223 * Receive routines.
224 */
225
226static void catc_rx_done(struct urb *urb)
227{
228 struct catc *catc = urb->context;
229 u8 *pkt_start = urb->transfer_buffer;
230 struct sk_buff *skb;
231 int pkt_len, pkt_offset = 0;
232
233 if (!catc->is_f5u011) {
234 clear_bit(RX_RUNNING, &catc->flags);
235 pkt_offset = 2;
236 }
237
238 if (urb->status) {
239 dbg("rx_done, status %d, length %d", urb->status, urb->actual_length);
240 return;
241 }
242
243 do {
244 if(!catc->is_f5u011) {
245 pkt_len = le16_to_cpup((__le16*)pkt_start);
246 if (pkt_len > urb->actual_length) {
247 catc->stats.rx_length_errors++;
248 catc->stats.rx_errors++;
249 break;
250 }
251 } else {
252 pkt_len = urb->actual_length;
253 }
254
255 if (!(skb = dev_alloc_skb(pkt_len)))
256 return;
257
258 eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0);
259 skb_put(skb, pkt_len);
260
261 skb->protocol = eth_type_trans(skb, catc->netdev);
262 netif_rx(skb);
263
264 catc->stats.rx_packets++;
265 catc->stats.rx_bytes += pkt_len;
266
267 /* F5U011 only does one packet per RX */
268 if (catc->is_f5u011)
269 break;
270 pkt_start += (((pkt_len + 1) >> 6) + 1) << 6;
271
272 } while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length);
273
274 catc->netdev->last_rx = jiffies;
275
276 if (catc->is_f5u011) {
277 if (atomic_read(&catc->recq_sz)) {
278 int status;
279 atomic_dec(&catc->recq_sz);
280 dbg("getting extra packet");
281 urb->dev = catc->usbdev;
282 if ((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
283 dbg("submit(rx_urb) status %d", status);
284 }
285 } else {
286 clear_bit(RX_RUNNING, &catc->flags);
287 }
288 }
289}
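/*
 * Bulk-in buffer layout, as the loop above implies (inferred from the code,
 * no vendor documentation at hand): on the NetMate each received frame is a
 * 2-byte little-endian length followed by the frame data, and the next
 * frame starts at the following 64-byte boundary.  The pointer advance
 *
 *	pkt_start += (((pkt_len + 1) >> 6) + 1) << 6;
 *
 * is just "round pkt_len + 2 up to a multiple of 64", so a 60-byte frame
 * occupies one 64-byte slot and a 63-byte frame occupies two.  The F5U011
 * delivers exactly one unframed packet per bulk transfer, which is why the
 * loop exits early for it.
 */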
290
291static void catc_irq_done(struct urb *urb)
292{
293 struct catc *catc = urb->context;
294 u8 *data = urb->transfer_buffer;
295 int status;
296 unsigned int hasdata = 0, linksts = LinkNoChange;
297
298 if (!catc->is_f5u011) {
299 hasdata = data[1] & 0x80;
300 if (data[1] & 0x40)
301 linksts = LinkGood;
302 else if (data[1] & 0x20)
303 linksts = LinkBad;
304 } else {
305 hasdata = (unsigned int)(be16_to_cpup((__be16*)data) & 0x0fff);
306 if (data[0] == 0x90)
307 linksts = LinkGood;
308 else if (data[0] == 0xA0)
309 linksts = LinkBad;
310 }
311
312 switch (urb->status) {
313 case 0: /* success */
314 break;
315 case -ECONNRESET: /* unlink */
316 case -ENOENT:
317 case -ESHUTDOWN:
318 return;
319 /* -EPIPE: should clear the halt */
320 default: /* error */
321 dbg("irq_done, status %d, data %02x %02x.", urb->status, data[0], data[1]);
322 goto resubmit;
323 }
324
325 if (linksts == LinkGood) {
326 netif_carrier_on(catc->netdev);
327 dbg("link ok");
328 }
329
330 if (linksts == LinkBad) {
331 netif_carrier_off(catc->netdev);
332 dbg("link bad");
333 }
334
335 if (hasdata) {
336 if (test_and_set_bit(RX_RUNNING, &catc->flags)) {
337 if (catc->is_f5u011)
338 atomic_inc(&catc->recq_sz);
339 } else {
340 catc->rx_urb->dev = catc->usbdev;
341 if ((status = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) {
342 err("submit(rx_urb) status %d", status);
343 }
344 }
345 }
346resubmit:
347 status = usb_submit_urb (urb, GFP_ATOMIC);
348 if (status)
349 err ("can't resubmit intr, %s-%s, status %d",
350 catc->usbdev->bus->bus_name,
351 catc->usbdev->devpath, status);
352}
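/*
 * Interrupt endpoint format, as decoded above (inferred from the code, not
 * from vendor documentation): the NetMate packs everything into data[1] --
 * 0x80 means receive data is pending, 0x40 that the link came up, 0x20 that
 * it went down.  The F5U011 sends a big-endian 16-bit word whose low 12 bits
 * count pending receive packets, with the link state in data[0] (0x90 good,
 * 0xA0 bad); recq_sz remembers packets announced while an rx URB was already
 * in flight so catc_rx_done() can fetch them one at a time.
 */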
353
354/*
355 * Transmit routines.
356 */
357
358static int catc_tx_run(struct catc *catc)
359{
360 int status;
361
362 if (catc->is_f5u011)
363 catc->tx_ptr = (catc->tx_ptr + 63) & ~63;
364
365 catc->tx_urb->transfer_buffer_length = catc->tx_ptr;
366 catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx];
367 catc->tx_urb->dev = catc->usbdev;
368
369 if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0)
370 err("submit(tx_urb), status %d", status);
371
372 catc->tx_idx = !catc->tx_idx;
373 catc->tx_ptr = 0;
374
375 catc->netdev->trans_start = jiffies;
376 return status;
377}
378
379static void catc_tx_done(struct urb *urb)
380{
381 struct catc *catc = urb->context;
382 unsigned long flags;
383 int r;
384
385 if (urb->status == -ECONNRESET) {
386 dbg("Tx Reset.");
387 urb->status = 0;
388 catc->netdev->trans_start = jiffies;
389 catc->stats.tx_errors++;
390 clear_bit(TX_RUNNING, &catc->flags);
391 netif_wake_queue(catc->netdev);
392 return;
393 }
394
395 if (urb->status) {
396 dbg("tx_done, status %d, length %d", urb->status, urb->actual_length);
397 return;
398 }
399
400 spin_lock_irqsave(&catc->tx_lock, flags);
401
402 if (catc->tx_ptr) {
403 r = catc_tx_run(catc);
404 if (unlikely(r < 0))
405 clear_bit(TX_RUNNING, &catc->flags);
406 } else {
407 clear_bit(TX_RUNNING, &catc->flags);
408 }
409
410 netif_wake_queue(catc->netdev);
411
412 spin_unlock_irqrestore(&catc->tx_lock, flags);
413}
414
415static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
416{
417 struct catc *catc = netdev_priv(netdev);
418 unsigned long flags;
419 int r = 0;
420 char *tx_buf;
421
422 spin_lock_irqsave(&catc->tx_lock, flags);
423
424 catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6;
425 tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr;
426 *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len);
427 skb_copy_from_linear_data(skb, tx_buf + 2, skb->len);
428 catc->tx_ptr += skb->len + 2;
429
430 if (!test_and_set_bit(TX_RUNNING, &catc->flags)) {
431 r = catc_tx_run(catc);
432 if (r < 0)
433 clear_bit(TX_RUNNING, &catc->flags);
434 }
435
436 if ((catc->is_f5u011 && catc->tx_ptr)
437 || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2))))
438 netif_stop_queue(netdev);
439
440 spin_unlock_irqrestore(&catc->tx_lock, flags);
441
442 if (r >= 0) {
443 catc->stats.tx_bytes += skb->len;
444 catc->stats.tx_packets++;
445 }
446
447 dev_kfree_skb(skb);
448
449 return 0;
450}
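/*
 * Tx framing mirrors the rx side: each frame in tx_buf[] starts on a 64-byte
 * boundary and consists of a 2-byte length (little-endian on the NetMate,
 * big-endian on the F5U011) followed by the frame data.  The expression
 *
 *	catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6;
 *
 * rounds tx_ptr up to the next multiple of 64 (0 stays 0 thanks to the
 * well-defined unsigned wrap-around), e.g. 1..64 -> 64 and 65..128 -> 128.
 * The two tx buffers alternate: one is filled here while the other is in
 * flight, with catc_tx_run() flipping tx_idx on every submit.
 */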
451
452static void catc_tx_timeout(struct net_device *netdev)
453{
454 struct catc *catc = netdev_priv(netdev);
455
456 warn("Transmit timed out.");
457 usb_unlink_urb(catc->tx_urb);
458}
459
460/*
461 * Control messages.
462 */
463
464static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len)
465{
466 int retval = usb_control_msg(catc->usbdev,
467 dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0),
468 request, 0x40 | dir, value, index, buf, len, 1000);
469 return retval < 0 ? retval : 0;
470}
471
472static void catc_ctrl_run(struct catc *catc)
473{
474 struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
475 struct usb_device *usbdev = catc->usbdev;
476 struct urb *urb = catc->ctrl_urb;
477 struct usb_ctrlrequest *dr = &catc->ctrl_dr;
478 int status;
479
480 dr->bRequest = q->request;
481 dr->bRequestType = 0x40 | q->dir;
482 dr->wValue = cpu_to_le16(q->value);
483 dr->wIndex = cpu_to_le16(q->index);
484 dr->wLength = cpu_to_le16(q->len);
485
486 urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0);
487 urb->transfer_buffer_length = q->len;
488 urb->transfer_buffer = catc->ctrl_buf;
489 urb->setup_packet = (void *) dr;
490 urb->dev = usbdev;
491
492 if (!q->dir && q->buf && q->len)
493 memcpy(catc->ctrl_buf, q->buf, q->len);
494
 495	if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
496 err("submit(ctrl_urb) status %d", status);
497}
498
499static void catc_ctrl_done(struct urb *urb)
500{
501 struct catc *catc = urb->context;
502 struct ctrl_queue *q;
503 unsigned long flags;
504
505 if (urb->status)
506 dbg("ctrl_done, status %d, len %d.", urb->status, urb->actual_length);
507
508 spin_lock_irqsave(&catc->ctrl_lock, flags);
509
510 q = catc->ctrl_queue + catc->ctrl_tail;
511
512 if (q->dir) {
513 if (q->buf && q->len)
514 memcpy(q->buf, catc->ctrl_buf, q->len);
515 else
516 q->buf = catc->ctrl_buf;
517 }
518
519 if (q->callback)
520 q->callback(catc, q);
521
522 catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);
523
524 if (catc->ctrl_head != catc->ctrl_tail)
525 catc_ctrl_run(catc);
526 else
527 clear_bit(CTRL_RUNNING, &catc->flags);
528
529 spin_unlock_irqrestore(&catc->ctrl_lock, flags);
530}
531
532static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value,
533 u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q))
534{
535 struct ctrl_queue *q;
536 int retval = 0;
537 unsigned long flags;
538
539 spin_lock_irqsave(&catc->ctrl_lock, flags);
540
541 q = catc->ctrl_queue + catc->ctrl_head;
542
543 q->dir = dir;
544 q->request = request;
545 q->value = value;
546 q->index = index;
547 q->buf = buf;
548 q->len = len;
549 q->callback = callback;
550
551 catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1);
552
553 if (catc->ctrl_head == catc->ctrl_tail) {
554 err("ctrl queue full");
555 catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);
556 retval = -1;
557 }
558
559 if (!test_and_set_bit(CTRL_RUNNING, &catc->flags))
560 catc_ctrl_run(catc);
561
562 spin_unlock_irqrestore(&catc->ctrl_lock, flags);
563
564 return retval;
565}
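/*
 * The control path is a small single-URB ring: catc_ctrl_async() produces at
 * ctrl_head, completions consume at ctrl_tail, and since CTRL_QUEUE is a
 * power of two the indices wrap with a cheap mask, e.g. (15 + 1) & (16 - 1)
 * == 0.  Only one control URB is ever in flight; the CTRL_RUNNING bit
 * decides who calls catc_ctrl_run(), and catc_ctrl_done() keeps submitting
 * queued requests until the tail catches up with the head.
 */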
566
567/*
568 * Statistics.
569 */
570
571static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
572{
573 int index = q->index - EthStats;
574 u16 data, last;
575
576 catc->stats_buf[index] = *((char *)q->buf);
577
578 if (index & 1)
579 return;
580
581 data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1];
582 last = catc->stats_vals[index >> 1];
583
584 switch (index) {
585 case TxSingleColl:
586 case TxMultiColl:
587 catc->stats.collisions += data - last;
588 break;
589 case TxExcessColl:
590 catc->stats.tx_aborted_errors += data - last;
591 catc->stats.tx_errors += data - last;
592 break;
593 case RxFramErr:
594 catc->stats.rx_frame_errors += data - last;
595 catc->stats.rx_errors += data - last;
596 break;
597 }
598
599 catc->stats_vals[index >> 1] = data;
600}
601
602static void catc_stats_timer(unsigned long data)
603{
604 struct catc *catc = (void *) data;
605 int i;
606
607 for (i = 0; i < 8; i++)
608 catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done);
609
610 mod_timer(&catc->timer, jiffies + STATS_UPDATE);
611}
612
613static struct net_device_stats *catc_get_stats(struct net_device *netdev)
614{
615 struct catc *catc = netdev_priv(netdev);
616 return &catc->stats;
617}
618
619/*
620 * Receive modes. Broadcast, Multicast, Promisc.
621 */
622
623static void catc_multicast(unsigned char *addr, u8 *multicast)
624{
625 u32 crc;
626
627 crc = ether_crc_le(6, addr);
628 multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
629}
630
631static void catc_set_multicast_list(struct net_device *netdev)
632{
633 struct catc *catc = netdev_priv(netdev);
634 struct dev_mc_list *mc;
635 u8 broadcast[6];
636 u8 rx = RxEnable | RxPolarity | RxMultiCast;
637 int i;
638
639 memset(broadcast, 0xff, 6);
640 memset(catc->multicast, 0, 64);
641
642 catc_multicast(broadcast, catc->multicast);
643 catc_multicast(netdev->dev_addr, catc->multicast);
644
645 if (netdev->flags & IFF_PROMISC) {
646 memset(catc->multicast, 0xff, 64);
647 rx |= (!catc->is_f5u011) ? RxPromisc : AltRxPromisc;
648 }
649
650 if (netdev->flags & IFF_ALLMULTI) {
651 memset(catc->multicast, 0xff, 64);
652 } else {
653 for (i = 0, mc = netdev->mc_list; mc && i < netdev->mc_count; i++, mc = mc->next) {
654 u32 crc = ether_crc_le(6, mc->dmi_addr);
655 if (!catc->is_f5u011) {
656 catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
657 } else {
658 catc->multicast[7-(crc >> 29)] |= 1 << ((crc >> 26) & 7);
659 }
660 }
661 }
662 if (!catc->is_f5u011) {
663 catc_set_reg_async(catc, RxUnit, rx);
664 catc_write_mem_async(catc, 0xfa80, catc->multicast, 64);
665 } else {
666 f5u011_mchash_async(catc, catc->multicast);
667 if (catc->rxmode[0] != rx) {
668 catc->rxmode[0] = rx;
669 dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]);
670 f5u011_rxmode_async(catc, catc->rxmode);
671 }
672 }
673}
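/*
 * Two different hardware hash filters are fed above (layout inferred from
 * the code): the NetMate keeps a 512-bit table in adapter memory at 0xfa80,
 * indexed by the low 9 bits of ether_crc_le() -- bit (crc & 7) of byte
 * ((crc >> 3) & 0x3f).  The F5U011 has only a 64-bit table, indexed by the
 * top 6 bits of the same CRC: bit ((crc >> 26) & 7) of byte 7 - (crc >> 29).
 * Broadcast and the station address are always added, and promiscuous or
 * all-multicast mode simply sets every bit.
 */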
674
675static void catc_get_drvinfo(struct net_device *dev,
676 struct ethtool_drvinfo *info)
677{
678 struct catc *catc = netdev_priv(dev);
 679	strncpy(info->driver, driver_name, sizeof(info->driver));
 680	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
681 usb_make_path (catc->usbdev, info->bus_info, sizeof info->bus_info);
682}
683
684static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
685{
686 struct catc *catc = netdev_priv(dev);
687 if (!catc->is_f5u011)
688 return -EOPNOTSUPP;
689
690 cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP;
691 cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP;
692 cmd->speed = SPEED_10;
693 cmd->duplex = DUPLEX_HALF;
694 cmd->port = PORT_TP;
695 cmd->phy_address = 0;
696 cmd->transceiver = XCVR_INTERNAL;
697 cmd->autoneg = AUTONEG_DISABLE;
698 cmd->maxtxpkt = 1;
699 cmd->maxrxpkt = 1;
700 return 0;
701}
702
703static struct ethtool_ops ops = {
704 .get_drvinfo = catc_get_drvinfo,
705 .get_settings = catc_get_settings,
706 .get_link = ethtool_op_get_link
707};
708
709/*
710 * Open, close.
711 */
712
713static int catc_open(struct net_device *netdev)
714{
715 struct catc *catc = netdev_priv(netdev);
716 int status;
717
718 catc->irq_urb->dev = catc->usbdev;
719 if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) {
720 err("submit(irq_urb) status %d", status);
721 return -1;
722 }
723
724 netif_start_queue(netdev);
725
726 if (!catc->is_f5u011)
727 mod_timer(&catc->timer, jiffies + STATS_UPDATE);
728
729 return 0;
730}
731
732static int catc_stop(struct net_device *netdev)
733{
734 struct catc *catc = netdev_priv(netdev);
735
736 netif_stop_queue(netdev);
737
738 if (!catc->is_f5u011)
739 del_timer_sync(&catc->timer);
740
741 usb_kill_urb(catc->rx_urb);
742 usb_kill_urb(catc->tx_urb);
743 usb_kill_urb(catc->irq_urb);
744 usb_kill_urb(catc->ctrl_urb);
745
746 return 0;
747}
748
749/*
750 * USB probe, disconnect.
751 */
752
753static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
754{
755 struct usb_device *usbdev = interface_to_usbdev(intf);
756 struct net_device *netdev;
757 struct catc *catc;
758 u8 broadcast[6];
759 int i, pktsz;
760
761 if (usb_set_interface(usbdev,
762 intf->altsetting->desc.bInterfaceNumber, 1)) {
763 err("Can't set altsetting 1.");
764 return -EIO;
765 }
766
767 netdev = alloc_etherdev(sizeof(struct catc));
768 if (!netdev)
769 return -ENOMEM;
770
771 catc = netdev_priv(netdev);
772
773 netdev->open = catc_open;
774 netdev->hard_start_xmit = catc_hard_start_xmit;
775 netdev->stop = catc_stop;
776 netdev->get_stats = catc_get_stats;
777 netdev->tx_timeout = catc_tx_timeout;
778 netdev->watchdog_timeo = TX_TIMEOUT;
779 netdev->set_multicast_list = catc_set_multicast_list;
780 SET_ETHTOOL_OPS(netdev, &ops);
781
782 catc->usbdev = usbdev;
783 catc->netdev = netdev;
784
785 spin_lock_init(&catc->tx_lock);
786 spin_lock_init(&catc->ctrl_lock);
787
788 init_timer(&catc->timer);
789 catc->timer.data = (long) catc;
790 catc->timer.function = catc_stats_timer;
791
792 catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
793 catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
794 catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
795 catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
796 if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
797 (!catc->rx_urb) || (!catc->irq_urb)) {
798 err("No free urbs available.");
799 usb_free_urb(catc->ctrl_urb);
800 usb_free_urb(catc->tx_urb);
801 usb_free_urb(catc->rx_urb);
802 usb_free_urb(catc->irq_urb);
803 free_netdev(netdev);
804 return -ENOMEM;
805 }
806
807 /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
808 if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
809 le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
810 le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
811 dbg("Testing for f5u011");
812 catc->is_f5u011 = 1;
813 atomic_set(&catc->recq_sz, 0);
814 pktsz = RX_PKT_SZ;
815 } else {
816 pktsz = RX_MAX_BURST * (PKT_SZ + 2);
817 }
818
819 usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
820 NULL, NULL, 0, catc_ctrl_done, catc);
821
822 usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1),
823 NULL, 0, catc_tx_done, catc);
824
825 usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1),
826 catc->rx_buf, pktsz, catc_rx_done, catc);
827
828 usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2),
829 catc->irq_buf, 2, catc_irq_done, catc, 1);
830
831 if (!catc->is_f5u011) {
832 dbg("Checking memory size\n");
833
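		/*
		 * Probe the amount of on-board packet memory: write two
		 * different patterns at 0x7a80 and 0xfa80 and read the first
		 * location back.  On a 32k part the two addresses apparently
		 * alias (0xfa80 wraps onto 0x7a80), so the second write
		 * clobbers the first and 0x87654321 is read back; on a 64k
		 * part the original 0x12345678 survives.  Buffer counts are
		 * then sized accordingly.
		 */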
834 i = 0x12345678;
835 catc_write_mem(catc, 0x7a80, &i, 4);
836 i = 0x87654321;
837 catc_write_mem(catc, 0xfa80, &i, 4);
838 catc_read_mem(catc, 0x7a80, &i, 4);
839
840 switch (i) {
841 case 0x12345678:
842 catc_set_reg(catc, TxBufCount, 8);
843 catc_set_reg(catc, RxBufCount, 32);
844 dbg("64k Memory\n");
845 break;
846 default:
847 warn("Couldn't detect memory size, assuming 32k");
848 case 0x87654321:
849 catc_set_reg(catc, TxBufCount, 4);
850 catc_set_reg(catc, RxBufCount, 16);
851 dbg("32k Memory\n");
852 break;
853 }
854
855 dbg("Getting MAC from SEEROM.");
856
857 catc_get_mac(catc, netdev->dev_addr);
858
859 dbg("Setting MAC into registers.");
860
861 for (i = 0; i < 6; i++)
862 catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
863
864 dbg("Filling the multicast list.");
865
866 memset(broadcast, 0xff, 6);
867 catc_multicast(broadcast, catc->multicast);
868 catc_multicast(netdev->dev_addr, catc->multicast);
869 catc_write_mem(catc, 0xfa80, catc->multicast, 64);
870
871 dbg("Clearing error counters.");
872
873 for (i = 0; i < 8; i++)
874 catc_set_reg(catc, EthStats + i, 0);
875 catc->last_stats = jiffies;
876
877 dbg("Enabling.");
878
879 catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
880 catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
881 catc_set_reg(catc, LEDCtrl, LEDLink);
882 catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
883 } else {
884 dbg("Performing reset\n");
885 catc_reset(catc);
886 catc_get_mac(catc, netdev->dev_addr);
887
888 dbg("Setting RX Mode");
889 catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
890 catc->rxmode[1] = 0;
891 f5u011_rxmode(catc, catc->rxmode);
892 }
893 dbg("Init done.");
894 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, ",
895 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
896 usbdev->bus->bus_name, usbdev->devpath);
897 for (i = 0; i < 5; i++) printk("%2.2x:", netdev->dev_addr[i]);
898 printk("%2.2x.\n", netdev->dev_addr[i]);
899 usb_set_intfdata(intf, catc);
900
901 SET_NETDEV_DEV(netdev, &intf->dev);
902 if (register_netdev(netdev) != 0) {
903 usb_set_intfdata(intf, NULL);
904 usb_free_urb(catc->ctrl_urb);
905 usb_free_urb(catc->tx_urb);
906 usb_free_urb(catc->rx_urb);
907 usb_free_urb(catc->irq_urb);
908 free_netdev(netdev);
909 return -EIO;
910 }
911 return 0;
912}
913
914static void catc_disconnect(struct usb_interface *intf)
915{
916 struct catc *catc = usb_get_intfdata(intf);
917
918 usb_set_intfdata(intf, NULL);
919 if (catc) {
920 unregister_netdev(catc->netdev);
921 usb_free_urb(catc->ctrl_urb);
922 usb_free_urb(catc->tx_urb);
923 usb_free_urb(catc->rx_urb);
924 usb_free_urb(catc->irq_urb);
925 free_netdev(catc->netdev);
926 }
927}
928
929/*
930 * Module functions and tables.
931 */
932
933static struct usb_device_id catc_id_table [] = {
934 { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */
935 { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */
936 { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */
937 { }
938};
939
940MODULE_DEVICE_TABLE(usb, catc_id_table);
941
942static struct usb_driver catc_driver = {
943 .name = driver_name,
944 .probe = catc_probe,
945 .disconnect = catc_disconnect,
946 .id_table = catc_id_table,
947};
948
949static int __init catc_init(void)
950{
951 int result = usb_register(&catc_driver);
952 if (result == 0)
953 info(DRIVER_VERSION " " DRIVER_DESC);
954 return result;
955}
956
957static void __exit catc_exit(void)
958{
959 usb_deregister(&catc_driver);
960}
961
962module_init(catc_init);
963module_exit(catc_exit);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
new file mode 100644
index 000000000000..5a21f06bf8a5
--- /dev/null
+++ b/drivers/net/usb/cdc_ether.c
@@ -0,0 +1,570 @@
1/*
2 * CDC Ethernet based networking peripherals
3 * Copyright (C) 2003-2005 by David Brownell
4 * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21// #define DEBUG // error path messages, extra info
22// #define VERBOSE // more; success messages
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ctype.h>
29#include <linux/ethtool.h>
30#include <linux/workqueue.h>
31#include <linux/mii.h>
32#include <linux/usb.h>
33#include <linux/usb/cdc.h>
34
35#include "usbnet.h"
36
37
38#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
39
40static int is_rndis(struct usb_interface_descriptor *desc)
41{
42 return desc->bInterfaceClass == USB_CLASS_COMM
43 && desc->bInterfaceSubClass == 2
44 && desc->bInterfaceProtocol == 0xff;
45}
46
47static int is_activesync(struct usb_interface_descriptor *desc)
48{
49 return desc->bInterfaceClass == USB_CLASS_MISC
50 && desc->bInterfaceSubClass == 1
51 && desc->bInterfaceProtocol == 1;
52}
53
54#else
55
56#define is_rndis(desc) 0
57#define is_activesync(desc) 0
58
59#endif
60
61/*
62 * probes control interface, claims data interface, collects the bulk
63 * endpoints, activates data interface (if needed), maybe sets MTU.
64 * all pure cdc, except for certain firmware workarounds, and knowing
65 * that rndis uses one different rule.
66 */
67int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
68{
69 u8 *buf = intf->cur_altsetting->extra;
70 int len = intf->cur_altsetting->extralen;
71 struct usb_interface_descriptor *d;
72 struct cdc_state *info = (void *) &dev->data;
73 int status;
74 int rndis;
75 struct usb_driver *driver = driver_of(intf);
76
77 if (sizeof dev->data < sizeof *info)
78 return -EDOM;
79
80 /* expect strict spec conformance for the descriptors, but
81 * cope with firmware which stores them in the wrong place
82 */
83 if (len == 0 && dev->udev->actconfig->extralen) {
84 /* Motorola SB4100 (and others: Brad Hards says it's
85 * from a Broadcom design) put CDC descriptors here
86 */
87 buf = dev->udev->actconfig->extra;
88 len = dev->udev->actconfig->extralen;
89 if (len)
90 dev_dbg(&intf->dev,
91 "CDC descriptors on config\n");
92 }
93
94 /* this assumes that if there's a non-RNDIS vendor variant
95 * of cdc-acm, it'll fail RNDIS requests cleanly.
96 */
97 rndis = is_rndis(&intf->cur_altsetting->desc)
98 || is_activesync(&intf->cur_altsetting->desc);
99
100 memset(info, 0, sizeof *info);
101 info->control = intf;
102 while (len > 3) {
103 if (buf [1] != USB_DT_CS_INTERFACE)
104 goto next_desc;
105
106 /* use bDescriptorSubType to identify the CDC descriptors.
107 * We expect devices with CDC header and union descriptors.
108 * For CDC Ethernet we need the ethernet descriptor.
109 * For RNDIS, ignore two (pointless) CDC modem descriptors
110 * in favor of a complicated OID-based RPC scheme doing what
111 * CDC Ethernet achieves with a simple descriptor.
112 */
113 switch (buf [2]) {
114 case USB_CDC_HEADER_TYPE:
115 if (info->header) {
116 dev_dbg(&intf->dev, "extra CDC header\n");
117 goto bad_desc;
118 }
119 info->header = (void *) buf;
120 if (info->header->bLength != sizeof *info->header) {
121 dev_dbg(&intf->dev, "CDC header len %u\n",
122 info->header->bLength);
123 goto bad_desc;
124 }
125 break;
126 case USB_CDC_ACM_TYPE:
127 /* paranoia: disambiguate a "real" vendor-specific
128 * modem interface from an RNDIS non-modem.
129 */
130 if (rndis) {
131 struct usb_cdc_acm_descriptor *d;
132
133 d = (void *) buf;
134 if (d->bmCapabilities) {
135 dev_dbg(&intf->dev,
136 "ACM capabilities %02x, "
137 "not really RNDIS?\n",
138 d->bmCapabilities);
139 goto bad_desc;
140 }
141 }
142 break;
143 case USB_CDC_UNION_TYPE:
144 if (info->u) {
145 dev_dbg(&intf->dev, "extra CDC union\n");
146 goto bad_desc;
147 }
148 info->u = (void *) buf;
149 if (info->u->bLength != sizeof *info->u) {
150 dev_dbg(&intf->dev, "CDC union len %u\n",
151 info->u->bLength);
152 goto bad_desc;
153 }
154
155 /* we need a master/control interface (what we're
156 * probed with) and a slave/data interface; union
157 * descriptors sort this all out.
158 */
159 info->control = usb_ifnum_to_if(dev->udev,
160 info->u->bMasterInterface0);
161 info->data = usb_ifnum_to_if(dev->udev,
162 info->u->bSlaveInterface0);
163 if (!info->control || !info->data) {
164 dev_dbg(&intf->dev,
165 "master #%u/%p slave #%u/%p\n",
166 info->u->bMasterInterface0,
167 info->control,
168 info->u->bSlaveInterface0,
169 info->data);
170 goto bad_desc;
171 }
172 if (info->control != intf) {
173 dev_dbg(&intf->dev, "bogus CDC Union\n");
174 /* Ambit USB Cable Modem (and maybe others)
175 * interchanges master and slave interface.
176 */
177 if (info->data == intf) {
178 info->data = info->control;
179 info->control = intf;
180 } else
181 goto bad_desc;
182 }
183
184 /* a data interface altsetting does the real i/o */
185 d = &info->data->cur_altsetting->desc;
186 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) {
187 dev_dbg(&intf->dev, "slave class %u\n",
188 d->bInterfaceClass);
189 goto bad_desc;
190 }
191 break;
192 case USB_CDC_ETHERNET_TYPE:
193 if (info->ether) {
194 dev_dbg(&intf->dev, "extra CDC ether\n");
195 goto bad_desc;
196 }
197 info->ether = (void *) buf;
198 if (info->ether->bLength != sizeof *info->ether) {
199 dev_dbg(&intf->dev, "CDC ether len %u\n",
200 info->ether->bLength);
201 goto bad_desc;
202 }
203 dev->hard_mtu = le16_to_cpu(
204 info->ether->wMaxSegmentSize);
205 /* because of Zaurus, we may be ignoring the host
206 * side link address we were given.
207 */
208 break;
209 }
210next_desc:
211 len -= buf [0]; /* bLength */
212 buf += buf [0];
213 }
214
215 /* Microsoft ActiveSync based RNDIS devices lack the CDC descriptors,
216 * so we'll hard-wire the interfaces and not check for descriptors.
217 */
218 if (is_activesync(&intf->cur_altsetting->desc) && !info->u) {
219 info->control = usb_ifnum_to_if(dev->udev, 0);
220 info->data = usb_ifnum_to_if(dev->udev, 1);
221 if (!info->control || !info->data) {
222 dev_dbg(&intf->dev,
223 "activesync: master #0/%p slave #1/%p\n",
224 info->control,
225 info->data);
226 goto bad_desc;
227 }
228
229 } else if (!info->header || !info->u || (!rndis && !info->ether)) {
230 dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n",
231 info->header ? "" : "header ",
232 info->u ? "" : "union ",
233 info->ether ? "" : "ether ");
234 goto bad_desc;
235 }
236
237 /* claim data interface and set it up ... with side effects.
238 * network traffic can't flow until an altsetting is enabled.
239 */
240 status = usb_driver_claim_interface(driver, info->data, dev);
241 if (status < 0)
242 return status;
243 status = usbnet_get_endpoints(dev, info->data);
244 if (status < 0) {
245 /* ensure immediate exit from usbnet_disconnect */
246 usb_set_intfdata(info->data, NULL);
247 usb_driver_release_interface(driver, info->data);
248 return status;
249 }
250
251 /* status endpoint: optional for CDC Ethernet, not RNDIS (or ACM) */
252 dev->status = NULL;
253 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) {
254 struct usb_endpoint_descriptor *desc;
255
256 dev->status = &info->control->cur_altsetting->endpoint [0];
257 desc = &dev->status->desc;
258 if (!usb_endpoint_is_int_in(desc)
259 || (le16_to_cpu(desc->wMaxPacketSize)
260 < sizeof(struct usb_cdc_notification))
261 || !desc->bInterval) {
262 dev_dbg(&intf->dev, "bad notification endpoint\n");
263 dev->status = NULL;
264 }
265 }
266 if (rndis && !dev->status) {
267 dev_dbg(&intf->dev, "missing RNDIS status endpoint\n");
268 usb_set_intfdata(info->data, NULL);
269 usb_driver_release_interface(driver, info->data);
270 return -ENODEV;
271 }
272 return 0;
273
274bad_desc:
275 dev_info(&dev->udev->dev, "bad CDC descriptors\n");
276 return -ENODEV;
277}
278EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind);
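/*
 * For reference (not a device handled in this file): a minidriver that needs
 * nothing beyond the generic CDC descriptor parsing could point its
 * driver_info straight at the helpers this file exports, roughly:
 *
 *	static const struct driver_info plain_cdc_info = {
 *		.description	= "Plain CDC Ethernet device",
 *		.flags		= FLAG_ETHER,
 *		.bind		= usbnet_generic_cdc_bind,
 *		.unbind		= usbnet_cdc_unbind,
 *	};
 *
 * The cdc_bind()/cdc_info pair further down does exactly that, plus pulling
 * the MAC address out of the CDC ethernet descriptor.
 */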
279
280void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
281{
282 struct cdc_state *info = (void *) &dev->data;
283 struct usb_driver *driver = driver_of(intf);
284
285 /* disconnect master --> disconnect slave */
286 if (intf == info->control && info->data) {
287 /* ensure immediate exit from usbnet_disconnect */
288 usb_set_intfdata(info->data, NULL);
289 usb_driver_release_interface(driver, info->data);
290 info->data = NULL;
291 }
292
293 /* and vice versa (just in case) */
294 else if (intf == info->data && info->control) {
295 /* ensure immediate exit from usbnet_disconnect */
296 usb_set_intfdata(info->control, NULL);
297 usb_driver_release_interface(driver, info->control);
298 info->control = NULL;
299 }
300}
301EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
302
303
304/*-------------------------------------------------------------------------
305 *
306 * Communications Device Class, Ethernet Control model
307 *
308 * Takes two interfaces. The DATA interface is inactive till an altsetting
309 * is selected. Configuration data includes class descriptors. There's
310 * an optional status endpoint on the control interface.
311 *
312 * This should interop with whatever the 2.4 "CDCEther.c" driver
313 * (by Brad Hards) talked with, with more functionality.
314 *
315 *-------------------------------------------------------------------------*/
316
317static void dumpspeed(struct usbnet *dev, __le32 *speeds)
318{
319 if (netif_msg_timer(dev))
320 devinfo(dev, "link speeds: %u kbps up, %u kbps down",
321 __le32_to_cpu(speeds[0]) / 1000,
322 __le32_to_cpu(speeds[1]) / 1000);
323}
324
325static void cdc_status(struct usbnet *dev, struct urb *urb)
326{
327 struct usb_cdc_notification *event;
328
329 if (urb->actual_length < sizeof *event)
330 return;
331
332 /* SPEED_CHANGE can get split into two 8-byte packets */
333 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
334 dumpspeed(dev, (__le32 *) urb->transfer_buffer);
335 return;
336 }
337
338 event = urb->transfer_buffer;
339 switch (event->bNotificationType) {
340 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
341 if (netif_msg_timer(dev))
342 devdbg(dev, "CDC: carrier %s",
343 event->wValue ? "on" : "off");
344 if (event->wValue)
345 netif_carrier_on(dev->net);
346 else
347 netif_carrier_off(dev->net);
348 break;
349 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
350 if (netif_msg_timer(dev))
351 devdbg(dev, "CDC: speed change (len %d)",
352 urb->actual_length);
353 if (urb->actual_length != (sizeof *event + 8))
354 set_bit(EVENT_STS_SPLIT, &dev->flags);
355 else
356 dumpspeed(dev, (__le32 *) &event[1]);
357 break;
358 /* USB_CDC_NOTIFY_RESPONSE_AVAILABLE can happen too (e.g. RNDIS),
359 * but there are no standard formats for the response data.
360 */
361 default:
362 deverr(dev, "CDC: unexpected notification %02x!",
363 event->bNotificationType);
364 break;
365 }
366}
367
368static u8 nibble(unsigned char c)
369{
370 if (likely(isdigit(c)))
371 return c - '0';
372 c = toupper(c);
373 if (likely(isxdigit(c)))
374 return 10 + c - 'A';
375 return 0;
376}
377
378static inline int
379get_ethernet_addr(struct usbnet *dev, struct usb_cdc_ether_desc *e)
380{
381 int tmp, i;
382 unsigned char buf [13];
383
384 tmp = usb_string(dev->udev, e->iMACAddress, buf, sizeof buf);
385 if (tmp != 12) {
386 dev_dbg(&dev->udev->dev,
387 "bad MAC string %d fetch, %d\n", e->iMACAddress, tmp);
388 if (tmp >= 0)
389 tmp = -EINVAL;
390 return tmp;
391 }
392 for (i = tmp = 0; i < 6; i++, tmp += 2)
393 dev->net->dev_addr [i] =
394 (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]);
395 return 0;
396}
397
398static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
399{
400 int status;
401 struct cdc_state *info = (void *) &dev->data;
402
403 status = usbnet_generic_cdc_bind(dev, intf);
404 if (status < 0)
405 return status;
406
407 status = get_ethernet_addr(dev, info->ether);
408 if (status < 0) {
409 usb_set_intfdata(info->data, NULL);
410 usb_driver_release_interface(driver_of(intf), info->data);
411 return status;
412 }
413
414 /* FIXME cdc-ether has some multicast code too, though it complains
415 * in routine cases. info->ether describes the multicast support.
416 * Implement that here, manipulating the cdc filter as needed.
417 */
418 return 0;
419}
420
421static const struct driver_info cdc_info = {
422 .description = "CDC Ethernet Device",
423 .flags = FLAG_ETHER,
424 // .check_connect = cdc_check_connect,
425 .bind = cdc_bind,
426 .unbind = usbnet_cdc_unbind,
427 .status = cdc_status,
428};
429
430/*-------------------------------------------------------------------------*/
431
432
433static const struct usb_device_id products [] = {
434/*
435 * BLACKLIST !!
436 *
437 * First blacklist any products that are egregiously nonconformant
438 * with the CDC Ethernet specs. Minor braindamage we cope with; when
439 * they're not even trying, needing a separate driver is only the first
440 * of the differences to show up.
441 */
442
443#define ZAURUS_MASTER_INTERFACE \
444 .bInterfaceClass = USB_CLASS_COMM, \
445 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
446 .bInterfaceProtocol = USB_CDC_PROTO_NONE
447
448/* SA-1100 based Sharp Zaurus ("collie"), or compatible;
449 * wire-incompatible with true CDC Ethernet implementations.
450 * (And, it seems, needlessly so...)
451 */
452{
453 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
454 | USB_DEVICE_ID_MATCH_DEVICE,
455 .idVendor = 0x04DD,
456 .idProduct = 0x8004,
457 ZAURUS_MASTER_INTERFACE,
458 .driver_info = 0,
459},
460
461/* PXA-25x based Sharp Zaurii. Note that it seems some of these
462 * (later models especially) may have shipped only with firmware
463 * advertising false "CDC MDLM" compatibility ... but we're not
464 * clear which models did that, so for now let's assume the worst.
465 */
466{
467 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
468 | USB_DEVICE_ID_MATCH_DEVICE,
469 .idVendor = 0x04DD,
470 .idProduct = 0x8005, /* A-300 */
471 ZAURUS_MASTER_INTERFACE,
472 .driver_info = 0,
473}, {
474 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
475 | USB_DEVICE_ID_MATCH_DEVICE,
476 .idVendor = 0x04DD,
477 .idProduct = 0x8006, /* B-500/SL-5600 */
478 ZAURUS_MASTER_INTERFACE,
479 .driver_info = 0,
480}, {
481 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
482 | USB_DEVICE_ID_MATCH_DEVICE,
483 .idVendor = 0x04DD,
484 .idProduct = 0x8007, /* C-700 */
485 ZAURUS_MASTER_INTERFACE,
486 .driver_info = 0,
487}, {
488 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
489 | USB_DEVICE_ID_MATCH_DEVICE,
490 .idVendor = 0x04DD,
491 .idProduct = 0x9031, /* C-750 C-760 */
492 ZAURUS_MASTER_INTERFACE,
493 .driver_info = 0,
494}, {
495 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
496 | USB_DEVICE_ID_MATCH_DEVICE,
497 .idVendor = 0x04DD,
498 .idProduct = 0x9032, /* SL-6000 */
499 ZAURUS_MASTER_INTERFACE,
500 .driver_info = 0,
501}, {
502 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
503 | USB_DEVICE_ID_MATCH_DEVICE,
504 .idVendor = 0x04DD,
505 /* reported with some C860 units */
506 .idProduct = 0x9050, /* C-860 */
507 ZAURUS_MASTER_INTERFACE,
508 .driver_info = 0,
509},
510
511/* Olympus has some models with a Zaurus-compatible option.
512 * R-1000 uses a FreeScale i.MXL cpu (ARMv4T)
513 */
514{
515 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
516 | USB_DEVICE_ID_MATCH_DEVICE,
517 .idVendor = 0x07B4,
518 .idProduct = 0x0F02, /* R-1000 */
519 ZAURUS_MASTER_INTERFACE,
520 .driver_info = 0,
521},
522
523/*
524 * WHITELIST!!!
525 *
526 * CDC Ether uses two interfaces, not necessarily consecutive.
527 * We match the main interface, ignoring the optional device
528 * class so we could handle devices that aren't exclusively
529 * CDC ether.
530 *
531 * NOTE: this match must come AFTER entries blacklisting devices
532 * because of bugs/quirks in a given product (like Zaurus, above).
533 */
534{
535 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
536 USB_CDC_PROTO_NONE),
537 .driver_info = (unsigned long) &cdc_info,
538},
539 { }, // END
540};
541MODULE_DEVICE_TABLE(usb, products);
542
543static struct usb_driver cdc_driver = {
544 .name = "cdc_ether",
545 .id_table = products,
546 .probe = usbnet_probe,
547 .disconnect = usbnet_disconnect,
548 .suspend = usbnet_suspend,
549 .resume = usbnet_resume,
550};
551
552
553static int __init cdc_init(void)
554{
555 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
556 < sizeof(struct cdc_state)));
557
558 return usb_register(&cdc_driver);
559}
560module_init(cdc_init);
561
562static void __exit cdc_exit(void)
563{
564 usb_deregister(&cdc_driver);
565}
566module_exit(cdc_exit);
567
568MODULE_AUTHOR("David Brownell");
569MODULE_DESCRIPTION("USB CDC Ethernet devices");
570MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
new file mode 100644
index 000000000000..bc62b012602b
--- /dev/null
+++ b/drivers/net/usb/cdc_subset.c
@@ -0,0 +1,344 @@
1/*
2 * Simple "CDC Subset" USB Networking Links
3 * Copyright (C) 2000-2005 by David Brownell
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/kmod.h>
22#include <linux/init.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/ethtool.h>
26#include <linux/workqueue.h>
27#include <linux/mii.h>
28#include <linux/usb.h>
29
30#include "usbnet.h"
31
32
33/*
34 * This supports simple USB network links that don't require any special
35 * framing or hardware control operations. The protocol used here is a
36 * strict subset of CDC Ethernet, with three basic differences reflecting
37 * the goal that almost any hardware should run it:
38 *
39 * - Minimal runtime control: one interface, no altsettings, and
40 * no vendor or class specific control requests. If a device is
41 * configured, it is allowed to exchange packets with the host.
42 * Fancier models would mean not working on some hardware.
43 *
44 * - Minimal manufacturing control: no IEEE "Organizationally
 45 * Unique ID" required, or an EEPROM to store one. Each host uses
46 * one random "locally assigned" Ethernet address instead, which can
47 * of course be overridden using standard tools like "ifconfig".
48 * (With 2^46 such addresses, same-net collisions are quite rare.)
49 *
50 * - There is no additional framing data for USB. Packets are written
51 * exactly as in CDC Ethernet, starting with an Ethernet header and
52 * terminated by a short packet. However, the host will never send a
53 * zero length packet; some systems can't handle those robustly.
54 *
55 * Anything that can transmit and receive USB bulk packets can implement
56 * this protocol. That includes both smart peripherals and quite a lot
57 * of "host-to-host" USB cables (which embed two devices back-to-back).
58 *
59 * Note that although Linux may use many of those host-to-host links
60 * with this "cdc_subset" framing, that doesn't mean there may not be a
61 * better approach. Handling the "other end unplugs/replugs" scenario
62 * well tends to require chip-specific vendor requests. Also, Windows
63 * peers at the other end of host-to-host cables may expect their own
64 * framing to be used rather than this "cdc_subset" model.
65 */
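/*
 * The "locally assigned" address mentioned above is nothing exotic: usbnet
 * fills in a random MAC with the multicast bit cleared and the locally
 * administered bit set, roughly this sketch (see random_ether_addr() in
 * <linux/etherdevice.h> for the real helper):
 *
 *	u8 addr[ETH_ALEN];
 *
 *	get_random_bytes(addr, ETH_ALEN);
 *	addr[0] &= 0xfe;	// clear the multicast bit
 *	addr[0] |= 0x02;	// set the locally administered bit
 */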
66
67#if defined(CONFIG_USB_EPSON2888) || defined(CONFIG_USB_ARMLINUX)
68/* PDA style devices are always connected if present */
69static int always_connected (struct usbnet *dev)
70{
71 return 0;
72}
73#endif
74
75#ifdef CONFIG_USB_ALI_M5632
76#define HAVE_HARDWARE
77
78/*-------------------------------------------------------------------------
79 *
80 * ALi M5632 driver ... does high speed
81 *
82 * NOTE that the MS-Windows drivers for this chip use some funky and
83 * (naturally) undocumented 7-byte prefix to each packet, so this is a
84 * case where we don't currently interoperate. Also, once you unplug
85 * one end of the cable, you need to replug the other end too ... since
86 * chip docs are unavailable, there's no way to reset the relevant state
87 * short of a power cycle.
88 *
89 *-------------------------------------------------------------------------*/
90
91static const struct driver_info ali_m5632_info = {
92 .description = "ALi M5632",
93};
94
95#endif
96
97
98#ifdef CONFIG_USB_AN2720
99#define HAVE_HARDWARE
100
101/*-------------------------------------------------------------------------
102 *
103 * AnchorChips 2720 driver ... http://www.cypress.com
104 *
105 * This doesn't seem to have a way to detect whether the peer is
106 * connected, or need any reset handshaking. It's got pretty big
107 * internal buffers (handles most of a frame's worth of data).
108 * Chip data sheets don't describe any vendor control messages.
109 *
110 *-------------------------------------------------------------------------*/
111
112static const struct driver_info an2720_info = {
113 .description = "AnchorChips/Cypress 2720",
114 // no reset available!
115 // no check_connect available!
116
117 .in = 2, .out = 2, // direction distinguishes these
118};
119
120#endif /* CONFIG_USB_AN2720 */
121
122
123#ifdef CONFIG_USB_BELKIN
124#define HAVE_HARDWARE
125
126/*-------------------------------------------------------------------------
127 *
128 * Belkin F5U104 ... two NetChip 2280 devices + Atmel AVR microcontroller
129 *
130 * ... also two eTEK designs, including one sold as "Advance USBNET"
131 *
132 *-------------------------------------------------------------------------*/
133
134static const struct driver_info belkin_info = {
135 .description = "Belkin, eTEK, or compatible",
136};
137
138#endif /* CONFIG_USB_BELKIN */
139
140
141
142#ifdef CONFIG_USB_EPSON2888
143#define HAVE_HARDWARE
144
145/*-------------------------------------------------------------------------
146 *
147 * EPSON USB clients
148 *
149 * This is the same idea as Linux PDAs (below) except the firmware in the
150 * device might not be Tux-powered. Epson provides reference firmware that
151 * implements this interface. Product developers can reuse or modify that
152 * code, such as by using their own product and vendor codes.
153 *
154 * Support was from Juro Bystricky <bystricky.juro@erd.epson.com>
155 *
156 *-------------------------------------------------------------------------*/
157
158static const struct driver_info epson2888_info = {
159 .description = "Epson USB Device",
160 .check_connect = always_connected,
161
162 .in = 4, .out = 3,
163};
164
165#endif /* CONFIG_USB_EPSON2888 */
166
167
168/*-------------------------------------------------------------------------
169 *
170 * info from Jonathan McDowell <noodles@earth.li>
171 *
172 *-------------------------------------------------------------------------*/
173#ifdef CONFIG_USB_KC2190
174#define HAVE_HARDWARE
175static const struct driver_info kc2190_info = {
176 .description = "KC Technology KC-190",
177};
178#endif /* CONFIG_USB_KC2190 */
179
180
181#ifdef CONFIG_USB_ARMLINUX
182#define HAVE_HARDWARE
183
184/*-------------------------------------------------------------------------
185 *
186 * Intel's SA-1100 chip integrates basic USB support, and is used
187 * in PDAs like some iPaqs, the Yopy, some Zaurus models, and more.
188 * When they run Linux, arch/arm/mach-sa1100/usb-eth.c may be used to
189 * network using minimal USB framing data.
190 *
191 * This describes the driver currently in standard ARM Linux kernels.
192 * The Zaurus uses a different driver (see later).
193 *
194 * PXA25x and PXA210 use XScale cores (ARM v5TE) with better USB support
195 * and different USB endpoint numbering than the SA1100 devices. The
196 * mach-pxa/usb-eth.c driver re-uses the device ids from mach-sa1100
197 * so we rely on the endpoint descriptors.
198 *
199 *-------------------------------------------------------------------------*/
200
201static const struct driver_info linuxdev_info = {
202 .description = "Linux Device",
203 .check_connect = always_connected,
204};
205
206static const struct driver_info yopy_info = {
207 .description = "Yopy",
208 .check_connect = always_connected,
209};
210
211static const struct driver_info blob_info = {
212 .description = "Boot Loader OBject",
213 .check_connect = always_connected,
214};
215
216#endif /* CONFIG_USB_ARMLINUX */
217
218
219/*-------------------------------------------------------------------------*/
220
221#ifndef HAVE_HARDWARE
222#error You need to configure some hardware for this driver
223#endif
224
225/*
226 * chip vendor names won't normally be on the cables, and
227 * may not be on the device.
228 */
229
230static const struct usb_device_id products [] = {
231
232#ifdef CONFIG_USB_ALI_M5632
233{
234 USB_DEVICE (0x0402, 0x5632), // ALi defaults
235 .driver_info = (unsigned long) &ali_m5632_info,
236},
237{
238 USB_DEVICE (0x182d,0x207c), // SiteCom CN-124
239 .driver_info = (unsigned long) &ali_m5632_info,
240},
241#endif
242
243#ifdef CONFIG_USB_AN2720
244{
245 USB_DEVICE (0x0547, 0x2720), // AnchorChips defaults
246 .driver_info = (unsigned long) &an2720_info,
247}, {
248 USB_DEVICE (0x0547, 0x2727), // Xircom PGUNET
249 .driver_info = (unsigned long) &an2720_info,
250},
251#endif
252
253#ifdef CONFIG_USB_BELKIN
254{
255 USB_DEVICE (0x050d, 0x0004), // Belkin
256 .driver_info = (unsigned long) &belkin_info,
257}, {
258 USB_DEVICE (0x056c, 0x8100), // eTEK
259 .driver_info = (unsigned long) &belkin_info,
260}, {
261 USB_DEVICE (0x0525, 0x9901), // Advance USBNET (eTEK)
262 .driver_info = (unsigned long) &belkin_info,
263},
264#endif
265
266#ifdef CONFIG_USB_EPSON2888
267{
268 USB_DEVICE (0x0525, 0x2888), // EPSON USB client
269 .driver_info = (unsigned long) &epson2888_info,
270},
271#endif
272
273#ifdef CONFIG_USB_KC2190
274{
275 USB_DEVICE (0x050f, 0x0190), // KC-190
276 .driver_info = (unsigned long) &kc2190_info,
277},
278#endif
279
280#ifdef CONFIG_USB_ARMLINUX
281/*
282 * SA-1100 using standard ARM Linux kernels, or compatible.
283 * Often used when talking to Linux PDAs (iPaq, Yopy, etc).
284 * The sa-1100 "usb-eth" driver handles the basic framing.
285 *
286 * PXA25x or PXA210 ... these use a "usb-eth" driver much like
287 * the sa1100 one, but hardware uses different endpoint numbers.
288 *
289 * Or the Linux "Ethernet" gadget on hardware that can't talk
290 * CDC Ethernet (e.g., no altsettings), in either of two modes:
291 * - acting just like the old "usb-eth" firmware, though
292 * the implementation is different
293 * - supporting RNDIS as the first/default configuration for
294 * MS-Windows interop; Linux needs to use the other config
295 */
296{
297 // 1183 = 0x049F, both used as hex values?
298 // Compaq "Itsy" vendor/product id
299 USB_DEVICE (0x049F, 0x505A), // usb-eth, or compatible
300 .driver_info = (unsigned long) &linuxdev_info,
301}, {
302 USB_DEVICE (0x0E7E, 0x1001), // G.Mate "Yopy"
303 .driver_info = (unsigned long) &yopy_info,
304}, {
305 USB_DEVICE (0x8086, 0x07d3), // "blob" bootloader
306 .driver_info = (unsigned long) &blob_info,
307}, {
308 // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config
309 // e.g. Gumstix, current OpenZaurus, ...
310 USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
311 .driver_info = (unsigned long) &linuxdev_info,
312},
313#endif
314
315 { }, // END
316};
317MODULE_DEVICE_TABLE(usb, products);
318
319/*-------------------------------------------------------------------------*/
320
321static struct usb_driver cdc_subset_driver = {
322 .name = "cdc_subset",
323 .probe = usbnet_probe,
324 .suspend = usbnet_suspend,
325 .resume = usbnet_resume,
326 .disconnect = usbnet_disconnect,
327 .id_table = products,
328};
329
330static int __init cdc_subset_init(void)
331{
332 return usb_register(&cdc_subset_driver);
333}
334module_init(cdc_subset_init);
335
336static void __exit cdc_subset_exit(void)
337{
338 usb_deregister(&cdc_subset_driver);
339}
340module_exit(cdc_subset_exit);
341
342MODULE_AUTHOR("David Brownell");
343MODULE_DESCRIPTION("Simple 'CDC Subset' USB networking links");
344MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
new file mode 100644
index 000000000000..a67638601477
--- /dev/null
+++ b/drivers/net/usb/dm9601.c
@@ -0,0 +1,619 @@
1/*
2 * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices
3 *
4 * Peter Korsgaard <jacmet@sunsite.dk>
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10
11//#define DEBUG
12
13#include <linux/module.h>
14#include <linux/sched.h>
15#include <linux/stddef.h>
16#include <linux/init.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/ethtool.h>
20#include <linux/mii.h>
21#include <linux/usb.h>
22#include <linux/crc32.h>
23
24#include "usbnet.h"
25
26/* datasheet:
27 http://www.davicom.com.tw/big5/download/Data%20Sheet/DM9601-DS-P01-930914.pdf
28*/
29
30/* control requests */
31#define DM_READ_REGS 0x00
32#define DM_WRITE_REGS 0x01
33#define DM_READ_MEMS 0x02
34#define DM_WRITE_REG 0x03
35#define DM_WRITE_MEMS 0x05
36#define DM_WRITE_MEM 0x07
37
38/* registers */
39#define DM_NET_CTRL 0x00
40#define DM_RX_CTRL 0x05
41#define DM_SHARED_CTRL 0x0b
42#define DM_SHARED_ADDR 0x0c
43#define DM_SHARED_DATA 0x0d /* low + high */
44#define DM_PHY_ADDR 0x10 /* 6 bytes */
45#define DM_MCAST_ADDR 0x16 /* 8 bytes */
46#define DM_GPR_CTRL 0x1e
47#define DM_GPR_DATA 0x1f
48
49#define DM_MAX_MCAST 64
50#define DM_MCAST_SIZE 8
51#define DM_EEPROM_LEN 256
52#define DM_TX_OVERHEAD 2 /* 2 byte header */
53#define DM_RX_OVERHEAD 7 /* 3 byte header + 4 byte crc tail */
54#define DM_TIMEOUT 1000
55
56
57static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
58{
59 devdbg(dev, "dm_read() reg=0x%02x length=%d", reg, length);
60 return usb_control_msg(dev->udev,
61 usb_rcvctrlpipe(dev->udev, 0),
62 DM_READ_REGS,
63 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
64 0, reg, data, length, USB_CTRL_SET_TIMEOUT);
65}
66
67static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
68{
69 return dm_read(dev, reg, 1, value);
70}
71
72static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
73{
74 devdbg(dev, "dm_write() reg=0x%02x, length=%d", reg, length);
75 return usb_control_msg(dev->udev,
76 usb_sndctrlpipe(dev->udev, 0),
77 DM_WRITE_REGS,
78 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
79 0, reg, data, length, USB_CTRL_SET_TIMEOUT);
80}
81
82static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
83{
84 devdbg(dev, "dm_write_reg() reg=0x%02x, value=0x%02x", reg, value);
85 return usb_control_msg(dev->udev,
86 usb_sndctrlpipe(dev->udev, 0),
87 DM_WRITE_REG,
88 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
89 value, reg, NULL, 0, USB_CTRL_SET_TIMEOUT);
90}
91
92static void dm_write_async_callback(struct urb *urb)
93{
94 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
95
96 if (urb->status < 0)
97 printk(KERN_DEBUG "dm_write_async_callback() failed with %d",
98 urb->status);
99
100 kfree(req);
101 usb_free_urb(urb);
102}
103
104static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
105{
106 struct usb_ctrlrequest *req;
107 struct urb *urb;
108 int status;
109
110 devdbg(dev, "dm_write_async() reg=0x%02x length=%d", reg, length);
111
112 urb = usb_alloc_urb(0, GFP_ATOMIC);
113 if (!urb) {
114 deverr(dev, "Error allocating URB in dm_write_async!");
115 return;
116 }
117
118 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
119 if (!req) {
120 deverr(dev, "Failed to allocate memory for control request");
121 usb_free_urb(urb);
122 return;
123 }
124
125 req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
126 req->bRequest = DM_WRITE_REGS;
127 req->wValue = 0;
128 req->wIndex = cpu_to_le16(reg);
129 req->wLength = cpu_to_le16(length);
130
131 usb_fill_control_urb(urb, dev->udev,
132 usb_sndctrlpipe(dev->udev, 0),
133 (void *)req, data, length,
134 dm_write_async_callback, req);
135
136 status = usb_submit_urb(urb, GFP_ATOMIC);
137 if (status < 0) {
138 deverr(dev, "Error submitting the control message: status=%d",
139 status);
140 kfree(req);
141 usb_free_urb(urb);
142 }
143}
144
145static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
146{
147 struct usb_ctrlrequest *req;
148 struct urb *urb;
149 int status;
150
151 devdbg(dev, "dm_write_reg_async() reg=0x%02x value=0x%02x",
152 reg, value);
153
154 urb = usb_alloc_urb(0, GFP_ATOMIC);
155 if (!urb) {
156 deverr(dev, "Error allocating URB in dm_write_async!");
157 return;
158 }
159
160 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
161 if (!req) {
162 deverr(dev, "Failed to allocate memory for control request");
163 usb_free_urb(urb);
164 return;
165 }
166
167 req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
168 req->bRequest = DM_WRITE_REG;
169 req->wValue = cpu_to_le16(value);
170 req->wIndex = cpu_to_le16(reg);
171 req->wLength = 0;
172
173 usb_fill_control_urb(urb, dev->udev,
174 usb_sndctrlpipe(dev->udev, 0),
175 (void *)req, NULL, 0, dm_write_async_callback, req);
176
177 status = usb_submit_urb(urb, GFP_ATOMIC);
178 if (status < 0) {
179 deverr(dev, "Error submitting the control message: status=%d",
180 status);
181 kfree(req);
182 usb_free_urb(urb);
183 }
184}
185
186static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, u16 *value)
187{
188 int ret, i;
189
190 mutex_lock(&dev->phy_mutex);
191
192 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
193 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0xc : 0x4);
194
195 for (i = 0; i < DM_TIMEOUT; i++) {
196 u8 tmp;
197
198 udelay(1);
199 ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
200 if (ret < 0)
201 goto out;
202
203 /* ready */
204 if ((tmp & 1) == 0)
205 break;
206 }
207
208 if (i == DM_TIMEOUT) {
209 deverr(dev, "%s read timed out!", phy ? "phy" : "eeprom");
210 ret = -EIO;
211 goto out;
212 }
213
214 dm_write_reg(dev, DM_SHARED_CTRL, 0x0);
215 ret = dm_read(dev, DM_SHARED_DATA, 2, value);
216
217 devdbg(dev, "read shared %d 0x%02x returned 0x%04x, %d",
218 phy, reg, *value, ret);
219
220 out:
221 mutex_unlock(&dev->phy_mutex);
222 return ret;
223}
224
225static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, u16 value)
226{
227 int ret, i;
228
229 mutex_lock(&dev->phy_mutex);
230
231 ret = dm_write(dev, DM_SHARED_DATA, 2, &value);
232 if (ret < 0)
233 goto out;
234
235 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
236 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14);
237
238 for (i = 0; i < DM_TIMEOUT; i++) {
239 u8 tmp;
240
241 udelay(1);
242 ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
243 if (ret < 0)
244 goto out;
245
246 /* ready */
247 if ((tmp & 1) == 0)
248 break;
249 }
250
251 if (i == DM_TIMEOUT) {
252 deverr(dev, "%s write timed out!", phy ? "phy" : "eeprom");
253 ret = -EIO;
254 goto out;
255 }
256
257 dm_write_reg(dev, DM_SHARED_CTRL, 0x0);
258
259out:
260 mutex_unlock(&dev->phy_mutex);
261 return ret;
262}
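/*
 * The "shared word" accessors above drive a little indirection scheme
 * (register roles inferred from the access sequence; the datasheet has the
 * authoritative bit definitions): DM_SHARED_ADDR selects an EEPROM word, or
 * with bit 0x40 set an internal PHY register; DM_SHARED_CTRL kicks off the
 * transfer (0x04/0x0c read EEPROM/PHY, 0x14/0x1c write them) and its bit 0
 * is polled as a busy flag for up to DM_TIMEOUT iterations; the 16-bit
 * payload moves through DM_SHARED_DATA.
 */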
263
264static int dm_read_eeprom_word(struct usbnet *dev, u8 offset, void *value)
265{
266 return dm_read_shared_word(dev, 0, offset, value);
267}
268
269
270
271static int dm9601_get_eeprom_len(struct net_device *dev)
272{
273 return DM_EEPROM_LEN;
274}
275
276static int dm9601_get_eeprom(struct net_device *net,
277 struct ethtool_eeprom *eeprom, u8 * data)
278{
279 struct usbnet *dev = netdev_priv(net);
280 u16 *ebuf = (u16 *) data;
281 int i;
282
283 /* access is 16bit */
284 if ((eeprom->offset % 2) || (eeprom->len % 2))
285 return -EINVAL;
286
287 for (i = 0; i < eeprom->len / 2; i++) {
288 if (dm_read_eeprom_word(dev, eeprom->offset / 2 + i,
289 &ebuf[i]) < 0)
290 return -EINVAL;
291 }
292 return 0;
293}
294
295static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
296{
297 struct usbnet *dev = netdev_priv(netdev);
298
299 u16 res;
300
301 if (phy_id) {
302 devdbg(dev, "Only internal phy supported");
303 return 0;
304 }
305
306 dm_read_shared_word(dev, 1, loc, &res);
307
308 devdbg(dev,
309 "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x",
310 phy_id, loc, le16_to_cpu(res));
311
312 return le16_to_cpu(res);
313}
314
315static void dm9601_mdio_write(struct net_device *netdev, int phy_id, int loc,
316 int val)
317{
318 struct usbnet *dev = netdev_priv(netdev);
319 u16 res = cpu_to_le16(val);
320
321 if (phy_id) {
322 devdbg(dev, "Only internal phy supported");
323 return;
324 }
325
326 devdbg(dev,"dm9601_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x",
327 phy_id, loc, val);
328
329 dm_write_shared_word(dev, 1, loc, res);
330}
331
332static void dm9601_get_drvinfo(struct net_device *net,
333 struct ethtool_drvinfo *info)
334{
335 /* Inherit standard device info */
336 usbnet_get_drvinfo(net, info);
337 info->eedump_len = DM_EEPROM_LEN;
338}
339
340static u32 dm9601_get_link(struct net_device *net)
341{
342 struct usbnet *dev = netdev_priv(net);
343
344 return mii_link_ok(&dev->mii);
345}
346
347static int dm9601_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
348{
349 struct usbnet *dev = netdev_priv(net);
350
351 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
352}
353
354static struct ethtool_ops dm9601_ethtool_ops = {
355 .get_drvinfo = dm9601_get_drvinfo,
356 .get_link = dm9601_get_link,
357 .get_msglevel = usbnet_get_msglevel,
358 .set_msglevel = usbnet_set_msglevel,
359 .get_eeprom_len = dm9601_get_eeprom_len,
360 .get_eeprom = dm9601_get_eeprom,
361 .get_settings = usbnet_get_settings,
362 .set_settings = usbnet_set_settings,
363 .nway_reset = usbnet_nway_reset,
364};
365
366static void dm9601_set_multicast(struct net_device *net)
367{
368 struct usbnet *dev = netdev_priv(net);
369 /* We use the 20 byte dev->data for our 8 byte filter buffer
370 * to avoid allocating memory that is tricky to free later */
371 u8 *hashes = (u8 *) & dev->data;
372 u8 rx_ctl = 0x01;
373
374 memset(hashes, 0x00, DM_MCAST_SIZE);
375 hashes[DM_MCAST_SIZE - 1] |= 0x80; /* broadcast address */
376
377 if (net->flags & IFF_PROMISC) {
378 rx_ctl |= 0x02;
379 } else if (net->flags & IFF_ALLMULTI || net->mc_count > DM_MAX_MCAST) {
380 rx_ctl |= 0x04;
381 } else if (net->mc_count) {
382 struct dev_mc_list *mc_list = net->mc_list;
383 int i;
384
385 for (i = 0; i < net->mc_count; i++) {
386 u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
387 hashes[crc >> 3] |= 1 << (crc & 0x7);
388 }
389 }
390
391 dm_write_async(dev, DM_MCAST_ADDR, DM_MCAST_SIZE, hashes);
392 dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl);
393}
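/*
 * Illustrative sketch only (assumed helper, not part of this driver):
 * how one bit of the 8-byte multicast hash table used above is set.
 * The top six bits of the Ethernet CRC of the address pick the bit.
 */
static inline void dm9601_example_hash_set(u8 *hashes, const u8 *mc_addr)
{
	u32 crc = ether_crc(ETH_ALEN, mc_addr) >> 26;	/* 6-bit hash index */

	hashes[crc >> 3] |= 1 << (crc & 0x7);		/* byte crc/8, bit crc%8 */
}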
394
395static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
396{
397 int ret;
398
399 ret = usbnet_get_endpoints(dev, intf);
400 if (ret)
401 goto out;
402
403 dev->net->do_ioctl = dm9601_ioctl;
404 dev->net->set_multicast_list = dm9601_set_multicast;
405 dev->net->ethtool_ops = &dm9601_ethtool_ops;
406 dev->net->hard_header_len += DM_TX_OVERHEAD;
407 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
408 dev->rx_urb_size = dev->net->mtu + DM_RX_OVERHEAD;
409
410 dev->mii.dev = dev->net;
411 dev->mii.mdio_read = dm9601_mdio_read;
412 dev->mii.mdio_write = dm9601_mdio_write;
413 dev->mii.phy_id_mask = 0x1f;
414 dev->mii.reg_num_mask = 0x1f;
415
416 /* reset */
417 ret = dm_write_reg(dev, DM_NET_CTRL, 1);
418 udelay(20);
419
420 /* read MAC */
421 ret = dm_read(dev, DM_PHY_ADDR, ETH_ALEN, dev->net->dev_addr);
422 if (ret < 0) {
423 printk(KERN_ERR "Error reading MAC address\n");
424 ret = -ENODEV;
425 goto out;
426 }
427
428
429 /* power up phy */
430 dm_write_reg(dev, DM_GPR_CTRL, 1);
431 dm_write_reg(dev, DM_GPR_DATA, 0);
432
433 /* receive broadcast packets */
434 dm9601_set_multicast(dev->net);
435
436 dm9601_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
437 dm9601_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
438 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
439 mii_nway_restart(&dev->mii);
440
441out:
442 return ret;
443}
444
445static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
446{
447 u8 status;
448 int len;
449
450 /* format:
451 b0: rx status
452 b1: packet length (incl crc) low
453 b2: packet length (incl crc) high
454 b3..n-4: packet data
455 bn-3..bn: ethernet crc
456 */
457
458 if (unlikely(skb->len < DM_RX_OVERHEAD)) {
459 dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
460 return 0;
461 }
462
463 status = skb->data[0];
464 len = (skb->data[1] | (skb->data[2] << 8)) - 4;
465
466 if (unlikely(status & 0xbf)) {
467 if (status & 0x01) dev->stats.rx_fifo_errors++;
468 if (status & 0x02) dev->stats.rx_crc_errors++;
469 if (status & 0x04) dev->stats.rx_frame_errors++;
470 if (status & 0x20) dev->stats.rx_missed_errors++;
471 if (status & 0x90) dev->stats.rx_length_errors++;
472 return 0;
473 }
474
475 skb_pull(skb, 3);
476 skb_trim(skb, len);
477
478 return 1;
479}
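/*
 * Illustrative sketch only (assumed helper, not part of this driver):
 * how the 3-byte header described above maps onto a raw rx buffer.
 * dm9601_rx_fixup() does the same thing in place on the skb.
 */
static inline int dm9601_example_parse_rx_header(const u8 *buf, u8 *status)
{
	*status = buf[0];			/* rx status bits */
	return (buf[1] | (buf[2] << 8)) - 4;	/* payload length, minus 4-byte CRC */
}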
480
481static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
482 gfp_t flags)
483{
484 int len;
485
486 /* format:
487 b0: packet length low
488 b1: packet length high
489	   b2..n: packet data
490 */
491
492 if (skb_headroom(skb) < DM_TX_OVERHEAD) {
493 struct sk_buff *skb2;
494
495 skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
496 dev_kfree_skb_any(skb);
497 skb = skb2;
498 if (!skb)
499 return NULL;
500 }
501
502 __skb_push(skb, DM_TX_OVERHEAD);
503
504 len = skb->len;
505	/* usbnet adds a pad byte if the length is an exact multiple of the
506	   USB packet size; if so, adjust the length value in the header */
507 if ((len % dev->maxpacket) == 0)
508 len++;
509
510 skb->data[0] = len;
511 skb->data[1] = len >> 8;
512
513 return skb;
514}
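/*
 * Illustrative sketch only (assumed helper, not part of this driver):
 * mirrors the length/padding logic of dm9601_tx_fixup() above.  'len'
 * is the frame length after the 2-byte header has been pushed; if it
 * is an exact multiple of maxpacket, usbnet appends a pad byte, so the
 * advertised length is bumped by one before it is split little-endian.
 */
static inline void dm9601_example_fill_tx_header(u8 *hdr, int len, int maxpacket)
{
	if ((len % maxpacket) == 0)	/* usbnet will append a pad byte */
		len++;
	hdr[0] = len;			/* length, low byte */
	hdr[1] = len >> 8;		/* length, high byte */
}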
515
516static void dm9601_status(struct usbnet *dev, struct urb *urb)
517{
518 int link;
519 u8 *buf;
520
521 /* format:
522 b0: net status
523 b1: tx status 1
524 b2: tx status 2
525 b3: rx status
526 b4: rx overflow
527 b5: rx count
528 b6: tx count
529 b7: gpr
530 */
531
532 if (urb->actual_length < 8)
533 return;
534
535 buf = urb->transfer_buffer;
536
537 link = !!(buf[0] & 0x40);
538 if (netif_carrier_ok(dev->net) != link) {
539 if (link) {
540 netif_carrier_on(dev->net);
541 usbnet_defer_kevent (dev, EVENT_LINK_RESET);
542 }
543 else
544 netif_carrier_off(dev->net);
545 devdbg(dev, "Link Status is: %d", link);
546 }
547}
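/*
 * Illustrative sketch only (assumed helper, not part of this driver):
 * extracts the link indication from byte 0 of the 8-byte interrupt
 * status block described above, exactly as dm9601_status() does.
 */
static inline int dm9601_example_link_up(const u8 *status_buf)
{
	return !!(status_buf[0] & 0x40);	/* bit 6 of net status = link up */
}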
548
549static int dm9601_link_reset(struct usbnet *dev)
550{
551 struct ethtool_cmd ecmd;
552
553 mii_check_media(&dev->mii, 1, 1);
554 mii_ethtool_gset(&dev->mii, &ecmd);
555
556 devdbg(dev, "link_reset() speed: %d duplex: %d",
557 ecmd.speed, ecmd.duplex);
558
559 return 0;
560}
561
562static const struct driver_info dm9601_info = {
563 .description = "Davicom DM9601 USB Ethernet",
564 .flags = FLAG_ETHER,
565 .bind = dm9601_bind,
566 .rx_fixup = dm9601_rx_fixup,
567 .tx_fixup = dm9601_tx_fixup,
568 .status = dm9601_status,
569 .link_reset = dm9601_link_reset,
570 .reset = dm9601_link_reset,
571};
572
573static const struct usb_device_id products[] = {
574 {
575 USB_DEVICE(0x07aa, 0x9601), /* Corega FEther USB-TXC */
576 .driver_info = (unsigned long)&dm9601_info,
577 },
578 {
579 USB_DEVICE(0x0a46, 0x9601), /* Davicom USB-100 */
580 .driver_info = (unsigned long)&dm9601_info,
581 },
582 {
583 USB_DEVICE(0x0a46, 0x6688), /* ZT6688 USB NIC */
584 .driver_info = (unsigned long)&dm9601_info,
585 },
586 {
587 USB_DEVICE(0x0a46, 0x0268), /* ShanTou ST268 USB NIC */
588 .driver_info = (unsigned long)&dm9601_info,
589 },
590 {}, // END
591};
592
593MODULE_DEVICE_TABLE(usb, products);
594
595static struct usb_driver dm9601_driver = {
596 .name = "dm9601",
597 .id_table = products,
598 .probe = usbnet_probe,
599 .disconnect = usbnet_disconnect,
600 .suspend = usbnet_suspend,
601 .resume = usbnet_resume,
602};
603
604static int __init dm9601_init(void)
605{
606 return usb_register(&dm9601_driver);
607}
608
609static void __exit dm9601_exit(void)
610{
611 usb_deregister(&dm9601_driver);
612}
613
614module_init(dm9601_init);
615module_exit(dm9601_exit);
616
617MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
618MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
619MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
new file mode 100644
index 000000000000..031cf5ca4dbb
--- /dev/null
+++ b/drivers/net/usb/gl620a.c
@@ -0,0 +1,245 @@
1/*
2 * GeneSys GL620USB-A based links
3 * Copyright (C) 2001 by Jiun-Jie Huang <huangjj@genesyslogic.com.tw>
4 * Copyright (C) 2001 by Stanislav Brabec <utx@penguin.cz>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21// #define DEBUG // error path messages, extra info
22// #define VERBOSE // more; success messages
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/workqueue.h>
30#include <linux/mii.h>
31#include <linux/usb.h>
32
33#include "usbnet.h"
34
35
36/*
37 * GeneSys GL620USB-A (www.genesyslogic.com.tw)
38 *
39 * ... should partially interop with the Win32 driver for this hardware.
40 * The GeneSys docs imply there's some NDIS issue motivating this framing.
41 *
42 * Some info from GeneSys:
43 * - GL620USB-A is full duplex; GL620USB is only half duplex for bulk.
44 * (Some cables, like the BAFO-100c, use the half duplex version.)
45 * - For the full duplex model, the low bit of the version code says
46 * which side is which ("left/right").
47 * - For the half duplex type, a control/interrupt handshake settles
48 * the transfer direction. (That's disabled here, partially coded.)
49 * A control URB would block until other side writes an interrupt.
50 *
51 * Original code from Jiun-Jie Huang <huangjj@genesyslogic.com.tw>
52 * and merged into "usbnet" by Stanislav Brabec <utx@penguin.cz>.
53 */
54
55// control msg write command
56#define GENELINK_CONNECT_WRITE 0xF0
57// interrupt pipe index
58#define GENELINK_INTERRUPT_PIPE 0x03
59// interrupt read buffer size
60#define INTERRUPT_BUFSIZE 0x08
61// interrupt pipe interval value
62#define GENELINK_INTERRUPT_INTERVAL 0x10
63// max transmit packet number per transmit
64#define GL_MAX_TRANSMIT_PACKETS 32
65// max packet length
66#define GL_MAX_PACKET_LEN 1514
67// max receive buffer size
68#define GL_RCV_BUF_SIZE \
69 (((GL_MAX_PACKET_LEN + 4) * GL_MAX_TRANSMIT_PACKETS) + 4)
70
71struct gl_packet {
72 __le32 packet_length;
73 char packet_data [1];
74};
75
76struct gl_header {
77 __le32 packet_count;
78 struct gl_packet packets;
79};
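/*
 * Illustrative sketch only (assumed helper, not part of this driver):
 * a bulk-in buffer carries a 32-bit little-endian packet count and then
 * <count> packets, each a 32-bit little-endian length followed by that
 * many data bytes; this shows the pointer walk genelink_rx_fixup() uses.
 */
static inline struct gl_packet *gl_example_next_packet(struct gl_packet *p)
{
	u32 size = le32_to_cpu(p->packet_length);

	/* the next gl_packet starts right after this packet's data bytes */
	return (struct gl_packet *)&p->packet_data[size];
}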
80
81static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
82{
83 struct gl_header *header;
84 struct gl_packet *packet;
85 struct sk_buff *gl_skb;
86 u32 size;
87 u32 count;
88
89 header = (struct gl_header *) skb->data;
90
91 // get the packet count of the received skb
92 count = le32_to_cpu(header->packet_count);
93 if (count > GL_MAX_TRANSMIT_PACKETS) {
94 dbg("genelink: invalid received packet count %u", count);
95 return 0;
96 }
97
98 // set the current packet pointer to the first packet
99 packet = &header->packets;
100
101 // decrement the length for the packet count size 4 bytes
102 skb_pull(skb, 4);
103
104 while (count > 1) {
105 // get the packet length
106 size = le32_to_cpu(packet->packet_length);
107
108 // this may be a broken packet
109 if (size > GL_MAX_PACKET_LEN) {
110 dbg("genelink: invalid rx length %d", size);
111 return 0;
112 }
113
114 // allocate the skb for the individual packet
115 gl_skb = alloc_skb(size, GFP_ATOMIC);
116 if (gl_skb) {
117
118 // copy the packet data to the new skb
119 memcpy(skb_put(gl_skb, size),
120 packet->packet_data, size);
121 usbnet_skb_return(dev, gl_skb);
122 }
123
124 // advance to the next packet
125 packet = (struct gl_packet *)&packet->packet_data[size];
126 count--;
127
128 // shift the data pointer to the next gl_packet
129 skb_pull(skb, size + 4);
130 }
131
132 // skip the packet length field 4 bytes
133 skb_pull(skb, 4);
134
135 if (skb->len > GL_MAX_PACKET_LEN) {
136 dbg("genelink: invalid rx length %d", skb->len);
137 return 0;
138 }
139 return 1;
140}
141
142static struct sk_buff *
143genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
144{
145 int padlen;
146 int length = skb->len;
147 int headroom = skb_headroom(skb);
148 int tailroom = skb_tailroom(skb);
149 __le32 *packet_count;
150 __le32 *packet_len;
151
152 // FIXME: magic numbers, bleech
153 padlen = ((skb->len + (4 + 4*1)) % 64) ? 0 : 1;
154
155 if ((!skb_cloned(skb))
156 && ((headroom + tailroom) >= (padlen + (4 + 4*1)))) {
157 if ((headroom < (4 + 4*1)) || (tailroom < padlen)) {
158 skb->data = memmove(skb->head + (4 + 4*1),
159 skb->data, skb->len);
160 skb_set_tail_pointer(skb, skb->len);
161 }
162 } else {
163 struct sk_buff *skb2;
164 skb2 = skb_copy_expand(skb, (4 + 4*1) , padlen, flags);
165 dev_kfree_skb_any(skb);
166 skb = skb2;
167 if (!skb)
168 return NULL;
169 }
170
171 // attach the packet count to the header
172 packet_count = (__le32 *) skb_push(skb, (4 + 4*1));
173 packet_len = packet_count + 1;
174
175 *packet_count = cpu_to_le32(1);
176 *packet_len = cpu_to_le32(length);
177
178 // add padding byte
179 if ((skb->len % dev->maxpacket) == 0)
180 skb_put(skb, 1);
181
182 return skb;
183}
184
185static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
186{
187 dev->hard_mtu = GL_RCV_BUF_SIZE;
188 dev->net->hard_header_len += 4;
189 dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
190 dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
191 return 0;
192}
193
194static const struct driver_info genelink_info = {
195 .description = "Genesys GeneLink",
196 .flags = FLAG_FRAMING_GL | FLAG_NO_SETINT,
197 .bind = genelink_bind,
198 .rx_fixup = genelink_rx_fixup,
199 .tx_fixup = genelink_tx_fixup,
200
201 .in = 1, .out = 2,
202
203#ifdef GENELINK_ACK
204 .check_connect =genelink_check_connect,
205#endif
206};
207
208static const struct usb_device_id products [] = {
209
210{
211 USB_DEVICE(0x05e3, 0x0502), // GL620USB-A
212 .driver_info = (unsigned long) &genelink_info,
213},
214 /* NOT: USB_DEVICE(0x05e3, 0x0501), // GL620USB
215 * that's half duplex, not currently supported
216 */
217 { }, // END
218};
219MODULE_DEVICE_TABLE(usb, products);
220
221static struct usb_driver gl620a_driver = {
222 .name = "gl620a",
223 .id_table = products,
224 .probe = usbnet_probe,
225 .disconnect = usbnet_disconnect,
226 .suspend = usbnet_suspend,
227 .resume = usbnet_resume,
228};
229
230static int __init usbnet_init(void)
231{
232 return usb_register(&gl620a_driver);
233}
234module_init(usbnet_init);
235
236static void __exit usbnet_exit(void)
237{
238 usb_deregister(&gl620a_driver);
239}
240module_exit(usbnet_exit);
241
242MODULE_AUTHOR("Jiun-Jie Huang");
243MODULE_DESCRIPTION("GL620-USB-A Host-to-Host Link cables");
244MODULE_LICENSE("GPL");
245
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
new file mode 100644
index 000000000000..60d29440f316
--- /dev/null
+++ b/drivers/net/usb/kaweth.c
@@ -0,0 +1,1337 @@
1/****************************************************************
2 *
3 * kaweth.c - driver for KL5KUSB101 based USB->Ethernet
4 *
5 * (c) 2000 Interlan Communications
6 * (c) 2000 Stephane Alnet
7 * (C) 2001 Brad Hards
8 * (C) 2002 Oliver Neukum
9 *
10 * Original author: The Zapman <zapman@interlan.net>
11 * Inspired by, and much credit goes to Michael Rothwell
12 * <rothwell@interlan.net> for the test equipment, help, and patience
13 * Based off of (and with thanks to) Petko Manolov's pegasus.c driver.
14 * Also many thanks to Joel Silverman and Ed Surprenant at Kawasaki
15 * for providing the firmware and driver resources.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software Foundation,
29 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
30 *
31 ****************************************************************/
32
33/* TODO:
34 * Fix in_interrupt() problem
35 * Develop test procedures for USB net interfaces
36 * Run test procedures
37 * Fix bugs from previous two steps
38 * Snoop other OSs for any tricks we're not doing
39 * SMP locking
40 * Reduce arbitrary timeouts
41 * Smart multicast support
42 * Temporary MAC change support
43 * Tunable SOFs parameter - ioctl()?
44 * Ethernet stats collection
45 * Code formatting improvements
46 */
47
48#include <linux/module.h>
49#include <linux/slab.h>
50#include <linux/string.h>
51#include <linux/init.h>
52#include <linux/delay.h>
53#include <linux/netdevice.h>
54#include <linux/etherdevice.h>
55#include <linux/usb.h>
56#include <linux/types.h>
57#include <linux/ethtool.h>
58#include <linux/dma-mapping.h>
59#include <linux/wait.h>
60#include <asm/uaccess.h>
61#include <asm/semaphore.h>
62#include <asm/byteorder.h>
63
64#undef DEBUG
65
66#include "kawethfw.h"
67
68#define KAWETH_MTU 1514
69#define KAWETH_BUF_SIZE 1664
70#define KAWETH_TX_TIMEOUT (5 * HZ)
71#define KAWETH_SCRATCH_SIZE 32
72#define KAWETH_FIRMWARE_BUF_SIZE 4096
73#define KAWETH_CONTROL_TIMEOUT (30 * HZ)
74
75#define KAWETH_STATUS_BROKEN 0x0000001
76#define KAWETH_STATUS_CLOSING 0x0000002
77#define KAWETH_STATUS_SUSPENDING 0x0000004
78
79#define KAWETH_STATUS_BLOCKED (KAWETH_STATUS_CLOSING | KAWETH_STATUS_SUSPENDING)
80
81#define KAWETH_PACKET_FILTER_PROMISCUOUS 0x01
82#define KAWETH_PACKET_FILTER_ALL_MULTICAST 0x02
83#define KAWETH_PACKET_FILTER_DIRECTED 0x04
84#define KAWETH_PACKET_FILTER_BROADCAST 0x08
85#define KAWETH_PACKET_FILTER_MULTICAST 0x10
86
87/* Table 7 */
88#define KAWETH_COMMAND_GET_ETHERNET_DESC 0x00
89#define KAWETH_COMMAND_MULTICAST_FILTERS 0x01
90#define KAWETH_COMMAND_SET_PACKET_FILTER 0x02
91#define KAWETH_COMMAND_STATISTICS 0x03
92#define KAWETH_COMMAND_SET_TEMP_MAC 0x06
93#define KAWETH_COMMAND_GET_TEMP_MAC 0x07
94#define KAWETH_COMMAND_SET_URB_SIZE 0x08
95#define KAWETH_COMMAND_SET_SOFS_WAIT 0x09
96#define KAWETH_COMMAND_SCAN 0xFF
97
98#define KAWETH_SOFS_TO_WAIT 0x05
99
100#define INTBUFFERSIZE 4
101
102#define STATE_OFFSET 0
103#define STATE_MASK 0x40
104#define STATE_SHIFT 5
105
106#define IS_BLOCKED(s) (s & KAWETH_STATUS_BLOCKED)
107
108
109MODULE_AUTHOR("Michael Zappe <zapman@interlan.net>, Stephane Alnet <stephane@u-picardie.fr>, Brad Hards <bhards@bigpond.net.au> and Oliver Neukum <oliver@neukum.org>");
110MODULE_DESCRIPTION("KL5KUSB101 USB Ethernet driver");
111MODULE_LICENSE("GPL");
112
113static const char driver_name[] = "kaweth";
114
115static int kaweth_probe(
116 struct usb_interface *intf,
117 const struct usb_device_id *id /* from id_table */
118 );
119static void kaweth_disconnect(struct usb_interface *intf);
120static int kaweth_internal_control_msg(struct usb_device *usb_dev,
121 unsigned int pipe,
122 struct usb_ctrlrequest *cmd, void *data,
123 int len, int timeout);
124static int kaweth_suspend(struct usb_interface *intf, pm_message_t message);
125static int kaweth_resume(struct usb_interface *intf);
126
127/****************************************************************
128 * usb_device_id
129 ****************************************************************/
130static struct usb_device_id usb_klsi_table[] = {
131 { USB_DEVICE(0x03e8, 0x0008) }, /* AOX Endpoints USB Ethernet */
132 { USB_DEVICE(0x04bb, 0x0901) }, /* I-O DATA USB-ET/T */
133 { USB_DEVICE(0x0506, 0x03e8) }, /* 3Com 3C19250 */
134 { USB_DEVICE(0x0506, 0x11f8) }, /* 3Com 3C460 */
135 { USB_DEVICE(0x0557, 0x2002) }, /* ATEN USB Ethernet */
136 { USB_DEVICE(0x0557, 0x4000) }, /* D-Link DSB-650C */
137 { USB_DEVICE(0x0565, 0x0002) }, /* Peracom Enet */
138 { USB_DEVICE(0x0565, 0x0003) }, /* Optus@Home UEP1045A */
139 { USB_DEVICE(0x0565, 0x0005) }, /* Peracom Enet2 */
140 { USB_DEVICE(0x05e9, 0x0008) }, /* KLSI KL5KUSB101B */
141 { USB_DEVICE(0x05e9, 0x0009) }, /* KLSI KL5KUSB101B (Board change) */
142 { USB_DEVICE(0x066b, 0x2202) }, /* Linksys USB10T */
143 { USB_DEVICE(0x06e1, 0x0008) }, /* ADS USB-10BT */
144 { USB_DEVICE(0x06e1, 0x0009) }, /* ADS USB-10BT */
145 { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
146	{ USB_DEVICE(0x07aa, 0x0001) },	/* Corega K.K. */
147 { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
148 { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
149 { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
150 { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
151 { USB_DEVICE(0x085a, 0x0009) }, /* PortGear Ethernet Adapter */
152 { USB_DEVICE(0x087d, 0x5704) }, /* Jaton USB Ethernet Device Adapter */
153 { USB_DEVICE(0x0951, 0x0008) }, /* Kingston Technology USB Ethernet Adapter */
154 { USB_DEVICE(0x095a, 0x3003) }, /* Portsmith Express Ethernet Adapter */
155 { USB_DEVICE(0x10bd, 0x1427) }, /* ASANTE USB To Ethernet Adapter */
156 { USB_DEVICE(0x1342, 0x0204) }, /* Mobility USB-Ethernet Adapter */
157 { USB_DEVICE(0x13d2, 0x0400) }, /* Shark Pocket Adapter */
158 { USB_DEVICE(0x1485, 0x0001) }, /* Silicom U2E */
159 { USB_DEVICE(0x1485, 0x0002) }, /* Psion Dacom Gold Port Ethernet */
160 { USB_DEVICE(0x1645, 0x0005) }, /* Entrega E45 */
161 { USB_DEVICE(0x1645, 0x0008) }, /* Entrega USB Ethernet Adapter */
162 { USB_DEVICE(0x1645, 0x8005) }, /* PortGear Ethernet Adapter */
163 { USB_DEVICE(0x1668, 0x0323) }, /* Actiontec USB Ethernet */
164 { USB_DEVICE(0x2001, 0x4000) }, /* D-link DSB-650C */
165 {} /* Null terminator */
166};
167
168MODULE_DEVICE_TABLE (usb, usb_klsi_table);
169
170/****************************************************************
171 * kaweth_driver
172 ****************************************************************/
173static struct usb_driver kaweth_driver = {
174 .name = driver_name,
175 .probe = kaweth_probe,
176 .disconnect = kaweth_disconnect,
177 .suspend = kaweth_suspend,
178 .resume = kaweth_resume,
179 .id_table = usb_klsi_table,
180 .supports_autosuspend = 1,
181};
182
183typedef __u8 eth_addr_t[6];
184
185/****************************************************************
186 * usb_eth_dev
187 ****************************************************************/
188struct usb_eth_dev {
189 char *name;
190 __u16 vendor;
191 __u16 device;
192 void *pdata;
193};
194
195/****************************************************************
196 * kaweth_ethernet_configuration
197 * Refer Table 8
198 ****************************************************************/
199struct kaweth_ethernet_configuration
200{
201 __u8 size;
202 __u8 reserved1;
203 __u8 reserved2;
204 eth_addr_t hw_addr;
205 __u32 statistics_mask;
206 __le16 segment_size;
207 __u16 max_multicast_filters;
208 __u8 reserved3;
209} __attribute__ ((packed));
210
211/****************************************************************
212 * kaweth_device
213 ****************************************************************/
214struct kaweth_device
215{
216 spinlock_t device_lock;
217
218 __u32 status;
219 int end;
220 int suspend_lowmem_rx;
221 int suspend_lowmem_ctrl;
222 int linkstate;
223 int opened;
224 struct delayed_work lowmem_work;
225
226 struct usb_device *dev;
227 struct usb_interface *intf;
228 struct net_device *net;
229 wait_queue_head_t term_wait;
230
231 struct urb *rx_urb;
232 struct urb *tx_urb;
233 struct urb *irq_urb;
234
235 dma_addr_t intbufferhandle;
236 __u8 *intbuffer;
237 dma_addr_t rxbufferhandle;
238 __u8 *rx_buf;
239
240
241 struct sk_buff *tx_skb;
242
243 __u8 *firmware_buf;
244 __u8 scratch[KAWETH_SCRATCH_SIZE];
245 __u16 packet_filter_bitmap;
246
247 struct kaweth_ethernet_configuration configuration;
248
249 struct net_device_stats stats;
250};
251
252
253/****************************************************************
254 * kaweth_control
255 ****************************************************************/
256static int kaweth_control(struct kaweth_device *kaweth,
257 unsigned int pipe,
258 __u8 request,
259 __u8 requesttype,
260 __u16 value,
261 __u16 index,
262 void *data,
263 __u16 size,
264 int timeout)
265{
266 struct usb_ctrlrequest *dr;
267
268 dbg("kaweth_control()");
269
270 if(in_interrupt()) {
271 dbg("in_interrupt()");
272 return -EBUSY;
273 }
274
275 dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
276
277 if (!dr) {
278 dbg("kmalloc() failed");
279 return -ENOMEM;
280 }
281
282 dr->bRequestType= requesttype;
283 dr->bRequest = request;
284 dr->wValue = cpu_to_le16p(&value);
285 dr->wIndex = cpu_to_le16p(&index);
286 dr->wLength = cpu_to_le16p(&size);
287
288 return kaweth_internal_control_msg(kaweth->dev,
289 pipe,
290 dr,
291 data,
292 size,
293 timeout);
294}
295
296/****************************************************************
297 * kaweth_read_configuration
298 ****************************************************************/
299static int kaweth_read_configuration(struct kaweth_device *kaweth)
300{
301 int retval;
302
303 dbg("Reading kaweth configuration");
304
305 retval = kaweth_control(kaweth,
306 usb_rcvctrlpipe(kaweth->dev, 0),
307 KAWETH_COMMAND_GET_ETHERNET_DESC,
308 USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
309 0,
310 0,
311 (void *)&kaweth->configuration,
312 sizeof(kaweth->configuration),
313 KAWETH_CONTROL_TIMEOUT);
314
315 return retval;
316}
317
318/****************************************************************
319 * kaweth_set_urb_size
320 ****************************************************************/
321static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
322{
323 int retval;
324
325 dbg("Setting URB size to %d", (unsigned)urb_size);
326
327 retval = kaweth_control(kaweth,
328 usb_sndctrlpipe(kaweth->dev, 0),
329 KAWETH_COMMAND_SET_URB_SIZE,
330 USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
331 urb_size,
332 0,
333 (void *)&kaweth->scratch,
334 0,
335 KAWETH_CONTROL_TIMEOUT);
336
337 return retval;
338}
339
340/****************************************************************
341 * kaweth_set_sofs_wait
342 ****************************************************************/
343static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
344{
345 int retval;
346
347 dbg("Set SOFS wait to %d", (unsigned)sofs_wait);
348
349 retval = kaweth_control(kaweth,
350 usb_sndctrlpipe(kaweth->dev, 0),
351 KAWETH_COMMAND_SET_SOFS_WAIT,
352 USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
353 sofs_wait,
354 0,
355 (void *)&kaweth->scratch,
356 0,
357 KAWETH_CONTROL_TIMEOUT);
358
359 return retval;
360}
361
362/****************************************************************
363 * kaweth_set_receive_filter
364 ****************************************************************/
365static int kaweth_set_receive_filter(struct kaweth_device *kaweth,
366 __u16 receive_filter)
367{
368 int retval;
369
370 dbg("Set receive filter to %d", (unsigned)receive_filter);
371
372 retval = kaweth_control(kaweth,
373 usb_sndctrlpipe(kaweth->dev, 0),
374 KAWETH_COMMAND_SET_PACKET_FILTER,
375 USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
376 receive_filter,
377 0,
378 (void *)&kaweth->scratch,
379 0,
380 KAWETH_CONTROL_TIMEOUT);
381
382 return retval;
383}
384
385/****************************************************************
386 * kaweth_download_firmware
387 ****************************************************************/
388static int kaweth_download_firmware(struct kaweth_device *kaweth,
389 __u8 *data,
390 __u16 data_len,
391 __u8 interrupt,
392 __u8 type)
393{
394 if(data_len > KAWETH_FIRMWARE_BUF_SIZE) {
395 err("Firmware too big: %d", data_len);
396 return -ENOSPC;
397 }
398
399 memcpy(kaweth->firmware_buf, data, data_len);
400
401 kaweth->firmware_buf[2] = (data_len & 0xFF) - 7;
402 kaweth->firmware_buf[3] = data_len >> 8;
403 kaweth->firmware_buf[4] = type;
404 kaweth->firmware_buf[5] = interrupt;
405
406 dbg("High: %i, Low:%i", kaweth->firmware_buf[3],
407 kaweth->firmware_buf[2]);
408
409 dbg("Downloading firmware at %p to kaweth device at %p",
410 data,
411 kaweth);
412 dbg("Firmware length: %d", data_len);
413
414 return kaweth_control(kaweth,
415 usb_sndctrlpipe(kaweth->dev, 0),
416 KAWETH_COMMAND_SCAN,
417 USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
418 0,
419 0,
420 (void *)kaweth->firmware_buf,
421 data_len,
422 KAWETH_CONTROL_TIMEOUT);
423}
424
425/****************************************************************
426 * kaweth_trigger_firmware
427 ****************************************************************/
428static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
429 __u8 interrupt)
430{
431 kaweth->firmware_buf[0] = 0xB6;
432 kaweth->firmware_buf[1] = 0xC3;
433 kaweth->firmware_buf[2] = 0x01;
434 kaweth->firmware_buf[3] = 0x00;
435 kaweth->firmware_buf[4] = 0x06;
436 kaweth->firmware_buf[5] = interrupt;
437 kaweth->firmware_buf[6] = 0x00;
438 kaweth->firmware_buf[7] = 0x00;
439
440 dbg("Triggering firmware");
441
442 return kaweth_control(kaweth,
443 usb_sndctrlpipe(kaweth->dev, 0),
444 KAWETH_COMMAND_SCAN,
445 USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
446 0,
447 0,
448 (void *)kaweth->firmware_buf,
449 8,
450 KAWETH_CONTROL_TIMEOUT);
451}
452
453/****************************************************************
454 * kaweth_reset
455 ****************************************************************/
456static int kaweth_reset(struct kaweth_device *kaweth)
457{
458 int result;
459
460 dbg("kaweth_reset(%p)", kaweth);
461 result = kaweth_control(kaweth,
462 usb_sndctrlpipe(kaweth->dev, 0),
463 USB_REQ_SET_CONFIGURATION,
464 0,
465 kaweth->dev->config[0].desc.bConfigurationValue,
466 0,
467 NULL,
468 0,
469 KAWETH_CONTROL_TIMEOUT);
470
471 mdelay(10);
472
473 dbg("kaweth_reset() returns %d.",result);
474
475 return result;
476}
477
478static void kaweth_usb_receive(struct urb *);
479static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t);
480
481/****************************************************************
482 int_callback
483*****************************************************************/
484
485static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf)
486{
487 int status;
488
489 status = usb_submit_urb (kaweth->irq_urb, mf);
490 if (unlikely(status == -ENOMEM)) {
491 kaweth->suspend_lowmem_ctrl = 1;
492 schedule_delayed_work(&kaweth->lowmem_work, HZ/4);
493 } else {
494 kaweth->suspend_lowmem_ctrl = 0;
495 }
496
497 if (status)
498 err ("can't resubmit intr, %s-%s, status %d",
499 kaweth->dev->bus->bus_name,
500 kaweth->dev->devpath, status);
501}
502
503static void int_callback(struct urb *u)
504{
505 struct kaweth_device *kaweth = u->context;
506 int act_state;
507
508 switch (u->status) {
509 case 0: /* success */
510 break;
511 case -ECONNRESET: /* unlink */
512 case -ENOENT:
513 case -ESHUTDOWN:
514 return;
515 /* -EPIPE: should clear the halt */
516 default: /* error */
517 goto resubmit;
518 }
519
520 /* we check the link state to report changes */
521	if (kaweth->linkstate != (act_state = (kaweth->intbuffer[STATE_OFFSET] & STATE_MASK) >> STATE_SHIFT)) {
522 if (act_state)
523 netif_carrier_on(kaweth->net);
524 else
525 netif_carrier_off(kaweth->net);
526
527 kaweth->linkstate = act_state;
528 }
529resubmit:
530 kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC);
531}
532
533static void kaweth_resubmit_tl(struct work_struct *work)
534{
535 struct kaweth_device *kaweth =
536 container_of(work, struct kaweth_device, lowmem_work.work);
537
538 if (IS_BLOCKED(kaweth->status))
539 return;
540
541 if (kaweth->suspend_lowmem_rx)
542 kaweth_resubmit_rx_urb(kaweth, GFP_NOIO);
543
544 if (kaweth->suspend_lowmem_ctrl)
545 kaweth_resubmit_int_urb(kaweth, GFP_NOIO);
546}
547
548
549/****************************************************************
550 * kaweth_resubmit_rx_urb
551 ****************************************************************/
552static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth,
553 gfp_t mem_flags)
554{
555 int result;
556
557 usb_fill_bulk_urb(kaweth->rx_urb,
558 kaweth->dev,
559 usb_rcvbulkpipe(kaweth->dev, 1),
560 kaweth->rx_buf,
561 KAWETH_BUF_SIZE,
562 kaweth_usb_receive,
563 kaweth);
564 kaweth->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
565 kaweth->rx_urb->transfer_dma = kaweth->rxbufferhandle;
566
567 if((result = usb_submit_urb(kaweth->rx_urb, mem_flags))) {
568 if (result == -ENOMEM) {
569 kaweth->suspend_lowmem_rx = 1;
570 schedule_delayed_work(&kaweth->lowmem_work, HZ/4);
571 }
572 err("resubmitting rx_urb %d failed", result);
573 } else {
574 kaweth->suspend_lowmem_rx = 0;
575 }
576
577 return result;
578}
579
580static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth);
581
582/****************************************************************
583 * kaweth_usb_receive
584 ****************************************************************/
585static void kaweth_usb_receive(struct urb *urb)
586{
587 struct kaweth_device *kaweth = urb->context;
588 struct net_device *net = kaweth->net;
589
590 int count = urb->actual_length;
591 int count2 = urb->transfer_buffer_length;
592
593 __u16 pkt_len = le16_to_cpup((__le16 *)kaweth->rx_buf);
594
595 struct sk_buff *skb;
596
597 if(unlikely(urb->status == -ECONNRESET || urb->status == -ESHUTDOWN))
598 /* we are killed - set a flag and wake the disconnect handler */
599 {
600 kaweth->end = 1;
601 wake_up(&kaweth->term_wait);
602 return;
603 }
604
605 spin_lock(&kaweth->device_lock);
606 if (IS_BLOCKED(kaweth->status)) {
607 spin_unlock(&kaweth->device_lock);
608 return;
609 }
610 spin_unlock(&kaweth->device_lock);
611
612 if(urb->status && urb->status != -EREMOTEIO && count != 1) {
613 err("%s RX status: %d count: %d packet_len: %d",
614 net->name,
615 urb->status,
616 count,
617 (int)pkt_len);
618 kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
619 return;
620 }
621
622 if(kaweth->net && (count > 2)) {
623 if(pkt_len > (count - 2)) {
624 err("Packet length too long for USB frame (pkt_len: %x, count: %x)",pkt_len, count);
625 err("Packet len & 2047: %x", pkt_len & 2047);
626 err("Count 2: %x", count2);
627 kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
628 return;
629 }
630
631 if(!(skb = dev_alloc_skb(pkt_len+2))) {
632 kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
633 return;
634 }
635
636 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
637
638 eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0);
639
640 skb_put(skb, pkt_len);
641
642 skb->protocol = eth_type_trans(skb, net);
643
644 netif_rx(skb);
645
646 kaweth->stats.rx_packets++;
647 kaweth->stats.rx_bytes += pkt_len;
648 }
649
650 kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
651}
652
653/****************************************************************
654 * kaweth_open
655 ****************************************************************/
656static int kaweth_open(struct net_device *net)
657{
658 struct kaweth_device *kaweth = netdev_priv(net);
659 int res;
660
661 dbg("Opening network device.");
662
663 res = usb_autopm_get_interface(kaweth->intf);
664 if (res) {
665 err("Interface cannot be resumed.");
666 return -EIO;
667 }
668 res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL);
669 if (res)
670 goto err_out;
671
672 usb_fill_int_urb(
673 kaweth->irq_urb,
674 kaweth->dev,
675 usb_rcvintpipe(kaweth->dev, 3),
676 kaweth->intbuffer,
677 INTBUFFERSIZE,
678 int_callback,
679 kaweth,
680 250); /* overriding the descriptor */
681 kaweth->irq_urb->transfer_dma = kaweth->intbufferhandle;
682 kaweth->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
683
684 res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL);
685 if (res) {
686 usb_kill_urb(kaweth->rx_urb);
687 goto err_out;
688 }
689 kaweth->opened = 1;
690
691 netif_start_queue(net);
692
693 kaweth_async_set_rx_mode(kaweth);
694 return 0;
695
696err_out:
697 usb_autopm_enable(kaweth->intf);
698 return -EIO;
699}
700
701/****************************************************************
702 * kaweth_kill_urbs
703 ****************************************************************/
704static void kaweth_kill_urbs(struct kaweth_device *kaweth)
705{
706 usb_kill_urb(kaweth->irq_urb);
707 usb_kill_urb(kaweth->rx_urb);
708 usb_kill_urb(kaweth->tx_urb);
709
710 flush_scheduled_work();
711
712	/* a scheduled work may have resubmitted,
713	   so we kill them again */
714 usb_kill_urb(kaweth->irq_urb);
715 usb_kill_urb(kaweth->rx_urb);
716}
717
718/****************************************************************
719 * kaweth_close
720 ****************************************************************/
721static int kaweth_close(struct net_device *net)
722{
723 struct kaweth_device *kaweth = netdev_priv(net);
724
725 netif_stop_queue(net);
726 kaweth->opened = 0;
727
728 kaweth->status |= KAWETH_STATUS_CLOSING;
729
730 kaweth_kill_urbs(kaweth);
731
732 kaweth->status &= ~KAWETH_STATUS_CLOSING;
733
734 usb_autopm_enable(kaweth->intf);
735
736 return 0;
737}
738
739static void kaweth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
740{
741 struct kaweth_device *kaweth = netdev_priv(dev);
742
743 strlcpy(info->driver, driver_name, sizeof(info->driver));
744 usb_make_path(kaweth->dev, info->bus_info, sizeof (info->bus_info));
745}
746
747static u32 kaweth_get_link(struct net_device *dev)
748{
749 struct kaweth_device *kaweth = netdev_priv(dev);
750
751 return kaweth->linkstate;
752}
753
754static struct ethtool_ops ops = {
755 .get_drvinfo = kaweth_get_drvinfo,
756 .get_link = kaweth_get_link
757};
758
759/****************************************************************
760 * kaweth_usb_transmit_complete
761 ****************************************************************/
762static void kaweth_usb_transmit_complete(struct urb *urb)
763{
764 struct kaweth_device *kaweth = urb->context;
765 struct sk_buff *skb = kaweth->tx_skb;
766
767 if (unlikely(urb->status != 0))
768 if (urb->status != -ENOENT)
769 dbg("%s: TX status %d.", kaweth->net->name, urb->status);
770
771 netif_wake_queue(kaweth->net);
772 dev_kfree_skb_irq(skb);
773}
774
775/****************************************************************
776 * kaweth_start_xmit
777 ****************************************************************/
778static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net)
779{
780 struct kaweth_device *kaweth = netdev_priv(net);
781 __le16 *private_header;
782
783 int res;
784
785 spin_lock(&kaweth->device_lock);
786
787 kaweth_async_set_rx_mode(kaweth);
788 netif_stop_queue(net);
789 if (IS_BLOCKED(kaweth->status)) {
790 goto skip;
791 }
792
793 /* We now decide whether we can put our special header into the sk_buff */
794 if (skb_cloned(skb) || skb_headroom(skb) < 2) {
795 /* no such luck - we make our own */
796 struct sk_buff *copied_skb;
797 copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
798 dev_kfree_skb_irq(skb);
799 skb = copied_skb;
800 if (!copied_skb) {
801 kaweth->stats.tx_errors++;
802 netif_start_queue(net);
803 spin_unlock(&kaweth->device_lock);
804 return 0;
805 }
806 }
807
808 private_header = (__le16 *)__skb_push(skb, 2);
809 *private_header = cpu_to_le16(skb->len-2);
810 kaweth->tx_skb = skb;
811
812 usb_fill_bulk_urb(kaweth->tx_urb,
813 kaweth->dev,
814 usb_sndbulkpipe(kaweth->dev, 2),
815 private_header,
816 skb->len,
817 kaweth_usb_transmit_complete,
818 kaweth);
819 kaweth->end = 0;
820
821 if((res = usb_submit_urb(kaweth->tx_urb, GFP_ATOMIC)))
822 {
823 warn("kaweth failed tx_urb %d", res);
824skip:
825 kaweth->stats.tx_errors++;
826
827 netif_start_queue(net);
828 dev_kfree_skb_irq(skb);
829 }
830 else
831 {
832 kaweth->stats.tx_packets++;
833 kaweth->stats.tx_bytes += skb->len;
834 net->trans_start = jiffies;
835 }
836
837 spin_unlock(&kaweth->device_lock);
838
839 return 0;
840}
841
842/****************************************************************
843 * kaweth_set_rx_mode
844 ****************************************************************/
845static void kaweth_set_rx_mode(struct net_device *net)
846{
847 struct kaweth_device *kaweth = netdev_priv(net);
848
849 __u16 packet_filter_bitmap = KAWETH_PACKET_FILTER_DIRECTED |
850 KAWETH_PACKET_FILTER_BROADCAST |
851 KAWETH_PACKET_FILTER_MULTICAST;
852
853 dbg("Setting Rx mode to %d", packet_filter_bitmap);
854
855 netif_stop_queue(net);
856
857 if (net->flags & IFF_PROMISC) {
858 packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS;
859 }
860 else if ((net->mc_count) || (net->flags & IFF_ALLMULTI)) {
861 packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST;
862 }
863
864 kaweth->packet_filter_bitmap = packet_filter_bitmap;
865 netif_wake_queue(net);
866}
867
868/****************************************************************
869 * kaweth_async_set_rx_mode
870 ****************************************************************/
871static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
872{
873 __u16 packet_filter_bitmap = kaweth->packet_filter_bitmap;
874 kaweth->packet_filter_bitmap = 0;
875 if (packet_filter_bitmap == 0)
876 return;
877
878 {
879 int result;
880 result = kaweth_control(kaweth,
881 usb_sndctrlpipe(kaweth->dev, 0),
882 KAWETH_COMMAND_SET_PACKET_FILTER,
883 USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
884 packet_filter_bitmap,
885 0,
886 (void *)&kaweth->scratch,
887 0,
888 KAWETH_CONTROL_TIMEOUT);
889
890 if(result < 0) {
891 err("Failed to set Rx mode: %d", result);
892 }
893 else {
894 dbg("Set Rx mode to %d", packet_filter_bitmap);
895 }
896 }
897}
898
899/****************************************************************
900 * kaweth_netdev_stats
901 ****************************************************************/
902static struct net_device_stats *kaweth_netdev_stats(struct net_device *dev)
903{
904 struct kaweth_device *kaweth = netdev_priv(dev);
905 return &kaweth->stats;
906}
907
908/****************************************************************
909 * kaweth_tx_timeout
910 ****************************************************************/
911static void kaweth_tx_timeout(struct net_device *net)
912{
913 struct kaweth_device *kaweth = netdev_priv(net);
914
915 warn("%s: Tx timed out. Resetting.", net->name);
916 kaweth->stats.tx_errors++;
917 net->trans_start = jiffies;
918
919 usb_unlink_urb(kaweth->tx_urb);
920}
921
922/****************************************************************
923 * kaweth_suspend
924 ****************************************************************/
925static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
926{
927 struct kaweth_device *kaweth = usb_get_intfdata(intf);
928 unsigned long flags;
929
930 dbg("Suspending device");
931 spin_lock_irqsave(&kaweth->device_lock, flags);
932 kaweth->status |= KAWETH_STATUS_SUSPENDING;
933 spin_unlock_irqrestore(&kaweth->device_lock, flags);
934
935 kaweth_kill_urbs(kaweth);
936 return 0;
937}
938
939/****************************************************************
940 * kaweth_resume
941 ****************************************************************/
942static int kaweth_resume(struct usb_interface *intf)
943{
944 struct kaweth_device *kaweth = usb_get_intfdata(intf);
945 unsigned long flags;
946
947 dbg("Resuming device");
948 spin_lock_irqsave(&kaweth->device_lock, flags);
949 kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
950 spin_unlock_irqrestore(&kaweth->device_lock, flags);
951
952 if (!kaweth->opened)
953 return 0;
954 kaweth_resubmit_rx_urb(kaweth, GFP_NOIO);
955 kaweth_resubmit_int_urb(kaweth, GFP_NOIO);
956
957 return 0;
958}
959
960/****************************************************************
961 * kaweth_probe
962 ****************************************************************/
963static int kaweth_probe(
964 struct usb_interface *intf,
965 const struct usb_device_id *id /* from id_table */
966 )
967{
968 struct usb_device *dev = interface_to_usbdev(intf);
969 struct kaweth_device *kaweth;
970 struct net_device *netdev;
971 const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
972 int result = 0;
973
974 dbg("Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x",
975 dev->devnum,
976 le16_to_cpu(dev->descriptor.idVendor),
977 le16_to_cpu(dev->descriptor.idProduct),
978 le16_to_cpu(dev->descriptor.bcdDevice));
979
980 dbg("Device at %p", dev);
981
982 dbg("Descriptor length: %x type: %x",
983 (int)dev->descriptor.bLength,
984 (int)dev->descriptor.bDescriptorType);
985
986 netdev = alloc_etherdev(sizeof(*kaweth));
987 if (!netdev)
988 return -ENOMEM;
989
990 kaweth = netdev_priv(netdev);
991 kaweth->dev = dev;
992 kaweth->net = netdev;
993
994 spin_lock_init(&kaweth->device_lock);
995 init_waitqueue_head(&kaweth->term_wait);
996
997 dbg("Resetting.");
998
999 kaweth_reset(kaweth);
1000
1001 /*
1002 * If high byte of bcdDevice is nonzero, firmware is already
1003 * downloaded. Don't try to do it again, or we'll hang the device.
1004 */
1005
1006 if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) {
1007 info("Firmware present in device.");
1008 } else {
1009 /* Download the firmware */
1010 info("Downloading firmware...");
1011 kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
1012 if ((result = kaweth_download_firmware(kaweth,
1013 kaweth_new_code,
1014 len_kaweth_new_code,
1015 100,
1016 2)) < 0) {
1017 err("Error downloading firmware (%d)", result);
1018 goto err_fw;
1019 }
1020
1021 if ((result = kaweth_download_firmware(kaweth,
1022 kaweth_new_code_fix,
1023 len_kaweth_new_code_fix,
1024 100,
1025 3)) < 0) {
1026 err("Error downloading firmware fix (%d)", result);
1027 goto err_fw;
1028 }
1029
1030 if ((result = kaweth_download_firmware(kaweth,
1031 kaweth_trigger_code,
1032 len_kaweth_trigger_code,
1033 126,
1034 2)) < 0) {
1035 err("Error downloading trigger code (%d)", result);
1036 goto err_fw;
1037
1038 }
1039
1040 if ((result = kaweth_download_firmware(kaweth,
1041 kaweth_trigger_code_fix,
1042 len_kaweth_trigger_code_fix,
1043 126,
1044 3)) < 0) {
1045 err("Error downloading trigger code fix (%d)", result);
1046 goto err_fw;
1047 }
1048
1049
1050 if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) {
1051 err("Error triggering firmware (%d)", result);
1052 goto err_fw;
1053 }
1054
1055 /* Device will now disappear for a moment... */
1056 info("Firmware loaded. I'll be back...");
1057err_fw:
1058 free_page((unsigned long)kaweth->firmware_buf);
1059 free_netdev(netdev);
1060 return -EIO;
1061 }
1062
1063 result = kaweth_read_configuration(kaweth);
1064
1065 if(result < 0) {
1066 err("Error reading configuration (%d), no net device created", result);
1067 goto err_free_netdev;
1068 }
1069
1070 info("Statistics collection: %x", kaweth->configuration.statistics_mask);
1071 info("Multicast filter limit: %x", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
1072 info("MTU: %d", le16_to_cpu(kaweth->configuration.segment_size));
1073 info("Read MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
1074 (int)kaweth->configuration.hw_addr[0],
1075 (int)kaweth->configuration.hw_addr[1],
1076 (int)kaweth->configuration.hw_addr[2],
1077 (int)kaweth->configuration.hw_addr[3],
1078 (int)kaweth->configuration.hw_addr[4],
1079 (int)kaweth->configuration.hw_addr[5]);
1080
1081 if(!memcmp(&kaweth->configuration.hw_addr,
1082 &bcast_addr,
1083 sizeof(bcast_addr))) {
1084 err("Firmware not functioning properly, no net device created");
1085 goto err_free_netdev;
1086 }
1087
1088 if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) {
1089 dbg("Error setting URB size");
1090 goto err_free_netdev;
1091 }
1092
1093 if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) {
1094 err("Error setting SOFS wait");
1095 goto err_free_netdev;
1096 }
1097
1098 result = kaweth_set_receive_filter(kaweth,
1099 KAWETH_PACKET_FILTER_DIRECTED |
1100 KAWETH_PACKET_FILTER_BROADCAST |
1101 KAWETH_PACKET_FILTER_MULTICAST);
1102
1103 if(result < 0) {
1104 err("Error setting receive filter");
1105 goto err_free_netdev;
1106 }
1107
1108 dbg("Initializing net device.");
1109
1110 kaweth->intf = intf;
1111
1112 kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
1113 if (!kaweth->tx_urb)
1114 goto err_free_netdev;
1115 kaweth->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
1116 if (!kaweth->rx_urb)
1117 goto err_only_tx;
1118 kaweth->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
1119 if (!kaweth->irq_urb)
1120 goto err_tx_and_rx;
1121
1122 kaweth->intbuffer = usb_buffer_alloc( kaweth->dev,
1123 INTBUFFERSIZE,
1124 GFP_KERNEL,
1125 &kaweth->intbufferhandle);
1126 if (!kaweth->intbuffer)
1127 goto err_tx_and_rx_and_irq;
1128 kaweth->rx_buf = usb_buffer_alloc( kaweth->dev,
1129 KAWETH_BUF_SIZE,
1130 GFP_KERNEL,
1131 &kaweth->rxbufferhandle);
1132 if (!kaweth->rx_buf)
1133 goto err_all_but_rxbuf;
1134
1135 memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr));
1136 memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr,
1137 sizeof(kaweth->configuration.hw_addr));
1138
1139 netdev->open = kaweth_open;
1140 netdev->stop = kaweth_close;
1141
1142 netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
1143 netdev->tx_timeout = kaweth_tx_timeout;
1144
1145 netdev->hard_start_xmit = kaweth_start_xmit;
1146 netdev->set_multicast_list = kaweth_set_rx_mode;
1147 netdev->get_stats = kaweth_netdev_stats;
1148 netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
1149 SET_ETHTOOL_OPS(netdev, &ops);
1150
1151 /* kaweth is zeroed as part of alloc_netdev */
1152
1153 INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
1154
1155 SET_MODULE_OWNER(netdev);
1156
1157 usb_set_intfdata(intf, kaweth);
1158
1159#if 0
1160// dma_supported() is deeply broken on almost all architectures
1161 if (dma_supported (&intf->dev, 0xffffffffffffffffULL))
1162 kaweth->net->features |= NETIF_F_HIGHDMA;
1163#endif
1164
1165 SET_NETDEV_DEV(netdev, &intf->dev);
1166 if (register_netdev(netdev) != 0) {
1167 err("Error registering netdev.");
1168 goto err_intfdata;
1169 }
1170
1171 info("kaweth interface created at %s", kaweth->net->name);
1172
1173 dbg("Kaweth probe returning.");
1174
1175 return 0;
1176
1177err_intfdata:
1178 usb_set_intfdata(intf, NULL);
1179 usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
1180err_all_but_rxbuf:
1181 usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
1182err_tx_and_rx_and_irq:
1183 usb_free_urb(kaweth->irq_urb);
1184err_tx_and_rx:
1185 usb_free_urb(kaweth->rx_urb);
1186err_only_tx:
1187 usb_free_urb(kaweth->tx_urb);
1188err_free_netdev:
1189 free_netdev(netdev);
1190
1191 return -EIO;
1192}
1193
1194/****************************************************************
1195 * kaweth_disconnect
1196 ****************************************************************/
1197static void kaweth_disconnect(struct usb_interface *intf)
1198{
1199 struct kaweth_device *kaweth = usb_get_intfdata(intf);
1200 struct net_device *netdev;
1201
1202 info("Unregistering");
1203
1204 usb_set_intfdata(intf, NULL);
1205 if (!kaweth) {
1206		warn("unregistering non-existent device");
1207 return;
1208 }
1209 netdev = kaweth->net;
1210
1211 dbg("Unregistering net device");
1212 unregister_netdev(netdev);
1213
1214 usb_free_urb(kaweth->rx_urb);
1215 usb_free_urb(kaweth->tx_urb);
1216 usb_free_urb(kaweth->irq_urb);
1217
1218 usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
1219 usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
1220
1221 free_netdev(netdev);
1222}
1223
1224
1225// FIXME this completion stuff is a modified clone of
1226// an OLD version of some stuff in usb.c ...
1227struct usb_api_data {
1228 wait_queue_head_t wqh;
1229 int done;
1230};
1231
1232/*-------------------------------------------------------------------*
1233 * completion handler for compatibility wrappers (sync control/bulk) *
1234 *-------------------------------------------------------------------*/
1235static void usb_api_blocking_completion(struct urb *urb)
1236{
1237 struct usb_api_data *awd = (struct usb_api_data *)urb->context;
1238
1239 awd->done=1;
1240 wake_up(&awd->wqh);
1241}
1242
1243/*-------------------------------------------------------------------*
1244 * COMPATIBILITY STUFF *
1245 *-------------------------------------------------------------------*/
1246
1247// Starts urb and waits for completion or timeout
1248static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
1249{
1250 struct usb_api_data awd;
1251 int status;
1252
1253 init_waitqueue_head(&awd.wqh);
1254 awd.done = 0;
1255
1256 urb->context = &awd;
1257 status = usb_submit_urb(urb, GFP_NOIO);
1258 if (status) {
1259 // something went wrong
1260 usb_free_urb(urb);
1261 return status;
1262 }
1263
1264 if (!wait_event_timeout(awd.wqh, awd.done, timeout)) {
1265 // timeout
1266 warn("usb_control/bulk_msg: timeout");
1267 usb_kill_urb(urb); // remove urb safely
1268 status = -ETIMEDOUT;
1269 }
1270 else {
1271 status = urb->status;
1272 }
1273
1274 if (actual_length) {
1275 *actual_length = urb->actual_length;
1276 }
1277
1278 usb_free_urb(urb);
1279 return status;
1280}
1281
1282/*-------------------------------------------------------------------*/
1283// returns status (negative) or length (positive)
1284static int kaweth_internal_control_msg(struct usb_device *usb_dev,
1285 unsigned int pipe,
1286 struct usb_ctrlrequest *cmd, void *data,
1287 int len, int timeout)
1288{
1289 struct urb *urb;
1290 int retv;
1291 int length = 0; /* shut up GCC */
1292
1293 urb = usb_alloc_urb(0, GFP_NOIO);
1294 if (!urb)
1295 return -ENOMEM;
1296
1297 usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char*)cmd, data,
1298 len, usb_api_blocking_completion, NULL);
1299
1300 retv = usb_start_wait_urb(urb, timeout, &length);
1301 if (retv < 0) {
1302 return retv;
1303 }
1304 else {
1305 return length;
1306 }
1307}
1308
1309
1310/****************************************************************
1311 * kaweth_init
1312 ****************************************************************/
1313static int __init kaweth_init(void)
1314{
1315 dbg("Driver loading");
1316 return usb_register(&kaweth_driver);
1317}
1318
1319/****************************************************************
1320 * kaweth_exit
1321 ****************************************************************/
1322static void __exit kaweth_exit(void)
1323{
1324 usb_deregister(&kaweth_driver);
1325}
1326
1327module_init(kaweth_init);
1328module_exit(kaweth_exit);
1329
1330
1331
1332
1333
1334
1335
1336
1337
diff --git a/drivers/net/usb/kawethfw.h b/drivers/net/usb/kawethfw.h
new file mode 100644
index 000000000000..cf85fcb0d1a6
--- /dev/null
+++ b/drivers/net/usb/kawethfw.h
@@ -0,0 +1,557 @@
1/******************************************/
2/* NOTE: B6/C3 is data header signature */
3/* 0xAA/0xBB is data length = total */
4/* bytes - 7, 0xCC is type, 0xDD is */
5/* interrupt to use. */
6/******************************************/
7
8/****************************************************************
9 * kaweth_trigger_code
10 ****************************************************************/
11static __u8 kaweth_trigger_code[] =
12{
13 0xB6, 0xC3, 0xAA, 0xBB, 0xCC, 0xDD,
14 0xc8, 0x07, 0xa0, 0x00, 0xf0, 0x07, 0x5e, 0x00,
15 0x06, 0x00, 0xf0, 0x07, 0x0a, 0x00, 0x08, 0x00,
16 0xf0, 0x09, 0x00, 0x00, 0x02, 0x00, 0xe7, 0x07,
17 0x36, 0x00, 0x00, 0x00, 0xf0, 0x07, 0x00, 0x00,
18 0x04, 0x00, 0xe7, 0x07, 0x50, 0xc3, 0x10, 0xc0,
19 0xf0, 0x09, 0x0e, 0xc0, 0x00, 0x00, 0xe7, 0x87,
20 0x01, 0x00, 0x0e, 0xc0, 0x97, 0xcf, 0xd7, 0x09,
21 0x00, 0xc0, 0x17, 0x02, 0xc8, 0x07, 0xa0, 0x00,
22 0xe7, 0x17, 0x50, 0xc3, 0x10, 0xc0, 0x30, 0xd8,
23 0x04, 0x00, 0x30, 0x5c, 0x08, 0x00, 0x04, 0x00,
24 0xb0, 0xc0, 0x06, 0x00, 0xc8, 0x05, 0xe7, 0x05,
25 0x00, 0xc0, 0xc0, 0xdf, 0x97, 0xcf, 0x49, 0xaf,
26 0xc0, 0x07, 0x00, 0x00, 0x60, 0xaf, 0x4a, 0xaf,
27 0x00, 0x0c, 0x0c, 0x00, 0x40, 0xd2, 0x00, 0x1c,
28 0x0c, 0x00, 0x40, 0xd2, 0x30, 0x00, 0x08, 0x00,
29 0xf0, 0x07, 0x00, 0x00, 0x04, 0x00, 0xf0, 0x07,
30 0x86, 0x00, 0x06, 0x00, 0x67, 0xcf, 0x27, 0x0c,
31 0x02, 0x00, 0x00, 0x00, 0x27, 0x0c, 0x00, 0x00,
32 0x0e, 0xc0, 0x49, 0xaf, 0x64, 0xaf, 0xc0, 0x07,
33 0x00, 0x00, 0x4b, 0xaf, 0x4a, 0xaf, 0x5a, 0xcf,
34 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
35 0x00, 0x00, 0x94, 0x00, 0x05, 0x00,
36 0x00, 0x00
37};
38/****************************************************************
39 * kaweth_trigger_code_fix
40 ****************************************************************/
41static __u8 kaweth_trigger_code_fix[] =
42{
43 0xB6, 0xC3, 0xAA, 0xBB, 0xCC, 0xDD,
44 0x02, 0x00, 0x06, 0x00, 0x18, 0x00, 0x3e, 0x00,
45 0x80, 0x00, 0x98, 0x00, 0xaa, 0x00,
46 0x00, 0x00
47};
48
49/****************************************************************
50 * kaweth_new_code
51 ****************************************************************/
52static __u8 kaweth_new_code[] =
53{
54 0xB6, 0xC3, 0xAA, 0xBB, 0xCC, 0xDD,
55 0x9f, 0xcf, 0xde, 0x06, 0xe7, 0x57, 0x00, 0x00,
56 0xc4, 0x06, 0x97, 0xc1, 0xe7, 0x67, 0xff, 0x1f,
57 0x28, 0xc0, 0xe7, 0x87, 0x00, 0x04, 0x24, 0xc0,
58 0xe7, 0x67, 0xff, 0xf9, 0x22, 0xc0, 0x97, 0xcf,
59 0xd7, 0x09, 0x00, 0xc0, 0xe7, 0x09, 0xa2, 0xc0,
60 0xbe, 0x06, 0x9f, 0xaf, 0x36, 0x00, 0xe7, 0x05,
61 0x00, 0xc0, 0xa7, 0xcf, 0xbc, 0x06, 0x97, 0xcf,
62 0xe7, 0x57, 0x00, 0x00, 0xb8, 0x06, 0xa7, 0xa1,
63 0xb8, 0x06, 0x97, 0xcf, 0xe7, 0x57, 0x00, 0x00,
64 0x14, 0x08, 0x0a, 0xc0, 0xe7, 0x57, 0x00, 0x00,
65 0xa4, 0xc0, 0xa7, 0xc0, 0x7a, 0x06, 0x9f, 0xaf,
66 0x92, 0x07, 0xe7, 0x07, 0x00, 0x00, 0x14, 0x08,
67 0xe7, 0x57, 0xff, 0xff, 0xba, 0x06, 0x9f, 0xa0,
68 0x38, 0x00, 0xe7, 0x59, 0xba, 0x06, 0xbe, 0x06,
69 0x9f, 0xa0, 0x38, 0x00, 0xc8, 0x09, 0xca, 0x06,
70 0x08, 0x62, 0x9f, 0xa1, 0x36, 0x08, 0xc0, 0x09,
71 0x76, 0x06, 0x00, 0x60, 0xa7, 0xc0, 0x7a, 0x06,
72 0x9f, 0xaf, 0xcc, 0x02, 0xe7, 0x57, 0x00, 0x00,
73 0xb8, 0x06, 0xa7, 0xc1, 0x7a, 0x06, 0x9f, 0xaf,
74 0x04, 0x00, 0xe7, 0x57, 0x00, 0x00, 0x8e, 0x06,
75 0x0a, 0xc1, 0xe7, 0x09, 0x20, 0xc0, 0x10, 0x08,
76 0xe7, 0xd0, 0x10, 0x08, 0xe7, 0x67, 0x40, 0x00,
77 0x10, 0x08, 0x9f, 0xaf, 0x92, 0x0c, 0xc0, 0x09,
78 0xd0, 0x06, 0x00, 0x60, 0x05, 0xc4, 0xc0, 0x59,
79 0xbe, 0x06, 0x02, 0xc0, 0x9f, 0xaf, 0xec, 0x00,
80 0x9f, 0xaf, 0x34, 0x02, 0xe7, 0x57, 0x00, 0x00,
81 0xa6, 0x06, 0x9f, 0xa0, 0x7a, 0x02, 0xa7, 0xcf,
82 0x7a, 0x06, 0x48, 0x02, 0xe7, 0x09, 0xbe, 0x06,
83 0xd0, 0x06, 0xc8, 0x37, 0x04, 0x00, 0x9f, 0xaf,
84 0x08, 0x03, 0x97, 0xcf, 0xe7, 0x57, 0x00, 0x00,
85 0xce, 0x06, 0x97, 0xc0, 0xd7, 0x09, 0x00, 0xc0,
86 0xc1, 0xdf, 0xc8, 0x09, 0xc6, 0x06, 0x08, 0x62,
87 0x14, 0xc0, 0x27, 0x04, 0xc6, 0x06, 0x10, 0x94,
88 0xf0, 0x07, 0x10, 0x08, 0x02, 0x00, 0xc1, 0x07,
89 0x01, 0x00, 0x70, 0x00, 0x04, 0x00, 0xf0, 0x07,
90 0x30, 0x01, 0x06, 0x00, 0x50, 0xaf, 0xe7, 0x07,
91 0xff, 0xff, 0xd0, 0x06, 0xe7, 0x07, 0x00, 0x00,
92 0xce, 0x06, 0xe7, 0x05, 0x00, 0xc0, 0x97, 0xcf,
93 0xd7, 0x09, 0x00, 0xc0, 0xc1, 0xdf, 0x48, 0x02,
94 0xd0, 0x09, 0xc6, 0x06, 0x27, 0x02, 0xc6, 0x06,
95 0xe7, 0x05, 0x00, 0xc0, 0x97, 0xcf, 0x48, 0x02,
96 0xc8, 0x37, 0x04, 0x00, 0x00, 0x0c, 0x0c, 0x00,
97 0x00, 0x60, 0x21, 0xc0, 0xc0, 0x37, 0x3e, 0x00,
98 0x23, 0xc9, 0xc0, 0x57, 0xb4, 0x05, 0x1b, 0xc8,
99 0xc0, 0x17, 0x3f, 0x00, 0xc0, 0x67, 0xc0, 0xff,
100 0x30, 0x00, 0x08, 0x00, 0xf0, 0x07, 0x00, 0x00,
101 0x04, 0x00, 0x00, 0x02, 0xc0, 0x17, 0x4c, 0x00,
102 0x30, 0x00, 0x06, 0x00, 0xf0, 0x07, 0xa0, 0x01,
103 0x0a, 0x00, 0x48, 0x02, 0xc1, 0x07, 0x02, 0x00,
104 0xd7, 0x09, 0x00, 0xc0, 0xc1, 0xdf, 0x51, 0xaf,
105 0xe7, 0x05, 0x00, 0xc0, 0x97, 0xcf, 0x9f, 0xaf,
106 0x08, 0x03, 0x9f, 0xaf, 0x7a, 0x02, 0x97, 0xcf,
107 0x9f, 0xaf, 0x7a, 0x02, 0xc9, 0x37, 0x04, 0x00,
108 0xc1, 0xdf, 0xc8, 0x09, 0xa2, 0x06, 0x50, 0x02,
109 0x67, 0x02, 0xa2, 0x06, 0xd1, 0x07, 0x00, 0x00,
110 0x27, 0xd8, 0xaa, 0x06, 0xc0, 0xdf, 0x9f, 0xaf,
111 0xc4, 0x01, 0x97, 0xcf, 0xe7, 0x57, 0x00, 0x00,
112 0xd2, 0x06, 0x97, 0xc1, 0xe7, 0x57, 0x01, 0x00,
113 0xa8, 0x06, 0x97, 0xc0, 0xc8, 0x09, 0xa0, 0x06,
114 0x08, 0x62, 0x97, 0xc0, 0x00, 0x02, 0xc0, 0x17,
115 0x0e, 0x00, 0x27, 0x00, 0x34, 0x01, 0x27, 0x0c,
116 0x0c, 0x00, 0x36, 0x01, 0xe7, 0x07, 0x50, 0xc3,
117 0x12, 0xc0, 0xe7, 0x07, 0xcc, 0x0b, 0x02, 0x00,
118 0xe7, 0x07, 0x01, 0x00, 0xa8, 0x06, 0xe7, 0x07,
119 0x05, 0x00, 0x90, 0xc0, 0x97, 0xcf, 0xc8, 0x09,
120 0xa4, 0x06, 0x08, 0x62, 0x02, 0xc0, 0x10, 0x64,
121 0x07, 0xc1, 0xe7, 0x07, 0x00, 0x00, 0x9e, 0x06,
122 0xe7, 0x07, 0x72, 0x04, 0x24, 0x00, 0x97, 0xcf,
123 0x27, 0x04, 0xa4, 0x06, 0xc8, 0x17, 0x0e, 0x00,
124 0x27, 0x02, 0x9e, 0x06, 0xe7, 0x07, 0x80, 0x04,
125 0x24, 0x00, 0x97, 0xcf, 0xd7, 0x09, 0x00, 0xc0,
126 0xc1, 0xdf, 0xe7, 0x57, 0x00, 0x00, 0x90, 0x06,
127 0x13, 0xc1, 0x9f, 0xaf, 0x06, 0x02, 0xe7, 0x57,
128 0x00, 0x00, 0x9e, 0x06, 0x13, 0xc0, 0xe7, 0x09,
129 0x9e, 0x06, 0x30, 0x01, 0xe7, 0x07, 0xf2, 0x05,
130 0x32, 0x01, 0xe7, 0x07, 0x10, 0x00, 0x96, 0xc0,
131 0xe7, 0x09, 0x9e, 0x06, 0x90, 0x06, 0x04, 0xcf,
132 0xe7, 0x57, 0x00, 0x00, 0x9e, 0x06, 0x02, 0xc1,
133 0x9f, 0xaf, 0x06, 0x02, 0xe7, 0x05, 0x00, 0xc0,
134 0x97, 0xcf, 0xd7, 0x09, 0x00, 0xc0, 0xc1, 0xdf,
135 0x08, 0x92, 0xe7, 0x57, 0x02, 0x00, 0xaa, 0x06,
136 0x02, 0xc3, 0xc8, 0x09, 0xa4, 0x06, 0x27, 0x02,
137 0xa6, 0x06, 0x08, 0x62, 0x03, 0xc1, 0xe7, 0x05,
138 0x00, 0xc0, 0x97, 0xcf, 0x27, 0x04, 0xa4, 0x06,
139 0xe7, 0x05, 0x00, 0xc0, 0xf0, 0x07, 0x40, 0x00,
140 0x08, 0x00, 0xf0, 0x07, 0x00, 0x00, 0x04, 0x00,
141 0x00, 0x02, 0xc0, 0x17, 0x0c, 0x00, 0x30, 0x00,
142 0x06, 0x00, 0xf0, 0x07, 0x46, 0x01, 0x0a, 0x00,
143 0xc8, 0x17, 0x04, 0x00, 0xc1, 0x07, 0x02, 0x00,
144 0x51, 0xaf, 0x97, 0xcf, 0xe7, 0x57, 0x00, 0x00,
145 0x96, 0x06, 0x97, 0xc0, 0xc1, 0xdf, 0xc8, 0x09,
146 0x96, 0x06, 0x27, 0x04, 0x96, 0x06, 0x27, 0x52,
147 0x98, 0x06, 0x03, 0xc1, 0xe7, 0x07, 0x96, 0x06,
148 0x98, 0x06, 0xc0, 0xdf, 0x17, 0x02, 0xc8, 0x17,
149 0x0e, 0x00, 0x9f, 0xaf, 0xba, 0x03, 0xc8, 0x05,
150 0x00, 0x60, 0x03, 0xc0, 0x9f, 0xaf, 0x24, 0x03,
151 0x97, 0xcf, 0x9f, 0xaf, 0x08, 0x03, 0x97, 0xcf,
152 0x57, 0x02, 0xc9, 0x07, 0xa4, 0x06, 0xd7, 0x09,
153 0x00, 0xc0, 0xc1, 0xdf, 0x08, 0x62, 0x1b, 0xc0,
154 0x50, 0x04, 0x11, 0x02, 0xe7, 0x05, 0x00, 0xc0,
155 0xc9, 0x05, 0x97, 0xcf, 0x97, 0x02, 0xca, 0x09,
156 0xd6, 0x06, 0xf2, 0x17, 0x01, 0x00, 0x04, 0x00,
157 0xf2, 0x27, 0x00, 0x00, 0x06, 0x00, 0xca, 0x17,
158 0x2c, 0x00, 0xf8, 0x77, 0x01, 0x00, 0x0e, 0x00,
159 0x06, 0xc0, 0xca, 0xd9, 0xf8, 0x57, 0xff, 0x00,
160 0x0e, 0x00, 0x01, 0xc1, 0xca, 0xd9, 0x22, 0x1c,
161 0x0c, 0x00, 0xe2, 0x27, 0x00, 0x00, 0xe2, 0x17,
162 0x01, 0x00, 0xe2, 0x27, 0x00, 0x00, 0xca, 0x05,
163 0x00, 0x0c, 0x0c, 0x00, 0xc0, 0x17, 0x41, 0x00,
164 0xc0, 0x67, 0xc0, 0xff, 0x30, 0x00, 0x08, 0x00,
165 0x00, 0x02, 0xc0, 0x17, 0x0c, 0x00, 0x30, 0x00,
166 0x06, 0x00, 0xf0, 0x07, 0xda, 0x00, 0x0a, 0x00,
167 0xf0, 0x07, 0x00, 0x00, 0x04, 0x00, 0x00, 0x0c,
168 0x08, 0x00, 0x40, 0xd1, 0x01, 0x00, 0xc0, 0x19,
169 0xce, 0x06, 0xc0, 0x59, 0xc2, 0x06, 0x04, 0xc9,
170 0x49, 0xaf, 0x9f, 0xaf, 0xec, 0x00, 0x4a, 0xaf,
171 0x67, 0x10, 0xce, 0x06, 0xc8, 0x17, 0x04, 0x00,
172 0xc1, 0x07, 0x01, 0x00, 0xd7, 0x09, 0x00, 0xc0,
173 0xc1, 0xdf, 0x50, 0xaf, 0xe7, 0x05, 0x00, 0xc0,
174 0x97, 0xcf, 0xc0, 0x07, 0x01, 0x00, 0xc1, 0x09,
175 0xac, 0x06, 0xc1, 0x77, 0x01, 0x00, 0x97, 0xc1,
176 0xd8, 0x77, 0x01, 0x00, 0x12, 0xc0, 0xc9, 0x07,
177 0x6a, 0x06, 0x9f, 0xaf, 0x08, 0x04, 0x04, 0xc1,
178 0xc1, 0x77, 0x08, 0x00, 0x13, 0xc0, 0x97, 0xcf,
179 0xc1, 0x77, 0x02, 0x00, 0x97, 0xc1, 0xc1, 0x77,
180 0x10, 0x00, 0x0c, 0xc0, 0x9f, 0xaf, 0x2c, 0x04,
181 0x97, 0xcf, 0xc1, 0x77, 0x04, 0x00, 0x06, 0xc0,
182 0xc9, 0x07, 0x70, 0x06, 0x9f, 0xaf, 0x08, 0x04,
183 0x97, 0xc0, 0x00, 0xcf, 0x00, 0x90, 0x97, 0xcf,
184 0x50, 0x54, 0x97, 0xc1, 0x70, 0x5c, 0x02, 0x00,
185 0x02, 0x00, 0x97, 0xc1, 0x70, 0x5c, 0x04, 0x00,
186 0x04, 0x00, 0x97, 0xcf, 0x80, 0x01, 0xc0, 0x00,
187 0x60, 0x00, 0x30, 0x00, 0x18, 0x00, 0x0c, 0x00,
188 0x06, 0x00, 0x00, 0x00, 0xcb, 0x09, 0xb2, 0x06,
189 0xcc, 0x09, 0xb4, 0x06, 0x0b, 0x53, 0x11, 0xc0,
190 0xc9, 0x02, 0xca, 0x07, 0x1c, 0x04, 0x9f, 0xaf,
191 0x08, 0x04, 0x97, 0xc0, 0x0a, 0xc8, 0x82, 0x08,
192 0x0a, 0xcf, 0x82, 0x08, 0x9f, 0xaf, 0x08, 0x04,
193 0x97, 0xc0, 0x05, 0xc2, 0x89, 0x30, 0x82, 0x60,
194 0x78, 0xc1, 0x00, 0x90, 0x97, 0xcf, 0x89, 0x10,
195 0x09, 0x53, 0x79, 0xc2, 0x89, 0x30, 0x82, 0x08,
196 0x7a, 0xcf, 0xc0, 0xdf, 0x97, 0xcf, 0xc0, 0xdf,
197 0x97, 0xcf, 0xe7, 0x09, 0x96, 0xc0, 0x92, 0x06,
198 0xe7, 0x09, 0x98, 0xc0, 0x94, 0x06, 0x0f, 0xcf,
199 0xe7, 0x09, 0x96, 0xc0, 0x92, 0x06, 0xe7, 0x09,
200 0x98, 0xc0, 0x94, 0x06, 0xe7, 0x09, 0x9e, 0x06,
201 0x30, 0x01, 0xe7, 0x07, 0xf2, 0x05, 0x32, 0x01,
202 0xe7, 0x07, 0x10, 0x00, 0x96, 0xc0, 0xd7, 0x09,
203 0x00, 0xc0, 0x17, 0x02, 0xc8, 0x09, 0x90, 0x06,
204 0xc8, 0x37, 0x0e, 0x00, 0xe7, 0x77, 0x2a, 0x00,
205 0x92, 0x06, 0x30, 0xc0, 0x97, 0x02, 0xca, 0x09,
206 0xd6, 0x06, 0xe7, 0x77, 0x20, 0x00, 0x92, 0x06,
207 0x0e, 0xc0, 0xf2, 0x17, 0x01, 0x00, 0x10, 0x00,
208 0xf2, 0x27, 0x00, 0x00, 0x12, 0x00, 0xe7, 0x77,
209 0x0a, 0x00, 0x92, 0x06, 0xca, 0x05, 0x1e, 0xc0,
210 0x97, 0x02, 0xca, 0x09, 0xd6, 0x06, 0xf2, 0x17,
211 0x01, 0x00, 0x0c, 0x00, 0xf2, 0x27, 0x00, 0x00,
212 0x0e, 0x00, 0xe7, 0x77, 0x02, 0x00, 0x92, 0x06,
213 0x07, 0xc0, 0xf2, 0x17, 0x01, 0x00, 0x44, 0x00,
214 0xf2, 0x27, 0x00, 0x00, 0x46, 0x00, 0x06, 0xcf,
215 0xf2, 0x17, 0x01, 0x00, 0x60, 0x00, 0xf2, 0x27,
216 0x00, 0x00, 0x62, 0x00, 0xca, 0x05, 0x9f, 0xaf,
217 0x08, 0x03, 0x0f, 0xcf, 0x57, 0x02, 0x09, 0x02,
218 0xf1, 0x09, 0x94, 0x06, 0x0c, 0x00, 0xf1, 0xda,
219 0x0c, 0x00, 0xc8, 0x09, 0x98, 0x06, 0x50, 0x02,
220 0x67, 0x02, 0x98, 0x06, 0xd1, 0x07, 0x00, 0x00,
221 0xc9, 0x05, 0xe7, 0x09, 0x9e, 0x06, 0x90, 0x06,
222 0xe7, 0x57, 0x00, 0x00, 0x90, 0x06, 0x02, 0xc0,
223 0x9f, 0xaf, 0x06, 0x02, 0xc8, 0x05, 0xe7, 0x05,
224 0x00, 0xc0, 0xc0, 0xdf, 0x97, 0xcf, 0xd7, 0x09,
225 0x00, 0xc0, 0x17, 0x00, 0x17, 0x02, 0x97, 0x02,
226 0xc0, 0x09, 0x92, 0xc0, 0xe7, 0x07, 0x04, 0x00,
227 0x90, 0xc0, 0xca, 0x09, 0xd6, 0x06, 0xe7, 0x07,
228 0x00, 0x00, 0xa8, 0x06, 0xe7, 0x07, 0x6a, 0x04,
229 0x02, 0x00, 0xc0, 0x77, 0x02, 0x00, 0x08, 0xc0,
230 0xf2, 0x17, 0x01, 0x00, 0x50, 0x00, 0xf2, 0x27,
231 0x00, 0x00, 0x52, 0x00, 0x9f, 0xcf, 0x24, 0x06,
232 0xc0, 0x77, 0x10, 0x00, 0x06, 0xc0, 0xf2, 0x17,
233 0x01, 0x00, 0x58, 0x00, 0xf2, 0x27, 0x00, 0x00,
234 0x5a, 0x00, 0xc0, 0x77, 0x80, 0x00, 0x06, 0xc0,
235 0xf2, 0x17, 0x01, 0x00, 0x70, 0x00, 0xf2, 0x27,
236 0x00, 0x00, 0x72, 0x00, 0xc0, 0x77, 0x08, 0x00,
237 0x1d, 0xc1, 0xf2, 0x17, 0x01, 0x00, 0x08, 0x00,
238 0xf2, 0x27, 0x00, 0x00, 0x0a, 0x00, 0xc0, 0x77,
239 0x00, 0x02, 0x06, 0xc0, 0xf2, 0x17, 0x01, 0x00,
240 0x64, 0x00, 0xf2, 0x27, 0x00, 0x00, 0x66, 0x00,
241 0xc0, 0x77, 0x40, 0x00, 0x06, 0xc0, 0xf2, 0x17,
242 0x01, 0x00, 0x5c, 0x00, 0xf2, 0x27, 0x00, 0x00,
243 0x5e, 0x00, 0xc0, 0x77, 0x01, 0x00, 0x01, 0xc0,
244 0x1b, 0xcf, 0x1a, 0xcf, 0xf2, 0x17, 0x01, 0x00,
245 0x00, 0x00, 0xf2, 0x27, 0x00, 0x00, 0x02, 0x00,
246 0xc8, 0x09, 0x34, 0x01, 0xca, 0x17, 0x14, 0x00,
247 0xd8, 0x77, 0x01, 0x00, 0x05, 0xc0, 0xca, 0xd9,
248 0xd8, 0x57, 0xff, 0x00, 0x01, 0xc0, 0xca, 0xd9,
249 0xe2, 0x19, 0x94, 0xc0, 0xe2, 0x27, 0x00, 0x00,
250 0xe2, 0x17, 0x01, 0x00, 0xe2, 0x27, 0x00, 0x00,
251 0x9f, 0xaf, 0x40, 0x06, 0x9f, 0xaf, 0xc4, 0x01,
252 0xe7, 0x57, 0x00, 0x00, 0xd2, 0x06, 0x9f, 0xa1,
253 0x0e, 0x0a, 0xca, 0x05, 0xc8, 0x05, 0xc0, 0x05,
254 0xe7, 0x05, 0x00, 0xc0, 0xc0, 0xdf, 0x97, 0xcf,
255 0xc8, 0x09, 0xa0, 0x06, 0x08, 0x62, 0x97, 0xc0,
256 0x27, 0x04, 0xa0, 0x06, 0x27, 0x52, 0xa2, 0x06,
257 0x03, 0xc1, 0xe7, 0x07, 0xa0, 0x06, 0xa2, 0x06,
258 0x9f, 0xaf, 0x08, 0x03, 0xe7, 0x57, 0x00, 0x00,
259 0xaa, 0x06, 0x02, 0xc0, 0x27, 0xda, 0xaa, 0x06,
260 0x97, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
273 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0xff, 0xff, 0xfb, 0x13, 0xe7, 0x57,
275 0x00, 0x80, 0xb2, 0x00, 0x06, 0xc2, 0xe7, 0x07,
276 0xee, 0x0b, 0x12, 0x00, 0xe7, 0x07, 0x34, 0x0c,
277 0xb2, 0x00, 0xe7, 0x07, 0xc6, 0x07, 0xf2, 0x02,
278 0xc8, 0x09, 0xb4, 0x00, 0xf8, 0x07, 0x02, 0x00,
279 0x0d, 0x00, 0xd7, 0x09, 0x0e, 0xc0, 0xe7, 0x07,
280 0x00, 0x00, 0x0e, 0xc0, 0xc8, 0x09, 0xde, 0x00,
281 0xc8, 0x17, 0x09, 0x00, 0xc9, 0x07, 0xda, 0x06,
282 0xc0, 0x07, 0x04, 0x00, 0x68, 0x0a, 0x00, 0xda,
283 0x7d, 0xc1, 0xe7, 0x09, 0xc0, 0x00, 0x7c, 0x06,
284 0xe7, 0x09, 0xbe, 0x00, 0x78, 0x06, 0xe7, 0x09,
285 0x10, 0x00, 0xbc, 0x06, 0xc8, 0x07, 0xd6, 0x07,
286 0x9f, 0xaf, 0xae, 0x07, 0x9f, 0xaf, 0x00, 0x0a,
287 0xc8, 0x09, 0xde, 0x00, 0x00, 0x0e, 0x0f, 0x00,
288 0x41, 0x90, 0x9f, 0xde, 0x06, 0x00, 0x44, 0xaf,
289 0x27, 0x00, 0xb2, 0x06, 0x27, 0x00, 0xb4, 0x06,
290 0x27, 0x00, 0xb6, 0x06, 0xc0, 0x07, 0x74, 0x00,
291 0x44, 0xaf, 0x27, 0x00, 0xd6, 0x06, 0x08, 0x00,
292 0x00, 0x90, 0xc1, 0x07, 0x3a, 0x00, 0x20, 0x00,
293 0x01, 0xda, 0x7d, 0xc1, 0x9f, 0xaf, 0xba, 0x09,
294 0xc0, 0x07, 0x44, 0x00, 0x48, 0xaf, 0x27, 0x00,
295 0x7a, 0x06, 0x9f, 0xaf, 0x96, 0x0a, 0xe7, 0x07,
296 0x01, 0x00, 0xc0, 0x06, 0xe7, 0x05, 0x0e, 0xc0,
297 0x97, 0xcf, 0x49, 0xaf, 0xe7, 0x87, 0x43, 0x00,
298 0x0e, 0xc0, 0xe7, 0x07, 0xff, 0xff, 0xbe, 0x06,
299 0x9f, 0xaf, 0xae, 0x0a, 0xc0, 0x07, 0x01, 0x00,
300 0x60, 0xaf, 0x4a, 0xaf, 0x97, 0xcf, 0x00, 0x08,
301 0x09, 0x08, 0x11, 0x08, 0x00, 0xda, 0x7c, 0xc1,
302 0x97, 0xcf, 0x67, 0x04, 0xcc, 0x02, 0xc0, 0xdf,
303 0x51, 0x94, 0xb1, 0xaf, 0x06, 0x00, 0xc1, 0xdf,
304 0xc9, 0x09, 0xcc, 0x02, 0x49, 0x62, 0x75, 0xc1,
305 0xc0, 0xdf, 0xa7, 0xcf, 0xd6, 0x02, 0x0e, 0x00,
306 0x24, 0x00, 0x80, 0x04, 0x22, 0x00, 0x4e, 0x05,
307 0xd0, 0x00, 0x0e, 0x0a, 0xaa, 0x00, 0x30, 0x08,
308 0xbe, 0x00, 0x4a, 0x0a, 0x10, 0x00, 0x20, 0x00,
309 0x04, 0x00, 0x6e, 0x04, 0x02, 0x00, 0x6a, 0x04,
310 0x06, 0x00, 0x00, 0x00, 0x24, 0xc0, 0x04, 0x04,
311 0x28, 0xc0, 0xfe, 0xfb, 0x1e, 0xc0, 0x00, 0x04,
312 0x22, 0xc0, 0xff, 0xf4, 0xc0, 0x00, 0x90, 0x09,
313 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x56, 0x08,
314 0x60, 0x08, 0xd0, 0x08, 0xda, 0x08, 0x00, 0x09,
315 0x04, 0x09, 0x08, 0x09, 0x32, 0x09, 0x42, 0x09,
316 0x50, 0x09, 0x52, 0x09, 0x5a, 0x09, 0x5a, 0x09,
317 0x27, 0x02, 0xca, 0x06, 0x97, 0xcf, 0xe7, 0x07,
318 0x00, 0x00, 0xca, 0x06, 0x0a, 0x0e, 0x01, 0x00,
319 0xca, 0x57, 0x0e, 0x00, 0x9f, 0xc3, 0x5a, 0x09,
320 0xca, 0x37, 0x00, 0x00, 0x9f, 0xc2, 0x5a, 0x09,
321 0x0a, 0xd2, 0xb2, 0xcf, 0x16, 0x08, 0xc8, 0x09,
322 0xde, 0x00, 0x07, 0x06, 0x9f, 0xcf, 0x6c, 0x09,
323 0x17, 0x02, 0xc8, 0x09, 0xde, 0x00, 0x00, 0x0e,
324 0x0f, 0x00, 0x41, 0x90, 0x9f, 0xde, 0x06, 0x00,
325 0xc8, 0x05, 0x30, 0x50, 0x06, 0x00, 0x9f, 0xc8,
326 0x5a, 0x09, 0x27, 0x0c, 0x02, 0x00, 0xb0, 0x06,
327 0xc0, 0x09, 0xb2, 0x06, 0x27, 0x00, 0xb4, 0x06,
328 0xe7, 0x07, 0x00, 0x00, 0xae, 0x06, 0x27, 0x00,
329 0x80, 0x06, 0x00, 0x1c, 0x06, 0x00, 0x27, 0x00,
330 0xb6, 0x06, 0x41, 0x90, 0x67, 0x50, 0xb0, 0x06,
331 0x0d, 0xc0, 0x67, 0x00, 0x7e, 0x06, 0x27, 0x0c,
332 0x06, 0x00, 0x82, 0x06, 0xe7, 0x07, 0xbc, 0x08,
333 0x84, 0x06, 0xc8, 0x07, 0x7e, 0x06, 0x41, 0x90,
334 0x51, 0xaf, 0x97, 0xcf, 0x9f, 0xaf, 0x48, 0x0c,
335 0xe7, 0x09, 0xb6, 0x06, 0xb4, 0x06, 0xe7, 0x09,
336 0xb0, 0x06, 0xae, 0x06, 0x59, 0xaf, 0x97, 0xcf,
337 0x27, 0x0c, 0x02, 0x00, 0xac, 0x06, 0x59, 0xaf,
338 0x97, 0xcf, 0x09, 0x0c, 0x02, 0x00, 0x09, 0xda,
339 0x49, 0xd2, 0xc9, 0x19, 0xd6, 0x06, 0xc8, 0x07,
340 0x7e, 0x06, 0xe0, 0x07, 0x00, 0x00, 0x60, 0x02,
341 0xe0, 0x07, 0x04, 0x00, 0xd0, 0x07, 0xcc, 0x08,
342 0x48, 0xdb, 0x41, 0x90, 0x50, 0xaf, 0x97, 0xcf,
343 0x59, 0xaf, 0x97, 0xcf, 0x59, 0xaf, 0x97, 0xcf,
344 0xf0, 0x57, 0x06, 0x00, 0x06, 0x00, 0x25, 0xc1,
345 0xe7, 0x07, 0x70, 0x06, 0x80, 0x06, 0x41, 0x90,
346 0x67, 0x00, 0x7e, 0x06, 0x27, 0x0c, 0x06, 0x00,
347 0x82, 0x06, 0xe7, 0x07, 0x8c, 0x09, 0x84, 0x06,
348 0xc8, 0x07, 0x7e, 0x06, 0x41, 0x90, 0x51, 0xaf,
349 0x97, 0xcf, 0x07, 0x0c, 0x06, 0x00, 0xc7, 0x57,
350 0x06, 0x00, 0x0f, 0xc1, 0xc8, 0x07, 0x70, 0x06,
351 0x15, 0xcf, 0x00, 0x0c, 0x02, 0x00, 0x00, 0xda,
352 0x40, 0xd1, 0x27, 0x00, 0xc2, 0x06, 0x1e, 0xcf,
353 0x1d, 0xcf, 0x27, 0x0c, 0x02, 0x00, 0xcc, 0x06,
354 0x19, 0xcf, 0x27, 0x02, 0x20, 0x01, 0xe7, 0x07,
355 0x08, 0x00, 0x22, 0x01, 0xe7, 0x07, 0x13, 0x00,
356 0xb0, 0xc0, 0x97, 0xcf, 0x41, 0x90, 0x67, 0x00,
357 0x7e, 0x06, 0xe7, 0x01, 0x82, 0x06, 0x27, 0x02,
358 0x80, 0x06, 0xe7, 0x07, 0x8c, 0x09, 0x84, 0x06,
359 0xc8, 0x07, 0x7e, 0x06, 0xc1, 0x07, 0x00, 0x80,
360 0x50, 0xaf, 0x97, 0xcf, 0x59, 0xaf, 0x97, 0xcf,
361 0x00, 0x60, 0x05, 0xc0, 0xe7, 0x07, 0x00, 0x00,
362 0xc4, 0x06, 0xa7, 0xcf, 0x7c, 0x06, 0x9f, 0xaf,
363 0x00, 0x0a, 0xe7, 0x07, 0x01, 0x00, 0xc4, 0x06,
364 0x49, 0xaf, 0xd7, 0x09, 0x00, 0xc0, 0x07, 0xaf,
365 0xe7, 0x05, 0x00, 0xc0, 0x4a, 0xaf, 0xa7, 0xcf,
366 0x7c, 0x06, 0xc0, 0x07, 0xfe, 0x7f, 0x44, 0xaf,
367 0x40, 0x00, 0xc0, 0x37, 0x00, 0x01, 0x41, 0x90,
368 0xc0, 0x37, 0x08, 0x00, 0xdf, 0xde, 0x50, 0x06,
369 0xc0, 0x57, 0x10, 0x00, 0x02, 0xc2, 0xc0, 0x07,
370 0x10, 0x00, 0x27, 0x00, 0x9a, 0x06, 0x41, 0x90,
371 0x9f, 0xde, 0x40, 0x06, 0x44, 0xaf, 0x27, 0x00,
372 0x9c, 0x06, 0xc0, 0x09, 0x9a, 0x06, 0x41, 0x90,
373 0x00, 0xd2, 0x00, 0xd8, 0x9f, 0xde, 0x08, 0x00,
374 0x44, 0xaf, 0x27, 0x00, 0xc8, 0x06, 0x97, 0xcf,
375 0xe7, 0x87, 0x00, 0x84, 0x28, 0xc0, 0xe7, 0x67,
376 0xff, 0xfb, 0x24, 0xc0, 0x97, 0xcf, 0xe7, 0x87,
377 0x01, 0x00, 0xd2, 0x06, 0xe7, 0x57, 0x00, 0x00,
378 0xa8, 0x06, 0x97, 0xc1, 0x9f, 0xaf, 0x00, 0x0a,
379 0xe7, 0x87, 0x00, 0x06, 0x22, 0xc0, 0xe7, 0x07,
380 0x00, 0x00, 0x90, 0xc0, 0xe7, 0x67, 0xfe, 0xff,
381 0x3e, 0xc0, 0xe7, 0x07, 0x26, 0x00, 0x0a, 0xc0,
382 0xe7, 0x87, 0x01, 0x00, 0x3e, 0xc0, 0xe7, 0x07,
383 0xff, 0xff, 0xbe, 0x06, 0x9f, 0xaf, 0x10, 0x0b,
384 0x97, 0xcf, 0x17, 0x00, 0xa7, 0xaf, 0x78, 0x06,
385 0xc0, 0x05, 0x27, 0x00, 0x76, 0x06, 0xe7, 0x87,
386 0x01, 0x00, 0xd2, 0x06, 0x9f, 0xaf, 0x00, 0x0a,
387 0xe7, 0x07, 0x0c, 0x00, 0x40, 0xc0, 0x9f, 0xaf,
388 0x10, 0x0b, 0x00, 0x90, 0x27, 0x00, 0xa6, 0x06,
389 0x27, 0x00, 0xaa, 0x06, 0xe7, 0x09, 0xb2, 0x06,
390 0xb4, 0x06, 0x27, 0x00, 0xae, 0x06, 0x27, 0x00,
391 0xac, 0x06, 0x9f, 0xaf, 0xae, 0x0a, 0xc0, 0x07,
392 0x00, 0x00, 0x27, 0x00, 0xb2, 0x02, 0x27, 0x00,
393 0xb4, 0x02, 0x27, 0x00, 0x8e, 0x06, 0xc0, 0x07,
394 0x06, 0x00, 0xc8, 0x09, 0xde, 0x00, 0xc8, 0x17,
395 0x03, 0x00, 0xc9, 0x07, 0x70, 0x06, 0x29, 0x0a,
396 0x00, 0xda, 0x7d, 0xc1, 0x97, 0xcf, 0xd7, 0x09,
397 0x00, 0xc0, 0xc1, 0xdf, 0x00, 0x90, 0x27, 0x00,
398 0x96, 0x06, 0xe7, 0x07, 0x96, 0x06, 0x98, 0x06,
399 0x27, 0x00, 0xa0, 0x06, 0xe7, 0x07, 0xa0, 0x06,
400 0xa2, 0x06, 0x27, 0x00, 0xa6, 0x06, 0x27, 0x00,
401 0x90, 0x06, 0x27, 0x00, 0x9e, 0x06, 0xc8, 0x09,
402 0x9c, 0x06, 0xc1, 0x09, 0x9a, 0x06, 0xc9, 0x07,
403 0xa4, 0x06, 0x11, 0x02, 0x09, 0x02, 0xc8, 0x17,
404 0x40, 0x06, 0x01, 0xda, 0x7a, 0xc1, 0x51, 0x94,
405 0xc8, 0x09, 0xc8, 0x06, 0xc9, 0x07, 0xc6, 0x06,
406 0xc1, 0x09, 0x9a, 0x06, 0x11, 0x02, 0x09, 0x02,
407 0xc8, 0x17, 0x08, 0x00, 0x01, 0xda, 0x7a, 0xc1,
408 0x51, 0x94, 0xe7, 0x05, 0x00, 0xc0, 0x97, 0xcf,
409 0xe7, 0x57, 0x00, 0x00, 0x76, 0x06, 0x97, 0xc0,
410 0x9f, 0xaf, 0x04, 0x00, 0xe7, 0x09, 0xbe, 0x06,
411 0xba, 0x06, 0xe7, 0x57, 0xff, 0xff, 0xba, 0x06,
412 0x04, 0xc1, 0xe7, 0x07, 0x10, 0x0b, 0xb8, 0x06,
413 0x97, 0xcf, 0xe7, 0x17, 0x32, 0x00, 0xba, 0x06,
414 0xe7, 0x67, 0xff, 0x07, 0xba, 0x06, 0xe7, 0x07,
415 0x46, 0x0b, 0xb8, 0x06, 0x97, 0xcf, 0xe7, 0x57,
416 0x00, 0x00, 0xc0, 0x06, 0x23, 0xc0, 0xe7, 0x07,
417 0x04, 0x00, 0x90, 0xc0, 0xe7, 0x07, 0x00, 0x80,
418 0x80, 0xc0, 0xe7, 0x07, 0x00, 0x00, 0x80, 0xc0,
419 0xe7, 0x07, 0x00, 0x80, 0x80, 0xc0, 0xc0, 0x07,
420 0x00, 0x00, 0xc0, 0x07, 0x00, 0x00, 0xc0, 0x07,
421 0x00, 0x00, 0xe7, 0x07, 0x00, 0x00, 0x80, 0xc0,
422 0xe7, 0x07, 0x00, 0x80, 0x80, 0xc0, 0xe7, 0x07,
423 0x00, 0x80, 0x40, 0xc0, 0xc0, 0x07, 0x00, 0x00,
424 0xe7, 0x07, 0x00, 0x00, 0x40, 0xc0, 0xe7, 0x07,
425 0x00, 0x00, 0x80, 0xc0, 0xe7, 0x07, 0x04, 0x00,
426 0x90, 0xc0, 0xe7, 0x07, 0x00, 0x02, 0x40, 0xc0,
427 0xe7, 0x07, 0x0c, 0x02, 0x40, 0xc0, 0xe7, 0x07,
428 0x00, 0x00, 0xc0, 0x06, 0xe7, 0x07, 0x00, 0x00,
429 0xb8, 0x06, 0xe7, 0x07, 0x00, 0x00, 0xd2, 0x06,
430 0xd7, 0x09, 0x00, 0xc0, 0xc1, 0xdf, 0x9f, 0xaf,
431 0x34, 0x02, 0xe7, 0x05, 0x00, 0xc0, 0x9f, 0xaf,
432 0xc4, 0x01, 0x97, 0xcf, 0xd7, 0x09, 0x00, 0xc0,
433 0x17, 0x00, 0x17, 0x02, 0x97, 0x02, 0xe7, 0x57,
434 0x00, 0x00, 0xa8, 0x06, 0x06, 0xc0, 0xc0, 0x09,
435 0x92, 0xc0, 0xc0, 0x77, 0x09, 0x02, 0x9f, 0xc1,
436 0x5c, 0x05, 0x9f, 0xcf, 0x32, 0x06, 0xd7, 0x09,
437 0x0e, 0xc0, 0xe7, 0x07, 0x00, 0x00, 0x0e, 0xc0,
438 0x9f, 0xaf, 0x02, 0x0c, 0xe7, 0x05, 0x0e, 0xc0,
439 0x97, 0xcf, 0xd7, 0x09, 0x00, 0xc0, 0x17, 0x02,
440 0xc8, 0x09, 0xb0, 0xc0, 0xe7, 0x67, 0xfe, 0x7f,
441 0xb0, 0xc0, 0xc8, 0x77, 0x00, 0x20, 0x9f, 0xc1,
442 0x64, 0xeb, 0xe7, 0x57, 0x00, 0x00, 0xc8, 0x02,
443 0x9f, 0xc1, 0x80, 0xeb, 0xc8, 0x99, 0xca, 0x02,
444 0xc8, 0x67, 0x04, 0x00, 0x9f, 0xc1, 0x96, 0xeb,
445 0x9f, 0xcf, 0x4c, 0xeb, 0xe7, 0x07, 0x00, 0x00,
446 0xa6, 0xc0, 0xe7, 0x09, 0xb0, 0xc0, 0xc8, 0x02,
447 0xe7, 0x07, 0x03, 0x00, 0xb0, 0xc0, 0x97, 0xcf,
448 0xc0, 0x09, 0xb0, 0x06, 0xc0, 0x37, 0x01, 0x00,
449 0x97, 0xc9, 0xc9, 0x09, 0xb2, 0x06, 0x02, 0x00,
450 0x41, 0x90, 0x48, 0x02, 0xc9, 0x17, 0x06, 0x00,
451 0x9f, 0xaf, 0x08, 0x04, 0x9f, 0xa2, 0x72, 0x0c,
452 0x02, 0xda, 0x77, 0xc1, 0x41, 0x60, 0x71, 0xc1,
453 0x97, 0xcf, 0x17, 0x02, 0x57, 0x02, 0x43, 0x04,
454 0x21, 0x04, 0xe0, 0x00, 0x43, 0x04, 0x21, 0x04,
455 0xe0, 0x00, 0x43, 0x04, 0x21, 0x04, 0xe0, 0x00,
456 0xc1, 0x07, 0x01, 0x00, 0xc9, 0x05, 0xc8, 0x05,
457 0x97, 0xcf, 0xe7, 0x07, 0x01, 0x00, 0x8e, 0x06,
458 0xc8, 0x07, 0x86, 0x06, 0xe7, 0x07, 0x00, 0x00,
459 0x86, 0x06, 0xe7, 0x07, 0x10, 0x08, 0x88, 0x06,
460 0xe7, 0x07, 0x04, 0x00, 0x8a, 0x06, 0xe7, 0x07,
461 0xbc, 0x0c, 0x8c, 0x06, 0xc1, 0x07, 0x03, 0x80,
462 0x50, 0xaf, 0x97, 0xcf, 0xe7, 0x07, 0x00, 0x00,
463 0x8e, 0x06, 0x97, 0xcf,
464 0x00, 0x00
465};
466
467/****************************************************************
468 * kaweth_new_code_fix
469 ****************************************************************/
470static __u8 kaweth_new_code_fix[] =
471{
472 0xB6, 0xC3, 0xAA, 0xBB, 0xCC, 0xDD,
473 0x02, 0x00, 0x08, 0x00, 0x28, 0x00, 0x2c, 0x00,
474 0x34, 0x00, 0x3c, 0x00, 0x40, 0x00, 0x48, 0x00,
475 0x54, 0x00, 0x58, 0x00, 0x5e, 0x00, 0x64, 0x00,
476 0x68, 0x00, 0x6e, 0x00, 0x6c, 0x00, 0x72, 0x00,
477 0x76, 0x00, 0x7c, 0x00, 0x80, 0x00, 0x86, 0x00,
478 0x8a, 0x00, 0x90, 0x00, 0x94, 0x00, 0x98, 0x00,
479 0x9e, 0x00, 0xa6, 0x00, 0xaa, 0x00, 0xb0, 0x00,
480 0xb4, 0x00, 0xb8, 0x00, 0xc0, 0x00, 0xc6, 0x00,
481 0xca, 0x00, 0xd0, 0x00, 0xd4, 0x00, 0xd8, 0x00,
482 0xe0, 0x00, 0xde, 0x00, 0xe8, 0x00, 0xf0, 0x00,
483 0xfc, 0x00, 0x04, 0x01, 0x0a, 0x01, 0x18, 0x01,
484 0x22, 0x01, 0x28, 0x01, 0x3a, 0x01, 0x3e, 0x01,
485 0x7e, 0x01, 0x98, 0x01, 0x9c, 0x01, 0xa2, 0x01,
486 0xac, 0x01, 0xb2, 0x01, 0xba, 0x01, 0xc0, 0x01,
487 0xc8, 0x01, 0xd0, 0x01, 0xd6, 0x01, 0xf4, 0x01,
488 0xfc, 0x01, 0x08, 0x02, 0x16, 0x02, 0x1a, 0x02,
489 0x22, 0x02, 0x2a, 0x02, 0x2e, 0x02, 0x3e, 0x02,
490 0x44, 0x02, 0x4a, 0x02, 0x50, 0x02, 0x64, 0x02,
491 0x62, 0x02, 0x6c, 0x02, 0x72, 0x02, 0x86, 0x02,
492 0x8c, 0x02, 0x90, 0x02, 0x9e, 0x02, 0xbc, 0x02,
493 0xd0, 0x02, 0xd8, 0x02, 0xdc, 0x02, 0xe0, 0x02,
494 0xe8, 0x02, 0xe6, 0x02, 0xf4, 0x02, 0xfe, 0x02,
495 0x04, 0x03, 0x0c, 0x03, 0x28, 0x03, 0x7c, 0x03,
496 0x90, 0x03, 0x94, 0x03, 0x9c, 0x03, 0xa2, 0x03,
497 0xc0, 0x03, 0xd0, 0x03, 0xd4, 0x03, 0xee, 0x03,
498 0xfa, 0x03, 0xfe, 0x03, 0x2e, 0x04, 0x32, 0x04,
499 0x3c, 0x04, 0x40, 0x04, 0x4e, 0x04, 0x76, 0x04,
500 0x7c, 0x04, 0x84, 0x04, 0x8a, 0x04, 0x8e, 0x04,
501 0xa6, 0x04, 0xb0, 0x04, 0xb8, 0x04, 0xbe, 0x04,
502 0xd2, 0x04, 0xdc, 0x04, 0xee, 0x04, 0x10, 0x05,
503 0x1a, 0x05, 0x24, 0x05, 0x2a, 0x05, 0x36, 0x05,
504 0x34, 0x05, 0x3c, 0x05, 0x42, 0x05, 0x64, 0x05,
505 0x6a, 0x05, 0x6e, 0x05, 0x86, 0x05, 0x22, 0x06,
506 0x26, 0x06, 0x2c, 0x06, 0x30, 0x06, 0x42, 0x06,
507 0x4a, 0x06, 0x4e, 0x06, 0x56, 0x06, 0x54, 0x06,
508 0x5a, 0x06, 0x60, 0x06, 0x66, 0x06, 0xe8, 0x06,
509 0xee, 0x06, 0xf4, 0x06, 0x16, 0x07, 0x26, 0x07,
510 0x2c, 0x07, 0x32, 0x07, 0x36, 0x07, 0x3a, 0x07,
511 0x3e, 0x07, 0x52, 0x07, 0x56, 0x07, 0x5a, 0x07,
512 0x64, 0x07, 0x76, 0x07, 0x7a, 0x07, 0x80, 0x07,
513 0x84, 0x07, 0x8a, 0x07, 0x9e, 0x07, 0xa2, 0x07,
514 0xda, 0x07, 0xde, 0x07, 0xe2, 0x07, 0xe6, 0x07,
515 0xea, 0x07, 0xee, 0x07, 0xf2, 0x07, 0xf6, 0x07,
516 0x0e, 0x08, 0x16, 0x08, 0x18, 0x08, 0x1a, 0x08,
517 0x1c, 0x08, 0x1e, 0x08, 0x20, 0x08, 0x22, 0x08,
518 0x24, 0x08, 0x26, 0x08, 0x28, 0x08, 0x2a, 0x08,
519 0x2c, 0x08, 0x2e, 0x08, 0x32, 0x08, 0x3a, 0x08,
520 0x46, 0x08, 0x4e, 0x08, 0x54, 0x08, 0x5e, 0x08,
521 0x78, 0x08, 0x7e, 0x08, 0x82, 0x08, 0x86, 0x08,
522 0x8c, 0x08, 0x90, 0x08, 0x98, 0x08, 0x9e, 0x08,
523 0xa4, 0x08, 0xaa, 0x08, 0xb0, 0x08, 0xae, 0x08,
524 0xb4, 0x08, 0xbe, 0x08, 0xc4, 0x08, 0xc2, 0x08,
525 0xca, 0x08, 0xc8, 0x08, 0xd4, 0x08, 0xe4, 0x08,
526 0xe8, 0x08, 0xf6, 0x08, 0x14, 0x09, 0x12, 0x09,
527 0x1a, 0x09, 0x20, 0x09, 0x26, 0x09, 0x24, 0x09,
528 0x2a, 0x09, 0x3e, 0x09, 0x4c, 0x09, 0x56, 0x09,
529 0x70, 0x09, 0x74, 0x09, 0x78, 0x09, 0x7e, 0x09,
530 0x7c, 0x09, 0x82, 0x09, 0x98, 0x09, 0x9c, 0x09,
531 0xa0, 0x09, 0xa6, 0x09, 0xb8, 0x09, 0xdc, 0x09,
532 0xe8, 0x09, 0xec, 0x09, 0xfc, 0x09, 0x12, 0x0a,
533 0x18, 0x0a, 0x1e, 0x0a, 0x42, 0x0a, 0x46, 0x0a,
534 0x4e, 0x0a, 0x54, 0x0a, 0x5a, 0x0a, 0x5e, 0x0a,
535 0x68, 0x0a, 0x6e, 0x0a, 0x72, 0x0a, 0x78, 0x0a,
536 0x76, 0x0a, 0x7c, 0x0a, 0x80, 0x0a, 0x84, 0x0a,
537 0x94, 0x0a, 0xa4, 0x0a, 0xb8, 0x0a, 0xbe, 0x0a,
538 0xbc, 0x0a, 0xc2, 0x0a, 0xc8, 0x0a, 0xc6, 0x0a,
539 0xcc, 0x0a, 0xd0, 0x0a, 0xd4, 0x0a, 0xd8, 0x0a,
540 0xdc, 0x0a, 0xe0, 0x0a, 0xf2, 0x0a, 0xf6, 0x0a,
541 0xfa, 0x0a, 0x14, 0x0b, 0x1a, 0x0b, 0x20, 0x0b,
542 0x1e, 0x0b, 0x26, 0x0b, 0x2e, 0x0b, 0x2c, 0x0b,
543 0x36, 0x0b, 0x3c, 0x0b, 0x42, 0x0b, 0x40, 0x0b,
544 0x4a, 0x0b, 0xaa, 0x0b, 0xb0, 0x0b, 0xb6, 0x0b,
545 0xc0, 0x0b, 0xc8, 0x0b, 0xda, 0x0b, 0xe8, 0x0b,
546 0xec, 0x0b, 0xfa, 0x0b, 0x4a, 0x0c, 0x54, 0x0c,
547 0x62, 0x0c, 0x66, 0x0c, 0x96, 0x0c, 0x9a, 0x0c,
548 0xa0, 0x0c, 0xa6, 0x0c, 0xa4, 0x0c, 0xac, 0x0c,
549 0xb2, 0x0c, 0xb0, 0x0c, 0xc0, 0x0c,
550 0x00, 0x00
551};
552
553
554static const int len_kaweth_trigger_code = sizeof(kaweth_trigger_code);
555static const int len_kaweth_trigger_code_fix = sizeof(kaweth_trigger_code_fix);
556static const int len_kaweth_new_code = sizeof(kaweth_new_code);
557static const int len_kaweth_new_code_fix = sizeof(kaweth_new_code_fix);
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
new file mode 100644
index 000000000000..6240b978fe3d
--- /dev/null
+++ b/drivers/net/usb/mcs7830.c
@@ -0,0 +1,534 @@
1/*
2 * MosChip MCS7830 based USB 2.0 Ethernet Devices
3 *
4 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver
5 *
6 * Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>
7 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
8 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
9 * Copyright (c) 2002-2003 TiVo Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/crc32.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/init.h>
30#include <linux/mii.h>
31#include <linux/module.h>
32#include <linux/netdevice.h>
33#include <linux/usb.h>
34
35#include "usbnet.h"
36
37/* requests */
38#define MCS7830_RD_BMREQ (USB_DIR_IN | USB_TYPE_VENDOR | \
39 USB_RECIP_DEVICE)
40#define MCS7830_WR_BMREQ (USB_DIR_OUT | USB_TYPE_VENDOR | \
41 USB_RECIP_DEVICE)
42#define MCS7830_RD_BREQ 0x0E
43#define MCS7830_WR_BREQ 0x0D
44
45#define MCS7830_CTRL_TIMEOUT 1000
46#define MCS7830_MAX_MCAST 64
47
48#define MCS7830_VENDOR_ID 0x9710
49#define MCS7830_PRODUCT_ID 0x7830
50
51#define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \
52 ADVERTISE_100HALF | ADVERTISE_10FULL | \
53 ADVERTISE_10HALF | ADVERTISE_CSMA)
54
55/* index values corresponding to the HIF_REG_XX registers */
56enum {
57 HIF_REG_MULTICAST_HASH = 0x00,
58 HIF_REG_PACKET_GAP1 = 0x08,
59 HIF_REG_PACKET_GAP2 = 0x09,
60 HIF_REG_PHY_DATA = 0x0a,
61 HIF_REG_PHY_CMD1 = 0x0c,
62 HIF_REG_PHY_CMD1_READ = 0x40,
63 HIF_REG_PHY_CMD1_WRITE = 0x20,
64 HIF_REG_PHY_CMD1_PHYADDR = 0x01,
65 HIF_REG_PHY_CMD2 = 0x0d,
66 HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80,
67 HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40,
68 HIF_REG_CONFIG = 0x0e,
69 HIF_REG_CONFIG_CFG = 0x80,
70 HIF_REG_CONFIG_SPEED100 = 0x40,
71 HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20,
72 HIF_REG_CONFIG_RXENABLE = 0x10,
73 HIF_REG_CONFIG_TXENABLE = 0x08,
74 HIF_REG_CONFIG_SLEEPMODE = 0x04,
75 HIF_REG_CONFIG_ALLMULTICAST = 0x02,
76 HIF_REG_CONFIG_PROMISCUOUS = 0x01,
77 HIF_REG_ETHERNET_ADDR = 0x0f,
78 HIF_REG_22 = 0x15,
79 HIF_REG_PAUSE_THRESHOLD = 0x16,
80 HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0,
81};
82
83struct mcs7830_data {
84 u8 multi_filter[8];
85 u8 config;
86};
87
88static const char driver_name[] = "MOSCHIP usb-ethernet driver";
89
90static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
91{
92 struct usb_device *xdev = dev->udev;
93 int ret;
94
95 ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
96 MCS7830_RD_BMREQ, 0x0000, index, data,
97 size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT));
98 return ret;
99}
100
101static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
102{
103 struct usb_device *xdev = dev->udev;
104 int ret;
105
106 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
107 MCS7830_WR_BMREQ, 0x0000, index, data,
108 size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT));
109 return ret;
110}
111
112static void mcs7830_async_cmd_callback(struct urb *urb)
113{
114 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
115
116 if (urb->status < 0)
117 printk(KERN_DEBUG "mcs7830_async_cmd_callback() failed with %d",
118 urb->status);
119
120 kfree(req);
121 usb_free_urb(urb);
122}
123
124static void mcs7830_set_reg_async(struct usbnet *dev, u16 index, u16 size, void *data)
125{
126 struct usb_ctrlrequest *req;
127 int ret;
128 struct urb *urb;
129
130 urb = usb_alloc_urb(0, GFP_ATOMIC);
131 if (!urb) {
132 dev_dbg(&dev->udev->dev, "Error allocating URB "
133 "in mcs7830_set_reg_async!\n");
134 return;
135 }
136
137 req = kmalloc(sizeof *req, GFP_ATOMIC);
138 if (!req) {
139 dev_err(&dev->udev->dev, "Failed to allocate memory for "
140 "control request");
141 goto out;
142 }
143 req->bRequestType = MCS7830_WR_BMREQ;
144 req->bRequest = MCS7830_WR_BREQ;
145 req->wValue = 0;
146 req->wIndex = cpu_to_le16(index);
147 req->wLength = cpu_to_le16(size);
148
149 usb_fill_control_urb(urb, dev->udev,
150 usb_sndctrlpipe(dev->udev, 0),
151 (void *)req, data, size,
152 mcs7830_async_cmd_callback, req);
153
154 ret = usb_submit_urb(urb, GFP_ATOMIC);
155 if (ret < 0) {
156 dev_err(&dev->udev->dev, "Error submitting the control "
157 "message: ret=%d", ret);
158 goto out;
159 }
160 return;
161out:
162 kfree(req);
163 usb_free_urb(urb);
164}
165
166static int mcs7830_get_address(struct usbnet *dev)
167{
168 int ret;
169 ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
170 dev->net->dev_addr);
171 if (ret < 0)
172 return ret;
173 return 0;
174}
175
176static int mcs7830_read_phy(struct usbnet *dev, u8 index)
177{
178 int ret;
179 int i;
180 __le16 val;
181
182 u8 cmd[2] = {
183 HIF_REG_PHY_CMD1_READ | HIF_REG_PHY_CMD1_PHYADDR,
184 HIF_REG_PHY_CMD2_PEND_FLAG_BIT | index,
185 };
186
187 mutex_lock(&dev->phy_mutex);
188 /* write the MII command */
189 ret = mcs7830_set_reg(dev, HIF_REG_PHY_CMD1, 2, cmd);
190 if (ret < 0)
191 goto out;
192
193 /* wait for the data to become valid; should take less than 1 ms */
194 for (i = 0; i < 10; i++) {
195 ret = mcs7830_get_reg(dev, HIF_REG_PHY_CMD1, 2, cmd);
196 if ((ret < 0) || (cmd[1] & HIF_REG_PHY_CMD2_READY_FLAG_BIT))
197 break;
198 ret = -EIO;
199 msleep(1);
200 }
201 if (ret < 0)
202 goto out;
203
204 /* read actual register contents */
205 ret = mcs7830_get_reg(dev, HIF_REG_PHY_DATA, 2, &val);
206 if (ret < 0)
207 goto out;
208 ret = le16_to_cpu(val);
209 dev_dbg(&dev->udev->dev, "read PHY reg %02x: %04x (%d tries)\n",
210 index, val, i);
211out:
212 mutex_unlock(&dev->phy_mutex);
213 return ret;
214}
215
216static int mcs7830_write_phy(struct usbnet *dev, u8 index, u16 val)
217{
218 int ret;
219 int i;
220 __le16 le_val;
221
222 u8 cmd[2] = {
223 HIF_REG_PHY_CMD1_WRITE | HIF_REG_PHY_CMD1_PHYADDR,
224 HIF_REG_PHY_CMD2_PEND_FLAG_BIT | (index & 0x1F),
225 };
226
227 mutex_lock(&dev->phy_mutex);
228
229 /* write the new register contents */
230 le_val = cpu_to_le16(val);
231 ret = mcs7830_set_reg(dev, HIF_REG_PHY_DATA, 2, &le_val);
232 if (ret < 0)
233 goto out;
234
235 /* write the MII command */
236 ret = mcs7830_set_reg(dev, HIF_REG_PHY_CMD1, 2, cmd);
237 if (ret < 0)
238 goto out;
239
240 /* wait for the command to be accepted by the PHY */
241 for (i = 0; i < 10; i++) {
242 ret = mcs7830_get_reg(dev, HIF_REG_PHY_CMD1, 2, cmd);
243 if ((ret < 0) || (cmd[1] & HIF_REG_PHY_CMD2_READY_FLAG_BIT))
244 break;
245 ret = -EIO;
246 msleep(1);
247 }
248 if (ret < 0)
249 goto out;
250
251 ret = 0;
252 dev_dbg(&dev->udev->dev, "write PHY reg %02x: %04x (%d tries)\n",
253 index, val, i);
254out:
255 mutex_unlock(&dev->phy_mutex);
256 return ret;
257}
258
259/*
260 * This algorithm comes from the original mcs7830 version 1.4 driver,
261 * not sure if it is needed.
262 */
263static int mcs7830_set_autoneg(struct usbnet *dev, int ptrUserPhyMode)
264{
265 int ret;
266 /* Enable all media types */
267 ret = mcs7830_write_phy(dev, MII_ADVERTISE, MCS7830_MII_ADVERTISE);
268
269 /* First reset BMCR */
270 if (!ret)
271 ret = mcs7830_write_phy(dev, MII_BMCR, 0x0000);
272 /* Enable Auto Neg */
273 if (!ret)
274 ret = mcs7830_write_phy(dev, MII_BMCR, BMCR_ANENABLE);
275 /* Restart Auto Neg (Keep the Enable Auto Neg Bit Set) */
276 if (!ret)
277 ret = mcs7830_write_phy(dev, MII_BMCR,
278 BMCR_ANENABLE | BMCR_ANRESTART );
279 return ret < 0 ? ret : 0;
280}
281
282
283/*
284 * if we can read register 22, the chip revision is C or higher
285 */
286static int mcs7830_get_rev(struct usbnet *dev)
287{
288 u8 dummy[2];
289 int ret;
290 ret = mcs7830_get_reg(dev, HIF_REG_22, 2, dummy);
291 if (ret > 0)
292 return 2; /* Rev C or later */
293 return 1; /* earlier revision */
294}
295
296/*
297 * On rev. C we need to set the pause threshold
298 */
299static void mcs7830_rev_C_fixup(struct usbnet *dev)
300{
301 u8 pause_threshold = HIF_REG_PAUSE_THRESHOLD_DEFAULT;
302 int retry;
303
304 for (retry = 0; retry < 2; retry++) {
305 if (mcs7830_get_rev(dev) == 2) {
306 dev_info(&dev->udev->dev, "applying rev.C fixup\n");
307 mcs7830_set_reg(dev, HIF_REG_PAUSE_THRESHOLD,
308 1, &pause_threshold);
309 }
310 msleep(1);
311 }
312}
313
314static int mcs7830_init_dev(struct usbnet *dev)
315{
316 int ret;
317 int retry;
318
319 /* Read MAC address from EEPROM */
320 ret = -EINVAL;
321 for (retry = 0; retry < 5 && ret; retry++)
322 ret = mcs7830_get_address(dev);
323 if (ret) {
324 dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
325 goto out;
326 }
327
328 /* Set up PHY */
329 ret = mcs7830_set_autoneg(dev, 0);
330 if (ret) {
331 dev_info(&dev->udev->dev, "Cannot set autoneg\n");
332 goto out;
333 }
334
335 mcs7830_rev_C_fixup(dev);
336 ret = 0;
337out:
338 return ret;
339}
340
341static int mcs7830_mdio_read(struct net_device *netdev, int phy_id,
342 int location)
343{
344 struct usbnet *dev = netdev->priv;
345 return mcs7830_read_phy(dev, location);
346}
347
348static void mcs7830_mdio_write(struct net_device *netdev, int phy_id,
349 int location, int val)
350{
351 struct usbnet *dev = netdev->priv;
352 mcs7830_write_phy(dev, location, val);
353}
354
355static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
356{
357 struct usbnet *dev = netdev_priv(net);
358 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
359}
360
361/* credits go to asix_set_multicast */
362static void mcs7830_set_multicast(struct net_device *net)
363{
364 struct usbnet *dev = netdev_priv(net);
365 struct mcs7830_data *data = (struct mcs7830_data *)&dev->data;
366
367 data->config = HIF_REG_CONFIG_TXENABLE;
368
369 /* this should not be needed, but it doesn't work otherwise */
370 data->config |= HIF_REG_CONFIG_ALLMULTICAST;
371
372 if (net->flags & IFF_PROMISC) {
373 data->config |= HIF_REG_CONFIG_PROMISCUOUS;
374 } else if (net->flags & IFF_ALLMULTI
375 || net->mc_count > MCS7830_MAX_MCAST) {
376 data->config |= HIF_REG_CONFIG_ALLMULTICAST;
377 } else if (net->mc_count == 0) {
378 /* just broadcast and directed */
379 } else {
380 /* We use the 20 byte dev->data
381 * for our 8 byte filter buffer
382 * to avoid allocating memory that
383 * is tricky to free later */
384 struct dev_mc_list *mc_list = net->mc_list;
385 u32 crc_bits;
386 int i;
387
388 memset(data->multi_filter, 0, sizeof data->multi_filter);
389
390 /* Build the multicast hash filter. */
391 for (i = 0; i < net->mc_count; i++) {
392 crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
393 data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
394 mc_list = mc_list->next;
395 }
396
397 mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
398 sizeof data->multi_filter,
399 data->multi_filter);
400 }
401
402 mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
403}
404
405static int mcs7830_get_regs_len(struct net_device *net)
406{
407 struct usbnet *dev = netdev_priv(net);
408
409 switch (mcs7830_get_rev(dev)) {
410 case 1:
411 return 21;
412 case 2:
413 return 32;
414 }
415 return 0;
416}
417
418static void mcs7830_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *drvinfo)
419{
420 usbnet_get_drvinfo(net, drvinfo);
421 drvinfo->regdump_len = mcs7830_get_regs_len(net);
422}
423
424static void mcs7830_get_regs(struct net_device *net, struct ethtool_regs *regs, void *data)
425{
426 struct usbnet *dev = netdev_priv(net);
427
428 regs->version = mcs7830_get_rev(dev);
429 mcs7830_get_reg(dev, 0, regs->len, data);
430}
431
432static struct ethtool_ops mcs7830_ethtool_ops = {
433 .get_drvinfo = mcs7830_get_drvinfo,
434 .get_regs_len = mcs7830_get_regs_len,
435 .get_regs = mcs7830_get_regs,
436
437 /* common usbnet calls */
438 .get_link = usbnet_get_link,
439 .get_msglevel = usbnet_get_msglevel,
440 .set_msglevel = usbnet_set_msglevel,
441 .get_settings = usbnet_get_settings,
442 .set_settings = usbnet_set_settings,
443 .nway_reset = usbnet_nway_reset,
444};
445
446static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
447{
448 struct net_device *net = dev->net;
449 int ret;
450
451 ret = mcs7830_init_dev(dev);
452 if (ret)
453 goto out;
454
455 net->do_ioctl = mcs7830_ioctl;
456 net->ethtool_ops = &mcs7830_ethtool_ops;
457 net->set_multicast_list = mcs7830_set_multicast;
458 mcs7830_set_multicast(net);
459
460 /* reserve space for the status byte on rx */
461 dev->rx_urb_size = ETH_FRAME_LEN + 1;
462
463 dev->mii.mdio_read = mcs7830_mdio_read;
464 dev->mii.mdio_write = mcs7830_mdio_write;
465 dev->mii.dev = net;
466 dev->mii.phy_id_mask = 0x3f;
467 dev->mii.reg_num_mask = 0x1f;
468 dev->mii.phy_id = *((u8 *) net->dev_addr + 1);
469
470 ret = usbnet_get_endpoints(dev, udev);
471out:
472 return ret;
473}
474
475/* The chip always appends a status byte that we need to strip */
476static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
477{
478 u8 status;
479
480 if (skb->len == 0) {
481 dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
482 return 0;
483 }
484
485 skb_trim(skb, skb->len - 1);
486 status = skb->data[skb->len];
487
488 if (status != 0x20)
489 dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status);
490
491 return skb->len > 0;
492}
493
494static const struct driver_info moschip_info = {
495 .description = "MOSCHIP 7830 usb-NET adapter",
496 .bind = mcs7830_bind,
497 .rx_fixup = mcs7830_rx_fixup,
498 .flags = FLAG_ETHER,
499 .in = 1,
500 .out = 2,
501};
502
503static const struct usb_device_id products[] = {
504 {
505 USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID),
506 .driver_info = (unsigned long) &moschip_info,
507 },
508 {},
509};
510MODULE_DEVICE_TABLE(usb, products);
511
512static struct usb_driver mcs7830_driver = {
513 .name = driver_name,
514 .id_table = products,
515 .probe = usbnet_probe,
516 .disconnect = usbnet_disconnect,
517 .suspend = usbnet_suspend,
518 .resume = usbnet_resume,
519};
520
521static int __init mcs7830_init(void)
522{
523 return usb_register(&mcs7830_driver);
524}
525module_init(mcs7830_init);
526
527static void __exit mcs7830_exit(void)
528{
529 usb_deregister(&mcs7830_driver);
530}
531module_exit(mcs7830_exit);
532
533MODULE_DESCRIPTION("USB to network adapter MCS7830");
534MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
new file mode 100644
index 000000000000..19bf8dae70c9
--- /dev/null
+++ b/drivers/net/usb/net1080.c
@@ -0,0 +1,615 @@
1/*
2 * Net1080 based USB host-to-host cables
3 * Copyright (C) 2000-2005 by David Brownell
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20// #define DEBUG // error path messages, extra info
21// #define VERBOSE // more; success messages
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/workqueue.h>
29#include <linux/mii.h>
30#include <linux/usb.h>
31
32#include <asm/unaligned.h>
33
34#include "usbnet.h"
35
36
37/*
38 * NetChip 1080 driver ... http://www.netchip.com
39 * (Sept 2004: End-of-life announcement has been sent.)
40 * Used in (some) LapLink cables
41 */
42
43#define frame_errors data[1]
44
45/*
46 * NetChip framing of ethernet packets, supporting additional error
47 * checks for links that may drop bulk packets from inside messages.
48 * Odd USB length == always short read for last usb packet.
49 * - nc_header
50 * - Ethernet header (14 bytes)
51 * - payload
52 * - (optional padding byte, if needed so length becomes odd)
53 * - nc_trailer
54 *
55 * This framing is to be avoided for non-NetChip devices.
56 */
57
58struct nc_header { // packed:
59 __le16 hdr_len; // sizeof nc_header (LE, all)
60 __le16 packet_len; // payload size (including ethhdr)
61 __le16 packet_id; // detects dropped packets
62#define MIN_HEADER 6
63
64 // all else is optional, and must start with:
65 // __le16 vendorId; // from usb-if
66 // __le16 productId;
67} __attribute__((__packed__));
68
69#define PAD_BYTE ((unsigned char)0xAC)
70
71struct nc_trailer {
72 __le16 packet_id;
73} __attribute__((__packed__));
74
75// packets may use FLAG_FRAMING_NC and optional pad
76#define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \
77 + sizeof (struct ethhdr) \
78 + (mtu) \
79 + 1 \
80 + sizeof (struct nc_trailer))
81
82#define MIN_FRAMED FRAMED_SIZE(0)
83
84/* packets _could_ be up to 64KB... */
85#define NC_MAX_PACKET 32767
86
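/*
 * Worked example (illustrative, not part of the driver): with the packed
 * six byte nc_header and the 14 byte ethhdr,
 *
 *	FRAMED_SIZE(1500) = 6 + 14 + 1500 + 1 + 2 = 1523
 *
 * which matches the rx urb size net1080_bind() computes further down
 * (ETH_HLEN plus 9 bytes of framing overhead plus the MTU).  The single
 * pad byte is only sent when it is needed to make the transfer length odd.
 */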
87
88/*
89 * Zero means no timeout; else, how long a 64 byte bulk packet may be queued
90 * before the hardware drops it. If that's done, the driver will need to
91 * frame network packets to guard against the dropped USB packets. The win32
92 * driver sets this for both sides of the link.
93 */
94#define NC_READ_TTL_MS ((u8)255) // ms
95
96/*
97 * We ignore most registers and EEPROM contents.
98 */
99#define REG_USBCTL ((u8)0x04)
100#define REG_TTL ((u8)0x10)
101#define REG_STATUS ((u8)0x11)
102
103/*
104 * Vendor specific requests to read/write data
105 */
106#define REQUEST_REGISTER ((u8)0x10)
107#define REQUEST_EEPROM ((u8)0x11)
108
109static int
110nc_vendor_read(struct usbnet *dev, u8 req, u8 regnum, u16 *retval_ptr)
111{
112 int status = usb_control_msg(dev->udev,
113 usb_rcvctrlpipe(dev->udev, 0),
114 req,
115 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
116 0, regnum,
117 retval_ptr, sizeof *retval_ptr,
118 USB_CTRL_GET_TIMEOUT);
119 if (status > 0)
120 status = 0;
121 if (!status)
122 le16_to_cpus(retval_ptr);
123 return status;
124}
125
126static inline int
127nc_register_read(struct usbnet *dev, u8 regnum, u16 *retval_ptr)
128{
129 return nc_vendor_read(dev, REQUEST_REGISTER, regnum, retval_ptr);
130}
131
132// no retval ... can become async, usable in_interrupt()
133static void
134nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value)
135{
136 usb_control_msg(dev->udev,
137 usb_sndctrlpipe(dev->udev, 0),
138 req,
139 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
140 value, regnum,
141 NULL, 0, // data is in setup packet
142 USB_CTRL_SET_TIMEOUT);
143}
144
145static inline void
146nc_register_write(struct usbnet *dev, u8 regnum, u16 value)
147{
148 nc_vendor_write(dev, REQUEST_REGISTER, regnum, value);
149}
150
151
152#if 0
153static void nc_dump_registers(struct usbnet *dev)
154{
155 u8 reg;
156 u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);
157
158 if (!vp) {
159 dbg("no memory?");
160 return;
161 }
162
163 dbg("%s registers:", dev->net->name);
164 for (reg = 0; reg < 0x20; reg++) {
165 int retval;
166
167 // reading some registers is trouble
168 if (reg >= 0x08 && reg <= 0xf)
169 continue;
170 if (reg >= 0x12 && reg <= 0x1e)
171 continue;
172
173 retval = nc_register_read(dev, reg, vp);
174 if (retval < 0)
175 dbg("%s reg [0x%x] ==> error %d",
176 dev->net->name, reg, retval);
177 else
178 dbg("%s reg [0x%x] = 0x%x",
179 dev->net->name, reg, *vp);
180 }
181 kfree(vp);
182}
183#endif
184
185
186/*-------------------------------------------------------------------------*/
187
188/*
189 * Control register
190 */
191
192#define USBCTL_WRITABLE_MASK 0x1f0f
193// bits 15-13 reserved, r/o
194#define USBCTL_ENABLE_LANG (1 << 12)
195#define USBCTL_ENABLE_MFGR (1 << 11)
196#define USBCTL_ENABLE_PROD (1 << 10)
197#define USBCTL_ENABLE_SERIAL (1 << 9)
198#define USBCTL_ENABLE_DEFAULTS (1 << 8)
199// bits 7-4 reserved, r/o
200#define USBCTL_FLUSH_OTHER (1 << 3)
201#define USBCTL_FLUSH_THIS (1 << 2)
202#define USBCTL_DISCONN_OTHER (1 << 1)
203#define USBCTL_DISCONN_THIS (1 << 0)
204
205static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl)
206{
207 if (!netif_msg_link(dev))
208 return;
209 devdbg(dev, "net1080 %s-%s usbctl 0x%x:%s%s%s%s%s;"
210 " this%s%s;"
211 " other%s%s; r/o 0x%x",
212 dev->udev->bus->bus_name, dev->udev->devpath,
213 usbctl,
214 (usbctl & USBCTL_ENABLE_LANG) ? " lang" : "",
215 (usbctl & USBCTL_ENABLE_MFGR) ? " mfgr" : "",
216 (usbctl & USBCTL_ENABLE_PROD) ? " prod" : "",
217 (usbctl & USBCTL_ENABLE_SERIAL) ? " serial" : "",
218 (usbctl & USBCTL_ENABLE_DEFAULTS) ? " defaults" : "",
219
220 (usbctl & USBCTL_FLUSH_OTHER) ? " FLUSH" : "",
221 (usbctl & USBCTL_DISCONN_OTHER) ? " DIS" : "",
222 (usbctl & USBCTL_FLUSH_THIS) ? " FLUSH" : "",
223 (usbctl & USBCTL_DISCONN_THIS) ? " DIS" : "",
224 usbctl & ~USBCTL_WRITABLE_MASK
225 );
226}
227
228/*-------------------------------------------------------------------------*/
229
230/*
231 * Status register
232 */
233
234#define STATUS_PORT_A (1 << 15)
235
236#define STATUS_CONN_OTHER (1 << 14)
237#define STATUS_SUSPEND_OTHER (1 << 13)
238#define STATUS_MAILBOX_OTHER (1 << 12)
239#define STATUS_PACKETS_OTHER(n) (((n) >> 8) & 0x03)
240
241#define STATUS_CONN_THIS (1 << 6)
242#define STATUS_SUSPEND_THIS (1 << 5)
243#define STATUS_MAILBOX_THIS (1 << 4)
244#define STATUS_PACKETS_THIS(n) (((n) >> 0) & 0x03)
245
246#define STATUS_UNSPEC_MASK 0x0c8c
247#define STATUS_NOISE_MASK ((u16)~(0x0303|STATUS_UNSPEC_MASK))
248
249
250static inline void nc_dump_status(struct usbnet *dev, u16 status)
251{
252 if (!netif_msg_link(dev))
253 return;
254 devdbg(dev, "net1080 %s-%s status 0x%x:"
255 " this (%c) PKT=%d%s%s%s;"
256 " other PKT=%d%s%s%s; unspec 0x%x",
257 dev->udev->bus->bus_name, dev->udev->devpath,
258 status,
259
260 // XXX the packet counts don't seem right
261 // (1 at reset, not 0); maybe UNSPEC too
262
263 (status & STATUS_PORT_A) ? 'A' : 'B',
264 STATUS_PACKETS_THIS(status),
265 (status & STATUS_CONN_THIS) ? " CON" : "",
266 (status & STATUS_SUSPEND_THIS) ? " SUS" : "",
267 (status & STATUS_MAILBOX_THIS) ? " MBOX" : "",
268
269 STATUS_PACKETS_OTHER(status),
270 (status & STATUS_CONN_OTHER) ? " CON" : "",
271 (status & STATUS_SUSPEND_OTHER) ? " SUS" : "",
272 (status & STATUS_MAILBOX_OTHER) ? " MBOX" : "",
273
274 status & STATUS_UNSPEC_MASK
275 );
276}
277
278/*-------------------------------------------------------------------------*/
279
280/*
281 * TTL register
282 */
283
284#define TTL_THIS(ttl) (0x00ff & ttl)
285#define TTL_OTHER(ttl) (0x00ff & (ttl >> 8))
286#define MK_TTL(this,other) ((u16)(((other)<<8)|(0x00ff&(this))))
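
/*
 * Worked example (illustrative only): net1080_reset() below keeps the
 * peer's TTL and programs ours to NC_READ_TTL_MS, i.e.
 * MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)).  If the peer's byte were 0x40,
 * that packs to 0x40ff: the low byte is this side's TTL in milliseconds,
 * the high byte the peer's.
 */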
287
288static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
289{
290 if (netif_msg_link(dev))
291 devdbg(dev, "net1080 %s-%s ttl 0x%x this = %d, other = %d",
292 dev->udev->bus->bus_name, dev->udev->devpath,
293 ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
294}
295
296/*-------------------------------------------------------------------------*/
297
298static int net1080_reset(struct usbnet *dev)
299{
300 u16 usbctl, status, ttl;
301 u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);
302 int retval;
303
304 if (!vp)
305 return -ENOMEM;
306
307 // nc_dump_registers(dev);
308
309 if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
310 dbg("can't read %s-%s status: %d",
311 dev->udev->bus->bus_name, dev->udev->devpath, retval);
312 goto done;
313 }
314 status = *vp;
315 nc_dump_status(dev, status);
316
317 if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
318 dbg("can't read USBCTL, %d", retval);
319 goto done;
320 }
321 usbctl = *vp;
322 nc_dump_usbctl(dev, usbctl);
323
324 nc_register_write(dev, REG_USBCTL,
325 USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
326
327 if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
328 dbg("can't read TTL, %d", retval);
329 goto done;
330 }
331 ttl = *vp;
332 // nc_dump_ttl(dev, ttl);
333
334 nc_register_write(dev, REG_TTL,
335 MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
336 dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS);
337
338 if (netif_msg_link(dev))
339 devinfo(dev, "port %c, peer %sconnected",
340 (status & STATUS_PORT_A) ? 'A' : 'B',
341 (status & STATUS_CONN_OTHER) ? "" : "dis"
342 );
343 retval = 0;
344
345done:
346 kfree(vp);
347 return retval;
348}
349
350static int net1080_check_connect(struct usbnet *dev)
351{
352 int retval;
353 u16 status;
354 u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);
355
356 if (!vp)
357 return -ENOMEM;
358 retval = nc_register_read(dev, REG_STATUS, vp);
359 status = *vp;
360 kfree(vp);
361 if (retval != 0) {
362 dbg("%s net1080_check_conn read - %d", dev->net->name, retval);
363 return retval;
364 }
365 if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER)
366 return -ENOLINK;
367 return 0;
368}
369
370static void nc_flush_complete(struct urb *urb)
371{
372 kfree(urb->context);
373 usb_free_urb(urb);
374}
375
376static void nc_ensure_sync(struct usbnet *dev)
377{
378 dev->frame_errors++;
379 if (dev->frame_errors > 5) {
380 struct urb *urb;
381 struct usb_ctrlrequest *req;
382 int status;
383
384 /* Send a flush */
385 urb = usb_alloc_urb(0, GFP_ATOMIC);
386 if (!urb)
387 return;
388
389 req = kmalloc(sizeof *req, GFP_ATOMIC);
390 if (!req) {
391 usb_free_urb(urb);
392 return;
393 }
394
395 req->bRequestType = USB_DIR_OUT
396 | USB_TYPE_VENDOR
397 | USB_RECIP_DEVICE;
398 req->bRequest = REQUEST_REGISTER;
399 req->wValue = cpu_to_le16(USBCTL_FLUSH_THIS
400 | USBCTL_FLUSH_OTHER);
401 req->wIndex = cpu_to_le16(REG_USBCTL);
402 req->wLength = cpu_to_le16(0);
403
404 /* queue an async control request, we don't need
405 * to do anything when it finishes except clean up.
406 */
407 usb_fill_control_urb(urb, dev->udev,
408 usb_sndctrlpipe(dev->udev, 0),
409 (unsigned char *) req,
410 NULL, 0,
411 nc_flush_complete, req);
412 status = usb_submit_urb(urb, GFP_ATOMIC);
413 if (status) {
414 kfree(req);
415 usb_free_urb(urb);
416 return;
417 }
418
419 if (netif_msg_rx_err(dev))
420 devdbg(dev, "flush net1080; too many framing errors");
421 dev->frame_errors = 0;
422 }
423}
424
425static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
426{
427 struct nc_header *header;
428 struct nc_trailer *trailer;
429 u16 hdr_len, packet_len;
430
431 if (!(skb->len & 0x01)) {
432#ifdef DEBUG
433 struct net_device *net = dev->net;
434 dbg("rx framesize %d range %d..%d mtu %d", skb->len,
435 net->hard_header_len, dev->hard_mtu, net->mtu);
436#endif
437 dev->stats.rx_frame_errors++;
438 nc_ensure_sync(dev);
439 return 0;
440 }
441
442 header = (struct nc_header *) skb->data;
443 hdr_len = le16_to_cpup(&header->hdr_len);
444 packet_len = le16_to_cpup(&header->packet_len);
445 if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
446 dev->stats.rx_frame_errors++;
447 dbg("packet too big, %d", packet_len);
448 nc_ensure_sync(dev);
449 return 0;
450 } else if (hdr_len < MIN_HEADER) {
451 dev->stats.rx_frame_errors++;
452 dbg("header too short, %d", hdr_len);
453 nc_ensure_sync(dev);
454 return 0;
455 } else if (hdr_len > MIN_HEADER) {
456 // out of band data for us?
457 dbg("header OOB, %d bytes", hdr_len - MIN_HEADER);
458 nc_ensure_sync(dev);
459 // switch (vendor/product ids) { ... }
460 }
461 skb_pull(skb, hdr_len);
462
463 trailer = (struct nc_trailer *)
464 (skb->data + skb->len - sizeof *trailer);
465 skb_trim(skb, skb->len - sizeof *trailer);
466
467 if ((packet_len & 0x01) == 0) {
468 if (skb->data [packet_len] != PAD_BYTE) {
469 dev->stats.rx_frame_errors++;
470 dbg("bad pad");
471 return 0;
472 }
473 skb_trim(skb, skb->len - 1);
474 }
475 if (skb->len != packet_len) {
476 dev->stats.rx_frame_errors++;
477 dbg("bad packet len %d (expected %d)",
478 skb->len, packet_len);
479 nc_ensure_sync(dev);
480 return 0;
481 }
482 if (header->packet_id != get_unaligned(&trailer->packet_id)) {
483 dev->stats.rx_fifo_errors++;
484 dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
485 le16_to_cpu(header->packet_id),
486 le16_to_cpu(trailer->packet_id));
487 return 0;
488 }
489#if 0
490 devdbg(dev, "frame <rx h %d p %d id %d", header->hdr_len,
491 header->packet_len, header->packet_id);
492#endif
493 dev->frame_errors = 0;
494 return 1;
495}
496
497static struct sk_buff *
498net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
499{
500 struct sk_buff *skb2;
501 struct nc_header *header = NULL;
502 struct nc_trailer *trailer = NULL;
503 int padlen = sizeof (struct nc_trailer);
504 int len = skb->len;
505
506 if (!((len + padlen + sizeof (struct nc_header)) & 0x01))
507 padlen++;
508 if (!skb_cloned(skb)) {
509 int headroom = skb_headroom(skb);
510 int tailroom = skb_tailroom(skb);
511
512 if (padlen <= tailroom &&
513 sizeof(struct nc_header) <= headroom)
514 /* There's enough head and tail room */
515 goto encapsulate;
516
517 if ((sizeof (struct nc_header) + padlen) <
518 (headroom + tailroom)) {
519 /* There's enough total room, so just readjust */
520 skb->data = memmove(skb->head
521 + sizeof (struct nc_header),
522 skb->data, skb->len);
523 skb_set_tail_pointer(skb, len);
524 goto encapsulate;
525 }
526 }
527
528 /* Create a new skb to use with the correct size */
529 skb2 = skb_copy_expand(skb,
530 sizeof (struct nc_header),
531 padlen,
532 flags);
533 dev_kfree_skb_any(skb);
534 if (!skb2)
535 return skb2;
536 skb = skb2;
537
538encapsulate:
539 /* header first */
540 header = (struct nc_header *) skb_push(skb, sizeof *header);
541 header->hdr_len = cpu_to_le16(sizeof (*header));
542 header->packet_len = cpu_to_le16(len);
543 header->packet_id = cpu_to_le16((u16)dev->xid++);
544
545 /* maybe pad; then trailer */
546 if (!((skb->len + sizeof *trailer) & 0x01))
547 *skb_put(skb, 1) = PAD_BYTE;
548 trailer = (struct nc_trailer *) skb_put(skb, sizeof *trailer);
549 put_unaligned(header->packet_id, &trailer->packet_id);
550#if 0
551 devdbg(dev, "frame >tx h %d p %d id %d",
552 header->hdr_len, header->packet_len,
553 header->packet_id);
554#endif
555 return skb;
556}
557
558static int net1080_bind(struct usbnet *dev, struct usb_interface *intf)
559{
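	/* worst-case framing overhead: NC header, one pad byte, trailer */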
560 unsigned extra = sizeof (struct nc_header)
561 + 1
562 + sizeof (struct nc_trailer);
563
564 dev->net->hard_header_len += extra;
565 dev->rx_urb_size = dev->net->hard_header_len + dev->net->mtu;
566 dev->hard_mtu = NC_MAX_PACKET;
567 return usbnet_get_endpoints (dev, intf);
568}
569
570static const struct driver_info net1080_info = {
571 .description = "NetChip TurboCONNECT",
572 .flags = FLAG_FRAMING_NC,
573 .bind = net1080_bind,
574 .reset = net1080_reset,
575 .check_connect = net1080_check_connect,
576 .rx_fixup = net1080_rx_fixup,
577 .tx_fixup = net1080_tx_fixup,
578};
579
580static const struct usb_device_id products [] = {
581{
582 USB_DEVICE(0x0525, 0x1080), // NetChip ref design
583 .driver_info = (unsigned long) &net1080_info,
584}, {
585 USB_DEVICE(0x06D0, 0x0622), // Laplink Gold
586 .driver_info = (unsigned long) &net1080_info,
587},
588 { }, // END
589};
590MODULE_DEVICE_TABLE(usb, products);
591
592static struct usb_driver net1080_driver = {
593 .name = "net1080",
594 .id_table = products,
595 .probe = usbnet_probe,
596 .disconnect = usbnet_disconnect,
597 .suspend = usbnet_suspend,
598 .resume = usbnet_resume,
599};
600
601static int __init net1080_init(void)
602{
603 return usb_register(&net1080_driver);
604}
605module_init(net1080_init);
606
607static void __exit net1080_exit(void)
608{
609 usb_deregister(&net1080_driver);
610}
611module_exit(net1080_exit);
612
613MODULE_AUTHOR("David Brownell");
614MODULE_DESCRIPTION("NetChip 1080 based USB Host-to-Host Links");
615MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
new file mode 100644
index 000000000000..a05fd97e5bc2
--- /dev/null
+++ b/drivers/net/usb/pegasus.c
@@ -0,0 +1,1504 @@
1/*
2 * Copyright (c) 1999-2005 Petko Manolov (petkan@users.sourceforge.net)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * ChangeLog:
9 * .... Most of the time spent on reading sources & docs.
10 * v0.2.x First official release for the Linux kernel.
 11 *	v0.3.0	Beautified and structured, some bugs fixed.
 12 *	v0.3.x	URBifying bulk requests and bugfixing. First relatively
 13 *		stable release. Still can touch the device's registers
 14 *		only from top-halves.
 15 *	v0.4.0	Control messages that remained un-urbified are now URBs.
 16 *		Now we can touch the HW at any time.
 17 *	v0.4.9	Control urbs again use process context to wait. Argh...
 18 *		Some long-standing bugs (enable_net_traffic) fixed.
 19 *		Also a nasty trick of resubmitting the control urb from
 20 *		interrupt context is used. Please let me know how it
 21 *		behaves. Pegasus II support added since this version.
 22 *		TODO: suppress HCD warning spewage on disconnect.
23 * v0.4.13 Ethernet address is now set at probe(), not at open()
24 * time as this seems to break dhcpd.
25 * v0.5.0 branch to 2.5.x kernels
26 * v0.5.1 ethtool support added
 27 *	v0.5.5	rx socket buffers are in a pool and their allocation
 28 *		is moved out of the interrupt routine.
29 */
30
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34#include <linux/delay.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
39#include <linux/usb.h>
40#include <linux/module.h>
41#include <asm/byteorder.h>
42#include <asm/uaccess.h>
43#include "pegasus.h"
44
45/*
46 * Version Information
47 */
48#define DRIVER_VERSION "v0.6.14 (2006/09/27)"
49#define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>"
50#define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver"
51
52static const char driver_name[] = "pegasus";
53
54#undef PEGASUS_WRITE_EEPROM
55#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
56 BMSR_100FULL | BMSR_ANEGCAPABLE)
57
58static int loopback = 0;
59static int mii_mode = 0;
60static char *devid=NULL;
61
62static struct usb_eth_dev usb_dev_id[] = {
63#define PEGASUS_DEV(pn, vid, pid, flags) \
64 {.name = pn, .vendor = vid, .device = pid, .private = flags},
65#include "pegasus.h"
66#undef PEGASUS_DEV
67 {NULL, 0, 0, 0},
68 {NULL, 0, 0, 0}
69};
70
71static struct usb_device_id pegasus_ids[] = {
72#define PEGASUS_DEV(pn, vid, pid, flags) \
73 {.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid},
74#include "pegasus.h"
75#undef PEGASUS_DEV
76 {},
77 {}
78};
79
80MODULE_AUTHOR(DRIVER_AUTHOR);
81MODULE_DESCRIPTION(DRIVER_DESC);
82MODULE_LICENSE("GPL");
83module_param(loopback, bool, 0);
84module_param(mii_mode, bool, 0);
85module_param(devid, charp, 0);
86MODULE_PARM_DESC(loopback, "Enable MAC loopback mode (bit 0)");
 87MODULE_PARM_DESC(mii_mode, "Enable HomePNA mode (bit 0); default is MII mode (0)");
88MODULE_PARM_DESC(devid, "The format is: 'DEV_name:VendorID:DeviceID:Flags'");
89
90/* use ethtool to change the level for any given device */
91static int msg_level = -1;
92module_param (msg_level, int, 0);
93MODULE_PARM_DESC (msg_level, "Override default message level");
94
95MODULE_DEVICE_TABLE(usb, pegasus_ids);
96
97static int update_eth_regs_async(pegasus_t *);
98/* Aargh!!! I _really_ hate such tweaks */
99static void ctrl_callback(struct urb *urb)
100{
101 pegasus_t *pegasus = urb->context;
102
103 if (!pegasus)
104 return;
105
106 switch (urb->status) {
107 case 0:
108 if (pegasus->flags & ETH_REGS_CHANGE) {
109 pegasus->flags &= ~ETH_REGS_CHANGE;
110 pegasus->flags |= ETH_REGS_CHANGED;
111 update_eth_regs_async(pegasus);
112 return;
113 }
114 break;
115 case -EINPROGRESS:
116 return;
117 case -ENOENT:
118 break;
119 default:
120 if (netif_msg_drv(pegasus))
121 dev_dbg(&pegasus->intf->dev, "%s, status %d\n",
122 __FUNCTION__, urb->status);
123 }
124 pegasus->flags &= ~ETH_REGS_CHANGED;
125 wake_up(&pegasus->ctrl_wait);
126}
127
128static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
129 void *data)
130{
131 int ret;
132 char *buffer;
133 DECLARE_WAITQUEUE(wait, current);
134
135 buffer = kmalloc(size, GFP_KERNEL);
136 if (!buffer) {
137 if (netif_msg_drv(pegasus))
138 dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
139 __FUNCTION__);
140 return -ENOMEM;
141 }
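	/* the driver shares a single control URB and usb_ctrlrequest, so
	 * wait until any asynchronous register update (ETH_REGS_CHANGED)
	 * has completed and woken us before reusing them.
	 */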
142 add_wait_queue(&pegasus->ctrl_wait, &wait);
143 set_current_state(TASK_UNINTERRUPTIBLE);
144 while (pegasus->flags & ETH_REGS_CHANGED)
145 schedule();
146 remove_wait_queue(&pegasus->ctrl_wait, &wait);
147 set_current_state(TASK_RUNNING);
148
149 pegasus->dr.bRequestType = PEGASUS_REQT_READ;
150 pegasus->dr.bRequest = PEGASUS_REQ_GET_REGS;
151 pegasus->dr.wValue = cpu_to_le16(0);
152 pegasus->dr.wIndex = cpu_to_le16p(&indx);
153 pegasus->dr.wLength = cpu_to_le16p(&size);
154 pegasus->ctrl_urb->transfer_buffer_length = size;
155
156 usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
157 usb_rcvctrlpipe(pegasus->usb, 0),
158 (char *) &pegasus->dr,
159 buffer, size, ctrl_callback, pegasus);
160
161 add_wait_queue(&pegasus->ctrl_wait, &wait);
162 set_current_state(TASK_UNINTERRUPTIBLE);
163
164 /* using ATOMIC, we'd never wake up if we slept */
165 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
166 set_current_state(TASK_RUNNING);
167 if (ret == -ENODEV)
168 netif_device_detach(pegasus->net);
169 if (netif_msg_drv(pegasus))
170 dev_err(&pegasus->intf->dev, "%s, status %d\n",
171 __FUNCTION__, ret);
172 goto out;
173 }
174
175 schedule();
176out:
177 remove_wait_queue(&pegasus->ctrl_wait, &wait);
178 memcpy(data, buffer, size);
179 kfree(buffer);
180
181 return ret;
182}
183
184static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
185 void *data)
186{
187 int ret;
188 char *buffer;
189 DECLARE_WAITQUEUE(wait, current);
190
191 buffer = kmalloc(size, GFP_KERNEL);
192 if (!buffer) {
193 if (netif_msg_drv(pegasus))
194 dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
195 __FUNCTION__);
196 return -ENOMEM;
197 }
198 memcpy(buffer, data, size);
199
200 add_wait_queue(&pegasus->ctrl_wait, &wait);
201 set_current_state(TASK_UNINTERRUPTIBLE);
202 while (pegasus->flags & ETH_REGS_CHANGED)
203 schedule();
204 remove_wait_queue(&pegasus->ctrl_wait, &wait);
205 set_current_state(TASK_RUNNING);
206
207 pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
208 pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
209 pegasus->dr.wValue = cpu_to_le16(0);
210 pegasus->dr.wIndex = cpu_to_le16p(&indx);
211 pegasus->dr.wLength = cpu_to_le16p(&size);
212 pegasus->ctrl_urb->transfer_buffer_length = size;
213
214 usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
215 usb_sndctrlpipe(pegasus->usb, 0),
216 (char *) &pegasus->dr,
217 buffer, size, ctrl_callback, pegasus);
218
219 add_wait_queue(&pegasus->ctrl_wait, &wait);
220 set_current_state(TASK_UNINTERRUPTIBLE);
221
222 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
223 if (ret == -ENODEV)
224 netif_device_detach(pegasus->net);
225 if (netif_msg_drv(pegasus))
226 dev_err(&pegasus->intf->dev, "%s, status %d\n",
227 __FUNCTION__, ret);
228 goto out;
229 }
230
231 schedule();
232out:
233 remove_wait_queue(&pegasus->ctrl_wait, &wait);
234 kfree(buffer);
235
236 return ret;
237}
238
239static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
240{
241 int ret;
242 char *tmp;
243 DECLARE_WAITQUEUE(wait, current);
244
245 tmp = kmalloc(1, GFP_KERNEL);
246 if (!tmp) {
247 if (netif_msg_drv(pegasus))
248 dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
249 __FUNCTION__);
250 return -ENOMEM;
251 }
252 memcpy(tmp, &data, 1);
253 add_wait_queue(&pegasus->ctrl_wait, &wait);
254 set_current_state(TASK_UNINTERRUPTIBLE);
255 while (pegasus->flags & ETH_REGS_CHANGED)
256 schedule();
257 remove_wait_queue(&pegasus->ctrl_wait, &wait);
258 set_current_state(TASK_RUNNING);
259
260 pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
261 pegasus->dr.bRequest = PEGASUS_REQ_SET_REG;
262 pegasus->dr.wValue = cpu_to_le16(data);
263 pegasus->dr.wIndex = cpu_to_le16p(&indx);
264 pegasus->dr.wLength = cpu_to_le16(1);
265 pegasus->ctrl_urb->transfer_buffer_length = 1;
266
267 usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
268 usb_sndctrlpipe(pegasus->usb, 0),
269 (char *) &pegasus->dr,
270 tmp, 1, ctrl_callback, pegasus);
271
272 add_wait_queue(&pegasus->ctrl_wait, &wait);
273 set_current_state(TASK_UNINTERRUPTIBLE);
274
275 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
276 if (ret == -ENODEV)
277 netif_device_detach(pegasus->net);
278 if (netif_msg_drv(pegasus))
279 dev_err(&pegasus->intf->dev, "%s, status %d\n",
280 __FUNCTION__, ret);
281 goto out;
282 }
283
284 schedule();
285out:
286 remove_wait_queue(&pegasus->ctrl_wait, &wait);
287 kfree(tmp);
288
289 return ret;
290}
291
292static int update_eth_regs_async(pegasus_t * pegasus)
293{
294 int ret;
295
296 pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
297 pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
298 pegasus->dr.wValue = 0;
299 pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
300 pegasus->dr.wLength = cpu_to_le16(3);
301 pegasus->ctrl_urb->transfer_buffer_length = 3;
302
303 usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
304 usb_sndctrlpipe(pegasus->usb, 0),
305 (char *) &pegasus->dr,
306 pegasus->eth_regs, 3, ctrl_callback, pegasus);
307
308 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
309 if (ret == -ENODEV)
310 netif_device_detach(pegasus->net);
311 if (netif_msg_drv(pegasus))
312 dev_err(&pegasus->intf->dev, "%s, status %d\n",
313 __FUNCTION__, ret);
314 }
315
316 return ret;
317}
318
319/* Returns 0 on success, error on failure */
320static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
321{
322 int i;
323 __u8 data[4] = { phy, 0, 0, indx };
324 __le16 regdi;
325 int ret;
326
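	/* load the PHY address/register via PhyAddr, start the read with
	 * PHY_READ, then poll PhyCtrl until the adapter reports PHY_DONE.
	 */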
327 set_register(pegasus, PhyCtrl, 0);
328 set_registers(pegasus, PhyAddr, sizeof (data), data);
329 set_register(pegasus, PhyCtrl, (indx | PHY_READ));
330 for (i = 0; i < REG_TIMEOUT; i++) {
331 ret = get_registers(pegasus, PhyCtrl, 1, data);
332 if (ret == -ESHUTDOWN)
333 goto fail;
334 if (data[0] & PHY_DONE)
335 break;
336 }
337 if (i < REG_TIMEOUT) {
338 ret = get_registers(pegasus, PhyData, 2, &regdi);
339 *regd = le16_to_cpu(regdi);
340 return ret;
341 }
342fail:
343 if (netif_msg_drv(pegasus))
344 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
345
346 return ret;
347}
348
349static int mdio_read(struct net_device *dev, int phy_id, int loc)
350{
351 pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
352 u16 res;
353
354 read_mii_word(pegasus, phy_id, loc, &res);
355 return (int)res;
356}
357
358static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
359{
360 int i;
361 __u8 data[4] = { phy, 0, 0, indx };
362 int ret;
363
364 data[1] = (u8) regd;
365 data[2] = (u8) (regd >> 8);
366 set_register(pegasus, PhyCtrl, 0);
367 set_registers(pegasus, PhyAddr, sizeof(data), data);
368 set_register(pegasus, PhyCtrl, (indx | PHY_WRITE));
369 for (i = 0; i < REG_TIMEOUT; i++) {
370 ret = get_registers(pegasus, PhyCtrl, 1, data);
371 if (ret == -ESHUTDOWN)
372 goto fail;
373 if (data[0] & PHY_DONE)
374 break;
375 }
376 if (i < REG_TIMEOUT)
377 return ret;
378
379fail:
380 if (netif_msg_drv(pegasus))
381 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
382 return -ETIMEDOUT;
383}
384
385static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
386{
387 pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
388
389 write_mii_word(pegasus, phy_id, loc, val);
390}
391
392static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
393{
394 int i;
395 __u8 tmp;
396 __le16 retdatai;
397 int ret;
398
399 set_register(pegasus, EpromCtrl, 0);
400 set_register(pegasus, EpromOffset, index);
401 set_register(pegasus, EpromCtrl, EPROM_READ);
402
403 for (i = 0; i < REG_TIMEOUT; i++) {
404 ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
405 if (tmp & EPROM_DONE)
406 break;
407 if (ret == -ESHUTDOWN)
408 goto fail;
409 }
410 if (i < REG_TIMEOUT) {
411 ret = get_registers(pegasus, EpromData, 2, &retdatai);
412 *retdata = le16_to_cpu(retdatai);
413 return ret;
414 }
415
416fail:
417 if (netif_msg_drv(pegasus))
418 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
419 return -ETIMEDOUT;
420}
421
422#ifdef PEGASUS_WRITE_EEPROM
423static inline void enable_eprom_write(pegasus_t * pegasus)
424{
425 __u8 tmp;
426 int ret;
427
428 get_registers(pegasus, EthCtrl2, 1, &tmp);
429 set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE);
430}
431
432static inline void disable_eprom_write(pegasus_t * pegasus)
433{
434 __u8 tmp;
435 int ret;
436
437 get_registers(pegasus, EthCtrl2, 1, &tmp);
438 set_register(pegasus, EpromCtrl, 0);
439 set_register(pegasus, EthCtrl2, tmp & ~EPROM_WR_ENABLE);
440}
441
442static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
443{
444 int i;
445 __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
446 int ret;
447
448 set_registers(pegasus, EpromOffset, 4, d);
449 enable_eprom_write(pegasus);
450 set_register(pegasus, EpromOffset, index);
451 set_registers(pegasus, EpromData, 2, &data);
452 set_register(pegasus, EpromCtrl, EPROM_WRITE);
453
454 for (i = 0; i < REG_TIMEOUT; i++) {
455 ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
456 if (ret == -ESHUTDOWN)
457 goto fail;
458 if (tmp & EPROM_DONE)
459 break;
460 }
461 disable_eprom_write(pegasus);
462 if (i < REG_TIMEOUT)
463 return ret;
464fail:
465 if (netif_msg_drv(pegasus))
466 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
467 return -ETIMEDOUT;
468}
469#endif /* PEGASUS_WRITE_EEPROM */
470
471static inline void get_node_id(pegasus_t * pegasus, __u8 * id)
472{
473 int i;
474 __u16 w16;
475
476 for (i = 0; i < 3; i++) {
477 read_eprom_word(pegasus, i, &w16);
478 ((__le16 *) id)[i] = cpu_to_le16p(&w16);
479 }
480}
481
482static void set_ethernet_addr(pegasus_t * pegasus)
483{
484 __u8 node_id[6];
485
486 if (pegasus->features & PEGASUS_II) {
487 get_registers(pegasus, 0x10, sizeof(node_id), node_id);
488 } else {
489 get_node_id(pegasus, node_id);
490 set_registers(pegasus, EthID, sizeof (node_id), node_id);
491 }
492 memcpy(pegasus->net->dev_addr, node_id, sizeof (node_id));
493}
494
495static inline int reset_mac(pegasus_t * pegasus)
496{
497 __u8 data = 0x8;
498 int i;
499
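	/* set the MAC reset bit in EthCtrl1 and poll until the chip clears
	 * it; then, unless loopback is requested, program the GPIO lines
	 * (presumably to bring the external PHY out of reset).
	 */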
500 set_register(pegasus, EthCtrl1, data);
501 for (i = 0; i < REG_TIMEOUT; i++) {
502 get_registers(pegasus, EthCtrl1, 1, &data);
503 if (~data & 0x08) {
504 if (loopback & 1)
505 break;
506 if (mii_mode && (pegasus->features & HAS_HOME_PNA))
507 set_register(pegasus, Gpio1, 0x34);
508 else
509 set_register(pegasus, Gpio1, 0x26);
510 set_register(pegasus, Gpio0, pegasus->features);
511 set_register(pegasus, Gpio0, DEFAULT_GPIO_SET);
512 break;
513 }
514 }
515 if (i == REG_TIMEOUT)
516 return -ETIMEDOUT;
517
518 if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS ||
519 usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
520 set_register(pegasus, Gpio0, 0x24);
521 set_register(pegasus, Gpio0, 0x26);
522 }
523 if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
524 __u16 auxmode;
525 read_mii_word(pegasus, 3, 0x1b, &auxmode);
526 write_mii_word(pegasus, 3, 0x1b, auxmode | 4);
527 }
528
529 return 0;
530}
531
532static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
533{
534 __u16 linkpart;
535 __u8 data[4];
536 pegasus_t *pegasus = netdev_priv(dev);
537 int ret;
538
539 read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
540 data[0] = 0xc9;
541 data[1] = 0;
542 if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
543 data[1] |= 0x20; /* set full duplex */
544 if (linkpart & (ADVERTISE_100FULL | ADVERTISE_100HALF))
545 data[1] |= 0x10; /* set 100 Mbps */
546 if (mii_mode)
547 data[1] = 0;
548 data[2] = (loopback & 1) ? 0x09 : 0x01;
549
550 memcpy(pegasus->eth_regs, data, sizeof (data));
551 ret = set_registers(pegasus, EthCtrl0, 3, data);
552
553 if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS ||
554 usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 ||
555 usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
556 u16 auxmode;
557 read_mii_word(pegasus, 0, 0x1b, &auxmode);
558 write_mii_word(pegasus, 0, 0x1b, auxmode | 4);
559 }
560
561 return ret;
562}
563
564static void fill_skb_pool(pegasus_t * pegasus)
565{
566 int i;
567
568 for (i = 0; i < RX_SKBS; i++) {
569 if (pegasus->rx_pool[i])
570 continue;
571 pegasus->rx_pool[i] = dev_alloc_skb(PEGASUS_MTU + 2);
572 /*
573	** we give up if the allocation fails; the tasklet will be
574	** rescheduled again anyway...
575 */
576 if (pegasus->rx_pool[i] == NULL)
577 return;
578 skb_reserve(pegasus->rx_pool[i], 2);
579 }
580}
581
582static void free_skb_pool(pegasus_t * pegasus)
583{
584 int i;
585
586 for (i = 0; i < RX_SKBS; i++) {
587 if (pegasus->rx_pool[i]) {
588 dev_kfree_skb(pegasus->rx_pool[i]);
589 pegasus->rx_pool[i] = NULL;
590 }
591 }
592}
593
594static inline struct sk_buff *pull_skb(pegasus_t * pegasus)
595{
596 int i;
597 struct sk_buff *skb;
598
599 for (i = 0; i < RX_SKBS; i++) {
600 if (likely(pegasus->rx_pool[i] != NULL)) {
601 skb = pegasus->rx_pool[i];
602 pegasus->rx_pool[i] = NULL;
603 return skb;
604 }
605 }
606 return NULL;
607}
608
609static void read_bulk_callback(struct urb *urb)
610{
611 pegasus_t *pegasus = urb->context;
612 struct net_device *net;
613 int rx_status, count = urb->actual_length;
614 u8 *buf = urb->transfer_buffer;
615 __u16 pkt_len;
616
617 if (!pegasus)
618 return;
619
620 net = pegasus->net;
621 if (!netif_device_present(net) || !netif_running(net))
622 return;
623
624 switch (urb->status) {
625 case 0:
626 break;
627 case -ETIME:
628 if (netif_msg_rx_err(pegasus))
629 pr_debug("%s: reset MAC\n", net->name);
630 pegasus->flags &= ~PEGASUS_RX_BUSY;
631 break;
632 case -EPIPE: /* stall, or disconnect from TT */
633 /* FIXME schedule work to clear the halt */
634 if (netif_msg_rx_err(pegasus))
635 printk(KERN_WARNING "%s: no rx stall recovery\n",
636 net->name);
637 return;
638 case -ENOENT:
639 case -ECONNRESET:
640 case -ESHUTDOWN:
641 if (netif_msg_ifdown(pegasus))
642 pr_debug("%s: rx unlink, %d\n", net->name, urb->status);
643 return;
644 default:
645 if (netif_msg_rx_err(pegasus))
646 pr_debug("%s: RX status %d\n", net->name, urb->status);
647 goto goon;
648 }
649
650	if (count < 4)
651 goto goon;
652
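	/* the adapter appends status bytes to every frame: buf[count - 2]
	 * holds the RX status, and (except on the ADM8513, which prepends
	 * the length) buf[count - 4]/buf[count - 3] hold a 12-bit length.
	 */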
653 rx_status = buf[count - 2];
654 if (rx_status & 0x1e) {
655 if (netif_msg_rx_err(pegasus))
656 pr_debug("%s: RX packet error %x\n",
657 net->name, rx_status);
658 pegasus->stats.rx_errors++;
659 if (rx_status & 0x06) // long or runt
660 pegasus->stats.rx_length_errors++;
661 if (rx_status & 0x08)
662 pegasus->stats.rx_crc_errors++;
663 if (rx_status & 0x10) // extra bits
664 pegasus->stats.rx_frame_errors++;
665 goto goon;
666 }
667 if (pegasus->chip == 0x8513) {
668 pkt_len = le32_to_cpu(*(__le32 *)urb->transfer_buffer);
669 pkt_len &= 0x0fff;
670 pegasus->rx_skb->data += 2;
671 } else {
672 pkt_len = buf[count - 3] << 8;
673 pkt_len += buf[count - 4];
674 pkt_len &= 0xfff;
675 pkt_len -= 8;
676 }
677
678 /*
679 * If the packet is unreasonably long, quietly drop it rather than
680	 * kernel panicking by calling skb_put.
681 */
682 if (pkt_len > PEGASUS_MTU)
683 goto goon;
684
685 /*
686 * at this point we are sure pegasus->rx_skb != NULL
687 * so we go ahead and pass up the packet.
688 */
689 skb_put(pegasus->rx_skb, pkt_len);
690 pegasus->rx_skb->protocol = eth_type_trans(pegasus->rx_skb, net);
691 netif_rx(pegasus->rx_skb);
692 pegasus->stats.rx_packets++;
693 pegasus->stats.rx_bytes += pkt_len;
694
695 if (pegasus->flags & PEGASUS_UNPLUG)
696 return;
697
698 spin_lock(&pegasus->rx_pool_lock);
699 pegasus->rx_skb = pull_skb(pegasus);
700 spin_unlock(&pegasus->rx_pool_lock);
701
702 if (pegasus->rx_skb == NULL)
703 goto tl_sched;
704goon:
705 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
706 usb_rcvbulkpipe(pegasus->usb, 1),
707 pegasus->rx_skb->data, PEGASUS_MTU + 8,
708 read_bulk_callback, pegasus);
709 rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
710 if (rx_status == -ENODEV)
711 netif_device_detach(pegasus->net);
712 else if (rx_status) {
713 pegasus->flags |= PEGASUS_RX_URB_FAIL;
714 goto tl_sched;
715 } else {
716 pegasus->flags &= ~PEGASUS_RX_URB_FAIL;
717 }
718
719 return;
720
721tl_sched:
722 tasklet_schedule(&pegasus->rx_tl);
723}
724
725static void rx_fixup(unsigned long data)
726{
727 pegasus_t *pegasus;
728 unsigned long flags;
729 int status;
730
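	/* tasklet context: refill the rx skb pool and resubmit the rx URB
	 * after an allocation failure or a failed submit in the completion
	 * handler.
	 */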
731 pegasus = (pegasus_t *) data;
732 if (pegasus->flags & PEGASUS_UNPLUG)
733 return;
734
735 spin_lock_irqsave(&pegasus->rx_pool_lock, flags);
736 fill_skb_pool(pegasus);
737 if (pegasus->flags & PEGASUS_RX_URB_FAIL)
738 if (pegasus->rx_skb)
739 goto try_again;
740 if (pegasus->rx_skb == NULL) {
741 pegasus->rx_skb = pull_skb(pegasus);
742 }
743 if (pegasus->rx_skb == NULL) {
744 if (netif_msg_rx_err(pegasus))
745 printk(KERN_WARNING "%s: low on memory\n",
746 pegasus->net->name);
747 tasklet_schedule(&pegasus->rx_tl);
748 goto done;
749 }
750 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
751 usb_rcvbulkpipe(pegasus->usb, 1),
752 pegasus->rx_skb->data, PEGASUS_MTU + 8,
753 read_bulk_callback, pegasus);
754try_again:
755 status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
756 if (status == -ENODEV)
757 netif_device_detach(pegasus->net);
758 else if (status) {
759 pegasus->flags |= PEGASUS_RX_URB_FAIL;
760 tasklet_schedule(&pegasus->rx_tl);
761 } else {
762 pegasus->flags &= ~PEGASUS_RX_URB_FAIL;
763 }
764done:
765 spin_unlock_irqrestore(&pegasus->rx_pool_lock, flags);
766}
767
768static void write_bulk_callback(struct urb *urb)
769{
770	pegasus_t *pegasus = urb->context;
771	struct net_device *net;
772
773	if (!pegasus)
774		return;
775	net = pegasus->net;
776	if (!netif_device_present(net) || !netif_running(net))
777		return;
778
779 switch (urb->status) {
780 case -EPIPE:
781 /* FIXME schedule_work() to clear the tx halt */
782 netif_stop_queue(net);
783 if (netif_msg_tx_err(pegasus))
784 printk(KERN_WARNING "%s: no tx stall recovery\n",
785 net->name);
786 return;
787 case -ENOENT:
788 case -ECONNRESET:
789 case -ESHUTDOWN:
790 if (netif_msg_ifdown(pegasus))
791 pr_debug("%s: tx unlink, %d\n", net->name, urb->status);
792 return;
793 default:
794 if (netif_msg_tx_err(pegasus))
795 pr_info("%s: TX status %d\n", net->name, urb->status);
796 /* FALL THROUGH */
797 case 0:
798 break;
799 }
800
801 net->trans_start = jiffies;
802 netif_wake_queue(net);
803}
804
805static void intr_callback(struct urb *urb)
806{
807 pegasus_t *pegasus = urb->context;
808 struct net_device *net;
809 int status;
810
811 if (!pegasus)
812 return;
813 net = pegasus->net;
814
815 switch (urb->status) {
816 case 0:
817 break;
818 case -ECONNRESET: /* unlink */
819 case -ENOENT:
820 case -ESHUTDOWN:
821 return;
822 default:
823 /* some Pegasus-I products report LOTS of data
824 * toggle errors... avoid log spamming
825 */
826 if (netif_msg_timer(pegasus))
827 pr_debug("%s: intr status %d\n", net->name,
828 urb->status);
829 }
830
831 if (urb->actual_length >= 6) {
832 u8 * d = urb->transfer_buffer;
833
834 /* byte 0 == tx_status1, reg 2B */
835 if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL
836 |LATE_COL|JABBER_TIMEOUT)) {
837 pegasus->stats.tx_errors++;
838 if (d[0] & TX_UNDERRUN)
839 pegasus->stats.tx_fifo_errors++;
840 if (d[0] & (EXCESSIVE_COL | JABBER_TIMEOUT))
841 pegasus->stats.tx_aborted_errors++;
842 if (d[0] & LATE_COL)
843 pegasus->stats.tx_window_errors++;
844 }
845
846 /* d[5].LINK_STATUS lies on some adapters.
847 * d[0].NO_CARRIER kicks in only with failed TX.
848 * ... so monitoring with MII may be safest.
849 */
850
851 /* bytes 3-4 == rx_lostpkt, reg 2E/2F */
852 pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
853 }
854
855 status = usb_submit_urb(urb, GFP_ATOMIC);
856 if (status == -ENODEV)
857 netif_device_detach(pegasus->net);
858 if (status && netif_msg_timer(pegasus))
859 printk(KERN_ERR "%s: can't resubmit interrupt urb, %d\n",
860 net->name, status);
861}
862
863static void pegasus_tx_timeout(struct net_device *net)
864{
865 pegasus_t *pegasus = netdev_priv(net);
866 if (netif_msg_timer(pegasus))
867 printk(KERN_WARNING "%s: tx timeout\n", net->name);
868 usb_unlink_urb(pegasus->tx_urb);
869 pegasus->stats.tx_errors++;
870}
871
872static int pegasus_start_xmit(struct sk_buff *skb, struct net_device *net)
873{
874 pegasus_t *pegasus = netdev_priv(net);
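	/* a 2-byte length prefix is added; if that would make the transfer
	 * an exact multiple of 64 bytes, one extra byte is sent, apparently
	 * to avoid ending exactly on the bulk max-packet boundary.
	 */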
875 int count = ((skb->len + 2) & 0x3f) ? skb->len + 2 : skb->len + 3;
876 int res;
877 __u16 l16 = skb->len;
878
879 netif_stop_queue(net);
880
881 ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16);
882 skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len);
883 usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb,
884 usb_sndbulkpipe(pegasus->usb, 2),
885 pegasus->tx_buff, count,
886 write_bulk_callback, pegasus);
887 if ((res = usb_submit_urb(pegasus->tx_urb, GFP_ATOMIC))) {
888 if (netif_msg_tx_err(pegasus))
889 printk(KERN_WARNING "%s: fail tx, %d\n",
890 net->name, res);
891 switch (res) {
892 case -EPIPE: /* stall, or disconnect from TT */
893 /* cleanup should already have been scheduled */
894 break;
895 case -ENODEV: /* disconnect() upcoming */
896 netif_device_detach(pegasus->net);
897 break;
898 default:
899 pegasus->stats.tx_errors++;
900 netif_start_queue(net);
901 }
902 } else {
903 pegasus->stats.tx_packets++;
904 pegasus->stats.tx_bytes += skb->len;
905 net->trans_start = jiffies;
906 }
907 dev_kfree_skb(skb);
908
909 return 0;
910}
911
912static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
913{
914 return &((pegasus_t *) netdev_priv(dev))->stats;
915}
916
917static inline void disable_net_traffic(pegasus_t * pegasus)
918{
919 int tmp = 0;
920
921 set_registers(pegasus, EthCtrl0, 2, &tmp);
922}
923
924static inline void get_interrupt_interval(pegasus_t * pegasus)
925{
926 __u8 data[2];
927
928 read_eprom_word(pegasus, 4, (__u16 *) data);
929 if (pegasus->usb->speed != USB_SPEED_HIGH) {
930 if (data[1] < 0x80) {
931 if (netif_msg_timer(pegasus))
932 dev_info(&pegasus->intf->dev, "intr interval "
933 "changed from %ums to %ums\n",
934 data[1], 0x80);
935 data[1] = 0x80;
936#ifdef PEGASUS_WRITE_EEPROM
937 write_eprom_word(pegasus, 4, *(__u16 *) data);
938#endif
939 }
940 }
941 pegasus->intr_interval = data[1];
942}
943
944static void set_carrier(struct net_device *net)
945{
946 pegasus_t *pegasus = netdev_priv(net);
947 u16 tmp;
948
949 if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
950 return;
951
952 if (tmp & BMSR_LSTATUS)
953 netif_carrier_on(net);
954 else
955 netif_carrier_off(net);
956}
957
958static void free_all_urbs(pegasus_t * pegasus)
959{
960 usb_free_urb(pegasus->intr_urb);
961 usb_free_urb(pegasus->tx_urb);
962 usb_free_urb(pegasus->rx_urb);
963 usb_free_urb(pegasus->ctrl_urb);
964}
965
966static void unlink_all_urbs(pegasus_t * pegasus)
967{
968 usb_kill_urb(pegasus->intr_urb);
969 usb_kill_urb(pegasus->tx_urb);
970 usb_kill_urb(pegasus->rx_urb);
971 usb_kill_urb(pegasus->ctrl_urb);
972}
973
974static int alloc_urbs(pegasus_t * pegasus)
975{
976 pegasus->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
977 if (!pegasus->ctrl_urb) {
978 return 0;
979 }
980 pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
981 if (!pegasus->rx_urb) {
982 usb_free_urb(pegasus->ctrl_urb);
983 return 0;
984 }
985 pegasus->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
986 if (!pegasus->tx_urb) {
987 usb_free_urb(pegasus->rx_urb);
988 usb_free_urb(pegasus->ctrl_urb);
989 return 0;
990 }
991 pegasus->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
992 if (!pegasus->intr_urb) {
993 usb_free_urb(pegasus->tx_urb);
994 usb_free_urb(pegasus->rx_urb);
995 usb_free_urb(pegasus->ctrl_urb);
996 return 0;
997 }
998
999 return 1;
1000}
1001
1002static int pegasus_open(struct net_device *net)
1003{
1004 pegasus_t *pegasus = netdev_priv(net);
1005 int res;
1006
1007 if (pegasus->rx_skb == NULL)
1008 pegasus->rx_skb = pull_skb(pegasus);
1009 /*
1010	** Note: no point in freeing the pool, it is empty :-)
1011 */
1012 if (!pegasus->rx_skb)
1013 return -ENOMEM;
1014
1015 res = set_registers(pegasus, EthID, 6, net->dev_addr);
1016
1017 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
1018 usb_rcvbulkpipe(pegasus->usb, 1),
1019 pegasus->rx_skb->data, PEGASUS_MTU + 8,
1020 read_bulk_callback, pegasus);
1021 if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
1022 if (res == -ENODEV)
1023 netif_device_detach(pegasus->net);
1024 if (netif_msg_ifup(pegasus))
1025 pr_debug("%s: failed rx_urb, %d", net->name, res);
1026 goto exit;
1027 }
1028
1029 usb_fill_int_urb(pegasus->intr_urb, pegasus->usb,
1030 usb_rcvintpipe(pegasus->usb, 3),
1031 pegasus->intr_buff, sizeof (pegasus->intr_buff),
1032 intr_callback, pegasus, pegasus->intr_interval);
1033 if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) {
1034 if (res == -ENODEV)
1035 netif_device_detach(pegasus->net);
1036 if (netif_msg_ifup(pegasus))
1037 pr_debug("%s: failed intr_urb, %d\n", net->name, res);
1038 usb_kill_urb(pegasus->rx_urb);
1039 goto exit;
1040 }
1041 if ((res = enable_net_traffic(net, pegasus->usb))) {
1042 if (netif_msg_ifup(pegasus))
1043 pr_debug("%s: can't enable_net_traffic() - %d\n",
1044 net->name, res);
1045 res = -EIO;
1046 usb_kill_urb(pegasus->rx_urb);
1047 usb_kill_urb(pegasus->intr_urb);
1048 free_skb_pool(pegasus);
1049 goto exit;
1050 }
1051 set_carrier(net);
1052 netif_start_queue(net);
1053 if (netif_msg_ifup(pegasus))
1054 pr_debug("%s: open\n", net->name);
1055 res = 0;
1056exit:
1057 return res;
1058}
1059
1060static int pegasus_close(struct net_device *net)
1061{
1062 pegasus_t *pegasus = netdev_priv(net);
1063
1064 netif_stop_queue(net);
1065 if (!(pegasus->flags & PEGASUS_UNPLUG))
1066 disable_net_traffic(pegasus);
1067 tasklet_kill(&pegasus->rx_tl);
1068 unlink_all_urbs(pegasus);
1069
1070 return 0;
1071}
1072
1073static void pegasus_get_drvinfo(struct net_device *dev,
1074 struct ethtool_drvinfo *info)
1075{
1076 pegasus_t *pegasus = netdev_priv(dev);
1077 strncpy(info->driver, driver_name, sizeof (info->driver) - 1);
1078 strncpy(info->version, DRIVER_VERSION, sizeof (info->version) - 1);
1079 usb_make_path(pegasus->usb, info->bus_info, sizeof (info->bus_info));
1080}
1081
1082/* also handles three patterns of some kind in hardware */
1083#define WOL_SUPPORTED (WAKE_MAGIC|WAKE_PHY)
1084
1085static void
1086pegasus_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1087{
1088 pegasus_t *pegasus = netdev_priv(dev);
1089
1090 wol->supported = WAKE_MAGIC | WAKE_PHY;
1091 wol->wolopts = pegasus->wolopts;
1092}
1093
1094static int
1095pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1096{
1097 pegasus_t *pegasus = netdev_priv(dev);
1098 u8 reg78 = 0x04;
1099
1100 if (wol->wolopts & ~WOL_SUPPORTED)
1101 return -EINVAL;
1102
1103 if (wol->wolopts & WAKE_MAGIC)
1104 reg78 |= 0x80;
1105 if (wol->wolopts & WAKE_PHY)
1106 reg78 |= 0x40;
1107 /* FIXME this 0x10 bit still needs to get set in the chip... */
1108 if (wol->wolopts)
1109 pegasus->eth_regs[0] |= 0x10;
1110 else
1111 pegasus->eth_regs[0] &= ~0x10;
1112 pegasus->wolopts = wol->wolopts;
1113 return set_register(pegasus, WakeupControl, reg78);
1114}
1115
1116static inline void pegasus_reset_wol(struct net_device *dev)
1117{
1118 struct ethtool_wolinfo wol;
1119
1120 memset(&wol, 0, sizeof wol);
1121 (void) pegasus_set_wol(dev, &wol);
1122}
1123
1124static int
1125pegasus_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1126{
1127 pegasus_t *pegasus;
1128
1129 if (in_atomic())
1130 return 0;
1131
1132 pegasus = netdev_priv(dev);
1133 mii_ethtool_gset(&pegasus->mii, ecmd);
1134
1135 return 0;
1136}
1137
1138static int
1139pegasus_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1140{
1141 pegasus_t *pegasus = netdev_priv(dev);
1142 return mii_ethtool_sset(&pegasus->mii, ecmd);
1143}
1144
1145static int pegasus_nway_reset(struct net_device *dev)
1146{
1147 pegasus_t *pegasus = netdev_priv(dev);
1148 return mii_nway_restart(&pegasus->mii);
1149}
1150
1151static u32 pegasus_get_link(struct net_device *dev)
1152{
1153 pegasus_t *pegasus = netdev_priv(dev);
1154 return mii_link_ok(&pegasus->mii);
1155}
1156
1157static u32 pegasus_get_msglevel(struct net_device *dev)
1158{
1159 pegasus_t *pegasus = netdev_priv(dev);
1160 return pegasus->msg_enable;
1161}
1162
1163static void pegasus_set_msglevel(struct net_device *dev, u32 v)
1164{
1165 pegasus_t *pegasus = netdev_priv(dev);
1166 pegasus->msg_enable = v;
1167}
1168
1169static struct ethtool_ops ops = {
1170 .get_drvinfo = pegasus_get_drvinfo,
1171 .get_settings = pegasus_get_settings,
1172 .set_settings = pegasus_set_settings,
1173 .nway_reset = pegasus_nway_reset,
1174 .get_link = pegasus_get_link,
1175 .get_msglevel = pegasus_get_msglevel,
1176 .set_msglevel = pegasus_set_msglevel,
1177 .get_wol = pegasus_get_wol,
1178 .set_wol = pegasus_set_wol,
1179};
1180
1181static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
1182{
1183 __u16 *data = (__u16 *) & rq->ifr_ifru;
1184 pegasus_t *pegasus = netdev_priv(net);
1185 int res;
1186
1187 switch (cmd) {
1188 case SIOCDEVPRIVATE:
1189 data[0] = pegasus->phy;
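		/* fall through: read via the default PHY id just stored */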
1190 case SIOCDEVPRIVATE + 1:
1191 read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
1192 res = 0;
1193 break;
1194 case SIOCDEVPRIVATE + 2:
1195 if (!capable(CAP_NET_ADMIN))
1196 return -EPERM;
1197 write_mii_word(pegasus, pegasus->phy, data[1] & 0x1f, data[2]);
1198 res = 0;
1199 break;
1200 default:
1201 res = -EOPNOTSUPP;
1202 }
1203 return res;
1204}
1205
1206static void pegasus_set_multicast(struct net_device *net)
1207{
1208 pegasus_t *pegasus = netdev_priv(net);
1209
1210 if (net->flags & IFF_PROMISC) {
1211 pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
1212 if (netif_msg_link(pegasus))
1213 pr_info("%s: Promiscuous mode enabled.\n", net->name);
1214 } else if (net->mc_count ||
1215 (net->flags & IFF_ALLMULTI)) {
1216 pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
1217 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
1218 if (netif_msg_link(pegasus))
1219 pr_info("%s: set allmulti\n", net->name);
1220 } else {
1221 pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST;
1222 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
1223 }
1224
1225 pegasus->flags |= ETH_REGS_CHANGE;
1226 ctrl_callback(pegasus->ctrl_urb);
1227}
1228
1229static __u8 mii_phy_probe(pegasus_t * pegasus)
1230{
1231 int i;
1232 __u16 tmp;
1233
1234 for (i = 0; i < 32; i++) {
1235 read_mii_word(pegasus, i, MII_BMSR, &tmp);
1236 if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0)
1237 continue;
1238 else
1239 return i;
1240 }
1241
1242 return 0xff;
1243}
1244
1245static inline void setup_pegasus_II(pegasus_t * pegasus)
1246{
1247 __u8 data = 0xa5;
1248
1249 set_register(pegasus, Reg1d, 0);
1250 set_register(pegasus, Reg7b, 1);
1251 mdelay(100);
1252 if ((pegasus->features & HAS_HOME_PNA) && mii_mode)
1253 set_register(pegasus, Reg7b, 0);
1254 else
1255 set_register(pegasus, Reg7b, 2);
1256
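	/* write a scratch value to register 0x83 and read it back; the
	 * value sticking is used here to identify the ADM8513.
	 */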
1257 set_register(pegasus, 0x83, data);
1258 get_registers(pegasus, 0x83, 1, &data);
1259
1260 if (data == 0xa5) {
1261 pegasus->chip = 0x8513;
1262 } else {
1263 pegasus->chip = 0;
1264 }
1265
1266 set_register(pegasus, 0x80, 0xc0);
1267 set_register(pegasus, 0x83, 0xff);
1268 set_register(pegasus, 0x84, 0x01);
1269
1270 if (pegasus->features & HAS_HOME_PNA && mii_mode)
1271 set_register(pegasus, Reg81, 6);
1272 else
1273 set_register(pegasus, Reg81, 2);
1274}
1275
1276
1277static struct workqueue_struct *pegasus_workqueue = NULL;
1278#define CARRIER_CHECK_DELAY (2 * HZ)
1279
1280static void check_carrier(struct work_struct *work)
1281{
1282 pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
1283 set_carrier(pegasus->net);
1284 if (!(pegasus->flags & PEGASUS_UNPLUG)) {
1285 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
1286 CARRIER_CHECK_DELAY);
1287 }
1288}
1289
1290static int pegasus_probe(struct usb_interface *intf,
1291 const struct usb_device_id *id)
1292{
1293 struct usb_device *dev = interface_to_usbdev(intf);
1294 struct net_device *net;
1295 pegasus_t *pegasus;
1296 int dev_index = id - pegasus_ids;
1297 int res = -ENOMEM;
1298
1299 usb_get_dev(dev);
1300 net = alloc_etherdev(sizeof(struct pegasus));
1301 if (!net) {
1302 dev_err(&intf->dev, "can't allocate %s\n", "device");
1303 goto out;
1304 }
1305
1306 pegasus = netdev_priv(net);
1307 memset(pegasus, 0, sizeof (struct pegasus));
1308 pegasus->dev_index = dev_index;
1309 init_waitqueue_head(&pegasus->ctrl_wait);
1310
1311 if (!alloc_urbs(pegasus)) {
1312 dev_err(&intf->dev, "can't allocate %s\n", "urbs");
1313 goto out1;
1314 }
1315
1316 tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
1317
1318 INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
1319
1320 pegasus->intf = intf;
1321 pegasus->usb = dev;
1322 pegasus->net = net;
1323 SET_MODULE_OWNER(net);
1324 net->open = pegasus_open;
1325 net->stop = pegasus_close;
1326 net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
1327 net->tx_timeout = pegasus_tx_timeout;
1328 net->do_ioctl = pegasus_ioctl;
1329 net->hard_start_xmit = pegasus_start_xmit;
1330 net->set_multicast_list = pegasus_set_multicast;
1331 net->get_stats = pegasus_netdev_stats;
1332 SET_ETHTOOL_OPS(net, &ops);
1333 pegasus->mii.dev = net;
1334 pegasus->mii.mdio_read = mdio_read;
1335 pegasus->mii.mdio_write = mdio_write;
1336 pegasus->mii.phy_id_mask = 0x1f;
1337 pegasus->mii.reg_num_mask = 0x1f;
1338 spin_lock_init(&pegasus->rx_pool_lock);
1339 pegasus->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1340 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1341
1342 pegasus->features = usb_dev_id[dev_index].private;
1343 get_interrupt_interval(pegasus);
1344 if (reset_mac(pegasus)) {
1345 dev_err(&intf->dev, "can't reset MAC\n");
1346 res = -EIO;
1347 goto out2;
1348 }
1349 set_ethernet_addr(pegasus);
1350 fill_skb_pool(pegasus);
1351 if (pegasus->features & PEGASUS_II) {
1352 dev_info(&intf->dev, "setup Pegasus II specific registers\n");
1353 setup_pegasus_II(pegasus);
1354 }
1355 pegasus->phy = mii_phy_probe(pegasus);
1356 if (pegasus->phy == 0xff) {
1357 dev_warn(&intf->dev, "can't locate MII phy, using default\n");
1358 pegasus->phy = 1;
1359 }
1360 pegasus->mii.phy_id = pegasus->phy;
1361 usb_set_intfdata(intf, pegasus);
1362 SET_NETDEV_DEV(net, &intf->dev);
1363 pegasus_reset_wol(net);
1364 res = register_netdev(net);
1365 if (res)
1366 goto out3;
1367 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
1368 CARRIER_CHECK_DELAY);
1369
1370 dev_info(&intf->dev, "%s, %s, %02x:%02x:%02x:%02x:%02x:%02x\n",
1371 net->name,
1372 usb_dev_id[dev_index].name,
1373 net->dev_addr [0], net->dev_addr [1],
1374 net->dev_addr [2], net->dev_addr [3],
1375 net->dev_addr [4], net->dev_addr [5]);
1376 return 0;
1377
1378out3:
1379 usb_set_intfdata(intf, NULL);
1380 free_skb_pool(pegasus);
1381out2:
1382 free_all_urbs(pegasus);
1383out1:
1384 free_netdev(net);
1385out:
1386 usb_put_dev(dev);
1387 return res;
1388}
1389
1390static void pegasus_disconnect(struct usb_interface *intf)
1391{
1392 struct pegasus *pegasus = usb_get_intfdata(intf);
1393
1394 usb_set_intfdata(intf, NULL);
1395 if (!pegasus) {
1396 dev_dbg(&intf->dev, "unregistering non-bound device?\n");
1397 return;
1398 }
1399
1400 pegasus->flags |= PEGASUS_UNPLUG;
1401 cancel_delayed_work(&pegasus->carrier_check);
1402 unregister_netdev(pegasus->net);
1403 usb_put_dev(interface_to_usbdev(intf));
1404 unlink_all_urbs(pegasus);
1405 free_all_urbs(pegasus);
1406 free_skb_pool(pegasus);
1407 if (pegasus->rx_skb != NULL) {
1408 dev_kfree_skb(pegasus->rx_skb);
1409 pegasus->rx_skb = NULL;
1410 }
1411 free_netdev(pegasus->net);
1412}
1413
1414static int pegasus_suspend (struct usb_interface *intf, pm_message_t message)
1415{
1416 struct pegasus *pegasus = usb_get_intfdata(intf);
1417
1418 netif_device_detach (pegasus->net);
1419 cancel_delayed_work(&pegasus->carrier_check);
1420 if (netif_running(pegasus->net)) {
1421 usb_kill_urb(pegasus->rx_urb);
1422 usb_kill_urb(pegasus->intr_urb);
1423 }
1424 return 0;
1425}
1426
1427static int pegasus_resume (struct usb_interface *intf)
1428{
1429 struct pegasus *pegasus = usb_get_intfdata(intf);
1430
1431 netif_device_attach (pegasus->net);
1432 if (netif_running(pegasus->net)) {
1433 pegasus->rx_urb->status = 0;
1434 pegasus->rx_urb->actual_length = 0;
1435 read_bulk_callback(pegasus->rx_urb);
1436
1437 pegasus->intr_urb->status = 0;
1438 pegasus->intr_urb->actual_length = 0;
1439 intr_callback(pegasus->intr_urb);
1440 }
1441 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
1442 CARRIER_CHECK_DELAY);
1443 return 0;
1444}
1445
1446static struct usb_driver pegasus_driver = {
1447 .name = driver_name,
1448 .probe = pegasus_probe,
1449 .disconnect = pegasus_disconnect,
1450 .id_table = pegasus_ids,
1451 .suspend = pegasus_suspend,
1452 .resume = pegasus_resume,
1453};
1454
1455static void parse_id(char *id)
1456{
1457 unsigned int vendor_id=0, device_id=0, flags=0, i=0;
1458 char *token, *name=NULL;
1459
1460 if ((token = strsep(&id, ":")) != NULL)
1461 name = token;
1462	/* name now points to a null-terminated string */
1463 if ((token = strsep(&id, ":")) != NULL)
1464 vendor_id = simple_strtoul(token, NULL, 16);
1465 if ((token = strsep(&id, ":")) != NULL)
1466 device_id = simple_strtoul(token, NULL, 16);
1467	flags = id ? simple_strtoul(id, NULL, 16) : 0;
1468 pr_info("%s: new device %s, vendor ID 0x%04x, device ID 0x%04x, flags: 0x%x\n",
1469 driver_name, name, vendor_id, device_id, flags);
1470
1471	if (vendor_id > 0xffff || vendor_id == 0)
1472		return;
1473	if (device_id > 0xffff || device_id == 0)
1474		return;
1475
1476 for (i=0; usb_dev_id[i].name; i++);
1477 usb_dev_id[i].name = name;
1478 usb_dev_id[i].vendor = vendor_id;
1479 usb_dev_id[i].device = device_id;
1480 usb_dev_id[i].private = flags;
1481 pegasus_ids[i].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
1482 pegasus_ids[i].idVendor = vendor_id;
1483 pegasus_ids[i].idProduct = device_id;
1484}
1485
1486static int __init pegasus_init(void)
1487{
1488 pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION);
1489 if (devid)
1490 parse_id(devid);
1491 pegasus_workqueue = create_singlethread_workqueue("pegasus");
1492 if (!pegasus_workqueue)
1493 return -ENOMEM;
1494 return usb_register(&pegasus_driver);
1495}
1496
1497static void __exit pegasus_exit(void)
1498{
1499 destroy_workqueue(pegasus_workqueue);
1500 usb_deregister(&pegasus_driver);
1501}
1502
1503module_init(pegasus_init);
1504module_exit(pegasus_exit);
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
new file mode 100644
index 000000000000..c7467823cd1c
--- /dev/null
+++ b/drivers/net/usb/pegasus.h
@@ -0,0 +1,307 @@
1/*
2 * Copyright (c) 1999-2003 Petko Manolov - Petkan (petkan@users.sourceforge.net)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as published
6 * by the Free Software Foundation.
7 */
8
9
10#ifndef PEGASUS_DEV
11
12#define PEGASUS_II 0x80000000
13#define HAS_HOME_PNA 0x40000000
14
15#define PEGASUS_MTU 1536
16#define RX_SKBS 4
17
18#define EPROM_WRITE 0x01
19#define EPROM_READ 0x02
20#define EPROM_DONE 0x04
21#define EPROM_WR_ENABLE 0x10
22#define EPROM_LOAD 0x20
23
24#define PHY_DONE 0x80
25#define PHY_READ 0x40
26#define PHY_WRITE 0x20
27#define DEFAULT_GPIO_RESET 0x24
28#define DEFAULT_GPIO_SET 0x26
29
30#define PEGASUS_PRESENT 0x00000001
31#define PEGASUS_TX_BUSY 0x00000004
32#define PEGASUS_RX_BUSY 0x00000008
33#define CTRL_URB_RUNNING 0x00000010
34#define CTRL_URB_SLEEP 0x00000020
35#define PEGASUS_UNPLUG 0x00000040
36#define PEGASUS_RX_URB_FAIL 0x00000080
37#define ETH_REGS_CHANGE 0x40000000
38#define ETH_REGS_CHANGED 0x80000000
39
40#define RX_MULTICAST 2
41#define RX_PROMISCUOUS 4
42
43#define REG_TIMEOUT (HZ)
44#define PEGASUS_TX_TIMEOUT (HZ*10)
45
46#define TX_UNDERRUN 0x80
47#define EXCESSIVE_COL 0x40
48#define LATE_COL 0x20
49#define NO_CARRIER 0x10
50#define LOSS_CARRIER 0x08
51#define JABBER_TIMEOUT 0x04
52
53#define LINK_STATUS 0x01
54
55#define PEGASUS_REQT_READ 0xc0
56#define PEGASUS_REQT_WRITE 0x40
57#define PEGASUS_REQ_GET_REGS 0xf0
58#define PEGASUS_REQ_SET_REGS 0xf1
59#define PEGASUS_REQ_SET_REG PEGASUS_REQ_SET_REGS
60
61enum pegasus_registers {
62 EthCtrl0 = 0,
63 EthCtrl1 = 1,
64 EthCtrl2 = 2,
65 EthID = 0x10,
66 Reg1d = 0x1d,
67 EpromOffset = 0x20,
68 EpromData = 0x21, /* 0x21 low, 0x22 high byte */
69 EpromCtrl = 0x23,
70 PhyAddr = 0x25,
71 PhyData = 0x26, /* 0x26 low, 0x27 high byte */
72 PhyCtrl = 0x28,
73 UsbStst = 0x2a,
74 EthTxStat0 = 0x2b,
75 EthTxStat1 = 0x2c,
76 EthRxStat = 0x2d,
77 WakeupControl = 0x78,
78 Reg7b = 0x7b,
79 Gpio0 = 0x7e,
80 Gpio1 = 0x7f,
81 Reg81 = 0x81,
82};
83
84
85typedef struct pegasus {
86 struct usb_device *usb;
87 struct usb_interface *intf;
88 struct net_device *net;
89 struct net_device_stats stats;
90 struct mii_if_info mii;
91 unsigned flags;
92 unsigned features;
93 u32 msg_enable;
94 u32 wolopts;
95 int dev_index;
96 int intr_interval;
97 struct tasklet_struct rx_tl;
98 struct delayed_work carrier_check;
99 struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb;
100 struct sk_buff *rx_pool[RX_SKBS];
101 struct sk_buff *rx_skb;
102 struct usb_ctrlrequest dr;
103 wait_queue_head_t ctrl_wait;
104 spinlock_t rx_pool_lock;
105 int chip;
106 unsigned char intr_buff[8];
107 __u8 tx_buff[PEGASUS_MTU];
108 __u8 eth_regs[4];
109 __u8 phy;
110 __u8 gpio_res;
111} pegasus_t;
112
113
114struct usb_eth_dev {
115 char *name;
116 __u16 vendor;
117 __u16 device;
118 __u32 private; /* LSB is gpio reset value */
119};
120
121#define VENDOR_3COM 0x0506
122#define VENDOR_ABOCOM 0x07b8
123#define VENDOR_ACCTON 0x083a
124#define VENDOR_ADMTEK 0x07a6
125#define VENDOR_AEILAB 0x3334
126#define VENDOR_ALLIEDTEL 0x07c9
127#define VENDOR_ATEN 0x0557
128#define VENDOR_BELKIN 0x050d
129#define VENDOR_BILLIONTON 0x08dd
130#define VENDOR_COMPAQ 0x049f
131#define VENDOR_COREGA 0x07aa
132#define VENDOR_DLINK 0x2001
133#define VENDOR_ELCON 0x0db7
134#define VENDOR_ELECOM 0x056e
135#define VENDOR_ELSA 0x05cc
136#define VENDOR_GIGABYTE 0x1044
137#define VENDOR_HAWKING 0x0e66
138#define VENDOR_HP 0x03f0
139#define VENDOR_IODATA 0x04bb
140#define VENDOR_KINGSTON 0x0951
141#define VENDOR_LANEED 0x056e
142#define VENDOR_LINKSYS 0x066b
143#define VENDOR_LINKSYS2 0x077b
144#define VENDOR_MELCO 0x0411
145#define VENDOR_MICROSOFT 0x045e
146#define VENDOR_MOBILITY 0x1342
147#define VENDOR_NETGEAR 0x0846
148#define VENDOR_OCT 0x0b39
149#define VENDOR_SMARTBRIDGES 0x08d1
150#define VENDOR_SMC 0x0707
151#define VENDOR_SOHOWARE 0x15e8
152#define VENDOR_SIEMENS 0x067c
153
154
155#else /* PEGASUS_DEV */
156
157PEGASUS_DEV( "3Com USB Ethernet 3C460B", VENDOR_3COM, 0x4601,
158 DEFAULT_GPIO_RESET | PEGASUS_II )
159PEGASUS_DEV( "ATEN USB Ethernet UC-110T", VENDOR_ATEN, 0x2007,
160 DEFAULT_GPIO_RESET | PEGASUS_II )
161PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x110c,
162 DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
163PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4104,
164 DEFAULT_GPIO_RESET | HAS_HOME_PNA )
165PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4004,
166 DEFAULT_GPIO_RESET | HAS_HOME_PNA )
167PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4007,
168 DEFAULT_GPIO_RESET | HAS_HOME_PNA )
169PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4102,
170 DEFAULT_GPIO_RESET | PEGASUS_II )
171PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4002,
172 DEFAULT_GPIO_RESET )
173PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400b,
174 DEFAULT_GPIO_RESET | PEGASUS_II )
175PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c,
176 DEFAULT_GPIO_RESET | PEGASUS_II )
177PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1,
178 DEFAULT_GPIO_RESET )
179PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c,
180 DEFAULT_GPIO_RESET | PEGASUS_II )
181PEGASUS_DEV( "Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046,
182 DEFAULT_GPIO_RESET )
183PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046,
184 DEFAULT_GPIO_RESET | PEGASUS_II )
185PEGASUS_DEV( "Philips USB 10/100 Ethernet", VENDOR_ACCTON, 0xb004,
186 DEFAULT_GPIO_RESET | PEGASUS_II )
187PEGASUS_DEV( "ADMtek ADM8511 \"Pegasus II\" USB Ethernet",
188 VENDOR_ADMTEK, 0x8511,
189 DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
190PEGASUS_DEV( "ADMtek ADM8513 \"Pegasus II\" USB Ethernet",
191 VENDOR_ADMTEK, 0x8513,
192 DEFAULT_GPIO_RESET | PEGASUS_II )
193PEGASUS_DEV( "ADMtek ADM8515 \"Pegasus II\" USB-2.0 Ethernet",
194 VENDOR_ADMTEK, 0x8515,
195 DEFAULT_GPIO_RESET | PEGASUS_II )
196PEGASUS_DEV( "ADMtek AN986 \"Pegasus\" USB Ethernet (evaluation board)",
197 VENDOR_ADMTEK, 0x0986,
198 DEFAULT_GPIO_RESET | HAS_HOME_PNA )
199PEGASUS_DEV( "AN986A USB MAC", VENDOR_ADMTEK, 1986,
200 DEFAULT_GPIO_RESET | PEGASUS_II )
201PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
202 DEFAULT_GPIO_RESET | PEGASUS_II )
203PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
204 DEFAULT_GPIO_RESET | PEGASUS_II )
205PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121,
206 DEFAULT_GPIO_RESET | PEGASUS_II )
207PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
208 DEFAULT_GPIO_RESET )
209PEGASUS_DEV( "Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987,
210 DEFAULT_GPIO_RESET | HAS_HOME_PNA )
211PEGASUS_DEV( "iPAQ Networking 10/100 USB", VENDOR_COMPAQ, 0x8511,
212 DEFAULT_GPIO_RESET | PEGASUS_II )
213PEGASUS_DEV( "Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988,
214 DEFAULT_GPIO_RESET )
215PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511,
216 DEFAULT_GPIO_RESET | PEGASUS_II )
217PEGASUS_DEV( "Corega FEther USB-TX", VENDOR_COREGA, 0x0004,
218 DEFAULT_GPIO_RESET )
219PEGASUS_DEV( "Corega FEther USB-TXS", VENDOR_COREGA, 0x000d,
220 DEFAULT_GPIO_RESET | PEGASUS_II )
221PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001,
222 DEFAULT_GPIO_RESET )
223PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4002,
224 DEFAULT_GPIO_RESET )
225PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4102,
226 DEFAULT_GPIO_RESET | PEGASUS_II )
227PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x400b,
228 DEFAULT_GPIO_RESET | PEGASUS_II )
229PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x200c,
230 DEFAULT_GPIO_RESET | PEGASUS_II )
231PEGASUS_DEV( "D-Link DSB-650TX(PNA)", VENDOR_DLINK, 0x4003,
232 DEFAULT_GPIO_RESET | HAS_HOME_PNA )
233PEGASUS_DEV( "D-Link DSB-650", VENDOR_DLINK, 0xabc1,
234 DEFAULT_GPIO_RESET )
235PEGASUS_DEV( "GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002,
236 DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
237PEGASUS_DEV( "ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM, 0x4010,
238 DEFAULT_GPIO_RESET | PEGASUS_II )
239PEGASUS_DEV( "EasiDock Ethernet", VENDOR_MOBILITY, 0x0304,
240 DEFAULT_GPIO_RESET )
241PEGASUS_DEV( "Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000,
242 DEFAULT_GPIO_RESET )
243PEGASUS_DEV( "GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002,
244 DEFAULT_GPIO_RESET )
245PEGASUS_DEV( "Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c,
246 DEFAULT_GPIO_RESET | PEGASUS_II )
247PEGASUS_DEV( "HP hn210c Ethernet USB", VENDOR_HP, 0x811c,
248 DEFAULT_GPIO_RESET | PEGASUS_II )
249PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
250 DEFAULT_GPIO_RESET )
251PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
252 DEFAULT_GPIO_RESET | PEGASUS_II )
253PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
254 DEFAULT_GPIO_RESET)
255PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
256 DEFAULT_GPIO_RESET )
257PEGASUS_DEV( "LANEED USB Ethernet LD-USBL/TX", VENDOR_LANEED, 0x4005,
258 DEFAULT_GPIO_RESET | PEGASUS_II)
259PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x400b,
260 DEFAULT_GPIO_RESET | PEGASUS_II )
261PEGASUS_DEV( "LANEED USB Ethernet LD-USB/T", VENDOR_LANEED, 0xabc1,
262 DEFAULT_GPIO_RESET )
263PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x200c,
264 DEFAULT_GPIO_RESET | PEGASUS_II )
265PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x2202,
266 DEFAULT_GPIO_RESET )
267PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2203,
268 DEFAULT_GPIO_RESET )
269PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2204,
270 DEFAULT_GPIO_RESET | HAS_HOME_PNA )
271PEGASUS_DEV( "Linksys USB10T Ethernet Adapter", VENDOR_LINKSYS, 0x2206,
272 DEFAULT_GPIO_RESET | PEGASUS_II)
273PEGASUS_DEV( "Linksys USBVPN1", VENDOR_LINKSYS2, 0x08b4,
274 DEFAULT_GPIO_RESET )
275PEGASUS_DEV( "Linksys USB USB100TX", VENDOR_LINKSYS, 0x400b,
276 DEFAULT_GPIO_RESET | PEGASUS_II )
277PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x200c,
278 DEFAULT_GPIO_RESET | PEGASUS_II )
279PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0001,
280 DEFAULT_GPIO_RESET )
281PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0005,
282 DEFAULT_GPIO_RESET )
283PEGASUS_DEV( "MELCO/BUFFALO LUA2-TX", VENDOR_MELCO, 0x0009,
284 DEFAULT_GPIO_RESET | PEGASUS_II )
285PEGASUS_DEV( "Microsoft MN-110", VENDOR_MICROSOFT, 0x007a,
286 DEFAULT_GPIO_RESET | PEGASUS_II )
287PEGASUS_DEV( "NETGEAR FA101", VENDOR_NETGEAR, 0x1020,
288 DEFAULT_GPIO_RESET | PEGASUS_II )
289PEGASUS_DEV( "OCT Inc.", VENDOR_OCT, 0x0109,
290 DEFAULT_GPIO_RESET | PEGASUS_II )
291PEGASUS_DEV( "OCT USB TO Ethernet", VENDOR_OCT, 0x0901,
292 DEFAULT_GPIO_RESET | PEGASUS_II )
293PEGASUS_DEV( "smartNIC 2 PnP Adapter", VENDOR_SMARTBRIDGES, 0x0003,
294 DEFAULT_GPIO_RESET | PEGASUS_II )
295PEGASUS_DEV( "SMC 202 USB Ethernet", VENDOR_SMC, 0x0200,
296 DEFAULT_GPIO_RESET )
297PEGASUS_DEV( "SMC 2206 USB Ethernet", VENDOR_SMC, 0x0201,
298 DEFAULT_GPIO_RESET | PEGASUS_II)
299PEGASUS_DEV( "SOHOware NUB100 Ethernet", VENDOR_SOHOWARE, 0x9100,
300 DEFAULT_GPIO_RESET )
301PEGASUS_DEV( "SOHOware NUB110 Ethernet", VENDOR_SOHOWARE, 0x9110,
302 DEFAULT_GPIO_RESET | PEGASUS_II )
303PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_SIEMENS, 0x1001,
304 DEFAULT_GPIO_RESET | PEGASUS_II )
305
306
307#endif /* PEGASUS_DEV */
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
new file mode 100644
index 000000000000..45300939d185
--- /dev/null
+++ b/drivers/net/usb/plusb.c
@@ -0,0 +1,150 @@
1/*
2 * PL-2301/2302 USB host-to-host link cables
3 * Copyright (C) 2000-2005 by David Brownell
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20// #define DEBUG // error path messages, extra info
21// #define VERBOSE // more; success messages
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/workqueue.h>
29#include <linux/mii.h>
30#include <linux/usb.h>
31
32#include "usbnet.h"
33
34
35/*
36 * Prolific PL-2301/PL-2302 driver ... http://www.prolifictech.com
37 *
38 * The protocol and handshaking used here should be bug-compatible
39 * with the Linux 2.2 "plusb" driver, by Deti Fliegl.
40 *
41 * HEADS UP: this handshaking isn't all that robust. This driver
42 * gets confused easily if you unplug one end of the cable then
43 * try to connect it again; you'll need to restart both ends. The
44 * "naplink" software (used by some PlayStation/2 deveopers) does
45 * the handshaking much better! Also, sometimes this hardware
46 * seems to get wedged under load. Prolific docs are weak, and
47 * don't identify differences between PL2301 and PL2302, much less
48 * anything to explain the different PL2302 versions observed.
49 */
50
51/*
52 * Bits 0-4 can be used for software handshaking; they're set from
53 * one end, cleared from the other, "read" with the interrupt byte.
54 */
55#define PL_S_EN (1<<7) /* (feature only) suspend enable */
56/* reserved bit -- rx ready (6) ? */
57#define PL_TX_READY (1<<5) /* (interrupt only) transmit ready */
58#define PL_RESET_OUT (1<<4) /* reset output pipe */
59#define PL_RESET_IN (1<<3) /* reset input pipe */
60#define PL_TX_C (1<<2) /* transmission complete */
61#define PL_TX_REQ (1<<1) /* transmission received */
62#define PL_PEER_E (1<<0) /* peer exists */
63
64static inline int
65pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
66{
67 return usb_control_msg(dev->udev,
68 usb_rcvctrlpipe(dev->udev, 0),
69 req,
70 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
71 val, index,
72 NULL, 0,
73 USB_CTRL_GET_TIMEOUT);
74}
75
76static inline int
77pl_clear_QuickLink_features(struct usbnet *dev, int val)
78{
79 return pl_vendor_req(dev, 1, (u8) val, 0);
80}
81
82static inline int
83pl_set_QuickLink_features(struct usbnet *dev, int val)
84{
85 return pl_vendor_req(dev, 3, (u8) val, 0);
86}
87
88static int pl_reset(struct usbnet *dev)
89{
90 /* some units seem to need this reset, others reject it utterly.
91 * FIXME be more like "naplink" or windows drivers.
92 */
93 (void) pl_set_QuickLink_features(dev,
94 PL_S_EN|PL_RESET_OUT|PL_RESET_IN|PL_PEER_E);
95 return 0;
96}
97
98static const struct driver_info prolific_info = {
99 .description = "Prolific PL-2301/PL-2302",
100 .flags = FLAG_NO_SETINT,
101 /* some PL-2302 versions seem to fail usb_set_interface() */
102 .reset = pl_reset,
103};
104
105
106/*-------------------------------------------------------------------------*/
107
108/*
109 * Prolific's name won't normally be on the cables, and
110 * may not be on the device.
111 */
112
113static const struct usb_device_id products [] = {
114
115{
116 USB_DEVICE(0x067b, 0x0000), // PL-2301
117 .driver_info = (unsigned long) &prolific_info,
118}, {
119 USB_DEVICE(0x067b, 0x0001), // PL-2302
120 .driver_info = (unsigned long) &prolific_info,
121},
122
123 { }, // END
124};
125MODULE_DEVICE_TABLE(usb, products);
126
127static struct usb_driver plusb_driver = {
128 .name = "plusb",
129 .id_table = products,
130 .probe = usbnet_probe,
131 .disconnect = usbnet_disconnect,
132 .suspend = usbnet_suspend,
133 .resume = usbnet_resume,
134};
135
136static int __init plusb_init(void)
137{
138 return usb_register(&plusb_driver);
139}
140module_init(plusb_init);
141
142static void __exit plusb_exit(void)
143{
144 usb_deregister(&plusb_driver);
145}
146module_exit(plusb_exit);
147
148MODULE_AUTHOR("David Brownell");
149MODULE_DESCRIPTION("Prolific PL-2301/2302 USB Host to Host Link Driver");
150MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
new file mode 100644
index 000000000000..980e4aaa97aa
--- /dev/null
+++ b/drivers/net/usb/rndis_host.c
@@ -0,0 +1,727 @@
1/*
2 * Host Side support for RNDIS Networking Links
3 * Copyright (C) 2005 by David Brownell
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20// #define DEBUG // error path messages, extra info
21// #define VERBOSE // more; success messages
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/workqueue.h>
29#include <linux/mii.h>
30#include <linux/usb.h>
31#include <linux/usb/cdc.h>
32
33#include "usbnet.h"
34
35
36/*
37 * RNDIS is NDIS remoted over USB. It's a MSFT variant of CDC ACM ... of
38 * course ACM was intended for modems, not Ethernet links! USB's standard
39 * for Ethernet links is "CDC Ethernet", which is significantly simpler.
40 *
41 * NOTE that Microsoft's "RNDIS 1.0" specification is incomplete. Issues
42 * include:
43 * - Power management in particular relies on information that's scattered
44 * through other documentation, and which is incomplete or incorrect even
45 * there.
46 * - There are various undocumented protocol requirements, such as the
47 * need to send unused garbage in control-OUT messages.
48 * - In some cases, MS-Windows will emit undocumented requests; this
49 * matters more to peripheral implementations than host ones.
50 *
51 * Moreover there's a no-open-specs variant of RNDIS called "ActiveSync".
52 *
53 * For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in
54 * favor of such non-proprietary alternatives as CDC Ethernet or the newer (and
55 * currently rare) "Ethernet Emulation Model" (EEM).
56 */
57
58/*
59 * CONTROL uses CDC "encapsulated commands" with funky notifications.
60 * - control-out: SEND_ENCAPSULATED
61 * - interrupt-in: RESPONSE_AVAILABLE
62 * - control-in: GET_ENCAPSULATED
63 *
64 * We'll try to ignore the RESPONSE_AVAILABLE notifications.
65 *
66 * REVISIT some RNDIS implementations seem to have curious issues still
67 * to be resolved.
68 */
69struct rndis_msg_hdr {
70 __le32 msg_type; /* RNDIS_MSG_* */
71 __le32 msg_len;
72 // followed by data that varies between messages
73 __le32 request_id;
74 __le32 status;
75 // ... and more
76} __attribute__ ((packed));
77
78/* MS-Windows uses this strange size, but RNDIS spec says 1024 minimum */
79#define CONTROL_BUFFER_SIZE 1025
80
81/* RNDIS defines an (absurdly huge) 10 second control timeout,
82 * but ActiveSync seems to use a more usual 5 second timeout
83 * (which matches the USB 2.0 spec).
84 */
85#define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000)
86
87
88#define ccpu2 __constant_cpu_to_le32
89
90#define RNDIS_MSG_COMPLETION ccpu2(0x80000000)
91
92/* codes for "msg_type" field of rndis messages;
93 * only the data channel uses packet messages (maybe batched);
94 * everything else goes on the control channel.
95 */
96#define RNDIS_MSG_PACKET ccpu2(0x00000001) /* 1-N packets */
97#define RNDIS_MSG_INIT ccpu2(0x00000002)
98#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION)
99#define RNDIS_MSG_HALT ccpu2(0x00000003)
100#define RNDIS_MSG_QUERY ccpu2(0x00000004)
101#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION)
102#define RNDIS_MSG_SET ccpu2(0x00000005)
103#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION)
104#define RNDIS_MSG_RESET ccpu2(0x00000006)
105#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION)
106#define RNDIS_MSG_INDICATE ccpu2(0x00000007)
107#define RNDIS_MSG_KEEPALIVE ccpu2(0x00000008)
108#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION)
109
110/* codes for "status" field of completion messages */
111#define RNDIS_STATUS_SUCCESS ccpu2(0x00000000)
112#define RNDIS_STATUS_FAILURE ccpu2(0xc0000001)
113#define RNDIS_STATUS_INVALID_DATA ccpu2(0xc0010015)
114#define RNDIS_STATUS_NOT_SUPPORTED ccpu2(0xc00000bb)
115#define RNDIS_STATUS_MEDIA_CONNECT ccpu2(0x4001000b)
116#define RNDIS_STATUS_MEDIA_DISCONNECT ccpu2(0x4001000c)
117
118
119struct rndis_data_hdr {
120 __le32 msg_type; /* RNDIS_MSG_PACKET */
121 __le32 msg_len; // rndis_data_hdr + data_len + pad
122 __le32 data_offset; // 36 -- right after header
123 __le32 data_len; // ... real packet size
124
125 __le32 oob_data_offset; // zero
126 __le32 oob_data_len; // zero
127 __le32 num_oob; // zero
128 __le32 packet_data_offset; // zero
129
130 __le32 packet_data_len; // zero
131 __le32 vc_handle; // zero
132 __le32 reserved; // zero
133} __attribute__ ((packed));
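
/* Worked example (ignoring padding): a 1514 byte Ethernet frame wrapped in
 * one rndis_data_hdr has msg_len = 44 + 1514 = 1558, data_len = 1514 and
 * data_offset = 36, i.e. sizeof(struct rndis_data_hdr) - 8, because the
 * offset is counted from the byte just after msg_type and msg_len.
 */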
134
135struct rndis_init { /* OUT */
136 // header and:
137 __le32 msg_type; /* RNDIS_MSG_INIT */
138 __le32 msg_len; // 24
139 __le32 request_id;
140 __le32 major_version; // of rndis (1.0)
141 __le32 minor_version;
142 __le32 max_transfer_size;
143} __attribute__ ((packed));
144
145struct rndis_init_c { /* IN */
146 // header and:
147 __le32 msg_type; /* RNDIS_MSG_INIT_C */
148 __le32 msg_len;
149 __le32 request_id;
150 __le32 status;
151 __le32 major_version; // of rndis (1.0)
152 __le32 minor_version;
153 __le32 device_flags;
154 __le32 medium; // zero == 802.3
155 __le32 max_packets_per_message;
156 __le32 max_transfer_size;
157 __le32 packet_alignment; // max 7; (1<<n) bytes
158 __le32 af_list_offset; // zero
159 __le32 af_list_size; // zero
160} __attribute__ ((packed));
161
162struct rndis_halt { /* OUT (no reply) */
163 // header and:
164 __le32 msg_type; /* RNDIS_MSG_HALT */
165 __le32 msg_len;
166 __le32 request_id;
167} __attribute__ ((packed));
168
169struct rndis_query { /* OUT */
170 // header and:
171 __le32 msg_type; /* RNDIS_MSG_QUERY */
172 __le32 msg_len;
173 __le32 request_id;
174 __le32 oid;
175 __le32 len;
176 __le32 offset;
177/*?*/ __le32 handle; // zero
178} __attribute__ ((packed));
179
180struct rndis_query_c { /* IN */
181 // header and:
182 __le32 msg_type; /* RNDIS_MSG_QUERY_C */
183 __le32 msg_len;
184 __le32 request_id;
185 __le32 status;
186 __le32 len;
187 __le32 offset;
188} __attribute__ ((packed));
189
190struct rndis_set { /* OUT */
191 // header and:
192 __le32 msg_type; /* RNDIS_MSG_SET */
193 __le32 msg_len;
194 __le32 request_id;
195 __le32 oid;
196 __le32 len;
197 __le32 offset;
198/*?*/ __le32 handle; // zero
199} __attribute__ ((packed));
200
201struct rndis_set_c { /* IN */
202 // header and:
203 __le32 msg_type; /* RNDIS_MSG_SET_C */
204 __le32 msg_len;
205 __le32 request_id;
206 __le32 status;
207} __attribute__ ((packed));
208
209struct rndis_reset { /* IN */
210 // header and:
211 __le32 msg_type; /* RNDIS_MSG_RESET */
212 __le32 msg_len;
213 __le32 reserved;
214} __attribute__ ((packed));
215
216struct rndis_reset_c { /* OUT */
217 // header and:
218 __le32 msg_type; /* RNDIS_MSG_RESET_C */
219 __le32 msg_len;
220 __le32 status;
221 __le32 addressing_lost;
222} __attribute__ ((packed));
223
224struct rndis_indicate { /* IN (unrequested) */
225 // header and:
226 __le32 msg_type; /* RNDIS_MSG_INDICATE */
227 __le32 msg_len;
228 __le32 status;
229 __le32 length;
230 __le32 offset;
231/**/ __le32 diag_status;
232 __le32 error_offset;
233/**/ __le32 message;
234} __attribute__ ((packed));
235
236struct rndis_keepalive { /* OUT (optionally IN) */
237 // header and:
238 __le32 msg_type; /* RNDIS_MSG_KEEPALIVE */
239 __le32 msg_len;
240 __le32 request_id;
241} __attribute__ ((packed));
242
243struct rndis_keepalive_c { /* IN (optionally OUT) */
244 // header and:
245 __le32 msg_type; /* RNDIS_MSG_KEEPALIVE_C */
246 __le32 msg_len;
247 __le32 request_id;
248 __le32 status;
249} __attribute__ ((packed));
250
251/* NOTE: about 30 OIDs are "mandatory" for peripherals to support ... and
252 * there are gobs more that may optionally be supported. We'll avoid as much
253 * of that mess as possible.
254 */
255#define OID_802_3_PERMANENT_ADDRESS ccpu2(0x01010101)
256#define OID_GEN_MAXIMUM_FRAME_SIZE ccpu2(0x00010106)
257#define OID_GEN_CURRENT_PACKET_FILTER ccpu2(0x0001010e)
258
259/*
260 * RNDIS notifications from device: command completion; "reverse"
261 * keepalives; etc
262 */
263static void rndis_status(struct usbnet *dev, struct urb *urb)
264{
265 devdbg(dev, "rndis status urb, len %d stat %d",
266 urb->actual_length, urb->status);
267 // FIXME for keepalives, respond immediately (asynchronously)
268 // if not an RNDIS status, do like cdc_status(dev,urb) does
269}
270
271/*
272 * RPC done RNDIS-style. Caller guarantees:
273 * - message is properly byteswapped
274 * - there's no other request pending
275 * - buf can hold up to 1KB response (required by RNDIS spec)
276 * On return, the first few entries are already byteswapped.
277 *
278 * Call context is likely probe(), before interface name is known,
279 * which is why we won't try to use it in the diagnostics.
280 */
281static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
282{
283 struct cdc_state *info = (void *) &dev->data;
284 int master_ifnum;
285 int retval;
286 unsigned count;
287 __le32 rsp;
288 u32 xid = 0, msg_len, request_id;
289
290 /* REVISIT when this gets called from contexts other than probe() or
291 * disconnect(): either serialize, or dispatch responses on xid
292 */
293
294 /* Issue the request; xid is unique, don't bother byteswapping it */
295 if (likely(buf->msg_type != RNDIS_MSG_HALT
296 && buf->msg_type != RNDIS_MSG_RESET)) {
297 xid = dev->xid++;
298 if (!xid)
299 xid = dev->xid++;
300 buf->request_id = (__force __le32) xid;
301 }
302 master_ifnum = info->control->cur_altsetting->desc.bInterfaceNumber;
303 retval = usb_control_msg(dev->udev,
304 usb_sndctrlpipe(dev->udev, 0),
305 USB_CDC_SEND_ENCAPSULATED_COMMAND,
306 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
307 0, master_ifnum,
308 buf, le32_to_cpu(buf->msg_len),
309 RNDIS_CONTROL_TIMEOUT_MS);
310 if (unlikely(retval < 0 || xid == 0))
311 return retval;
312
313 // FIXME Seems like some devices discard responses when
314 // we time out and cancel our "get response" requests...
315 // so, this is fragile. Probably need to poll for status.
316
317 /* ignore status endpoint, just poll the control channel;
318 * the request probably completed immediately
319 */
320 rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
321 for (count = 0; count < 10; count++) {
322 memset(buf, 0, CONTROL_BUFFER_SIZE);
323 retval = usb_control_msg(dev->udev,
324 usb_rcvctrlpipe(dev->udev, 0),
325 USB_CDC_GET_ENCAPSULATED_RESPONSE,
326 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
327 0, master_ifnum,
328 buf, CONTROL_BUFFER_SIZE,
329 RNDIS_CONTROL_TIMEOUT_MS);
330 if (likely(retval >= 8)) {
331 msg_len = le32_to_cpu(buf->msg_len);
332 request_id = (__force u32) buf->request_id;
333 if (likely(buf->msg_type == rsp)) {
334 if (likely(request_id == xid)) {
335 if (unlikely(rsp == RNDIS_MSG_RESET_C))
336 return 0;
337 if (likely(RNDIS_STATUS_SUCCESS
338 == buf->status))
339 return 0;
340 dev_dbg(&info->control->dev,
341 "rndis reply status %08x\n",
342 le32_to_cpu(buf->status));
343 return -EL3RST;
344 }
345 dev_dbg(&info->control->dev,
346 "rndis reply id %d expected %d\n",
347 request_id, xid);
348 /* then likely retry */
349 } else switch (buf->msg_type) {
350 case RNDIS_MSG_INDICATE: { /* fault */
351 // struct rndis_indicate *msg = (void *)buf;
352 dev_info(&info->control->dev,
353 "rndis fault indication\n");
354 }
355 break;
356 case RNDIS_MSG_KEEPALIVE: { /* ping */
357 struct rndis_keepalive_c *msg = (void *)buf;
358
359 msg->msg_type = RNDIS_MSG_KEEPALIVE_C;
360 msg->msg_len = ccpu2(sizeof *msg);
361 msg->status = RNDIS_STATUS_SUCCESS;
362 retval = usb_control_msg(dev->udev,
363 usb_sndctrlpipe(dev->udev, 0),
364 USB_CDC_SEND_ENCAPSULATED_COMMAND,
365 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
366 0, master_ifnum,
367 msg, sizeof *msg,
368 RNDIS_CONTROL_TIMEOUT_MS);
369 if (unlikely(retval < 0))
370 dev_dbg(&info->control->dev,
371 "rndis keepalive err %d\n",
372 retval);
373 }
374 break;
375 default:
376 dev_dbg(&info->control->dev,
377 "unexpected rndis msg %08x len %d\n",
378 le32_to_cpu(buf->msg_type), msg_len);
379 }
380 } else {
381 /* device probably issued a protocol stall; ignore */
382 dev_dbg(&info->control->dev,
383 "rndis response error, code %d\n", retval);
384 }
385 msleep(2);
386 }
387 dev_dbg(&info->control->dev, "rndis response timeout\n");
388 return -ETIMEDOUT;
389}
390
391/*
392 * rndis_query:
393 *
394 * Performs a query for @oid along with 0 or more bytes of payload as
395 * specified by @in_len. If @reply_len is not set to -1 then the reply
396 * length is checked against this value, resulting in an error if it
397 * doesn't match.
398 *
399 * NOTE: Adding a payload at least as large as the expected response
400 * payload is evidently a requirement MSFT added for ActiveSync.
401 *
402 * The only exception is for OIDs that return a variably sized response,
403 * in which case no payload should be added. This undocumented (and
404 * nonsensical!) issue was found by sniffing protocol requests from the
405 * ActiveSync 4.1 Windows driver.
406 */
407static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
408 void *buf, u32 oid, u32 in_len,
409 void **reply, int *reply_len)
410{
411 int retval;
412 union {
413 void *buf;
414 struct rndis_msg_hdr *header;
415 struct rndis_query *get;
416 struct rndis_query_c *get_c;
417 } u;
418 u32 off, len;
419
420 u.buf = buf;
421
422 memset(u.get, 0, sizeof *u.get + in_len);
423 u.get->msg_type = RNDIS_MSG_QUERY;
424 u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len);
425 u.get->oid = oid;
426 u.get->len = cpu_to_le32(in_len);
427 u.get->offset = ccpu2(20);
428
429 retval = rndis_command(dev, u.header);
430 if (unlikely(retval < 0)) {
431 dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) failed, %d\n",
432 oid, retval);
433 return retval;
434 }
435
436 off = le32_to_cpu(u.get_c->offset);
437 len = le32_to_cpu(u.get_c->len);
438 if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE))
439 goto response_error;
440
441 if (*reply_len != -1 && len != *reply_len)
442 goto response_error;
443
444 *reply = (unsigned char *) &u.get_c->request_id + off;
445 *reply_len = len;
446
447 return retval;
448
449response_error:
450 dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) "
451 "invalid response - off %d len %d\n",
452 oid, off, len);
453 return -EDOM;
454}
455
456static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
457{
458 int retval;
459 struct net_device *net = dev->net;
460 struct cdc_state *info = (void *) &dev->data;
461 union {
462 void *buf;
463 struct rndis_msg_hdr *header;
464 struct rndis_init *init;
465 struct rndis_init_c *init_c;
466 struct rndis_query *get;
467 struct rndis_query_c *get_c;
468 struct rndis_set *set;
469 struct rndis_set_c *set_c;
470 } u;
471 u32 tmp;
472 int reply_len;
473 unsigned char *bp;
474
475 /* we can't rely on i/o from stack working, or stack allocation */
476 u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
477 if (!u.buf)
478 return -ENOMEM;
479 retval = usbnet_generic_cdc_bind(dev, intf);
480 if (retval < 0)
481 goto fail;
482
483 u.init->msg_type = RNDIS_MSG_INIT;
484 u.init->msg_len = ccpu2(sizeof *u.init);
485 u.init->major_version = ccpu2(1);
486 u.init->minor_version = ccpu2(0);
487
488 /* max transfer (in spec) is 0x4000 at full speed, but for
489 * TX we'll stick to one Ethernet packet plus RNDIS framing.
490 * For RX we handle drivers that zero-pad to end-of-packet.
491 * Don't let userspace change these settings.
492 *
493 * NOTE: there still seems to be weirdness here, as if we need
494 * to do some more things to make sure WinCE targets accept this.
495 * They default to jumbograms of 8KB or 16KB, which is absurd
496 * for such low data rates and which is also more than Linux
497 * can usually expect to allocate for SKB data...
498 */
499 net->hard_header_len += sizeof (struct rndis_data_hdr);
500 dev->hard_mtu = net->mtu + net->hard_header_len;
501
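	/* Sketch with assumed numbers: with the default 1500 byte MTU the
	 * hard_mtu above works out to 1558, and on a high speed link with a
	 * 512 byte maxpacket the two lines below round rx_urb_size up to
	 * 2048 -- a whole number of max-size packets, always strictly
	 * larger than hard_mtu.
	 */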
502 dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1);
503 dev->rx_urb_size &= ~(dev->maxpacket - 1);
504 u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size);
505
506 net->change_mtu = NULL;
507 retval = rndis_command(dev, u.header);
508 if (unlikely(retval < 0)) {
509 /* it might not even be an RNDIS device!! */
510 dev_err(&intf->dev, "RNDIS init failed, %d\n", retval);
511 goto fail_and_release;
512 }
513 tmp = le32_to_cpu(u.init_c->max_transfer_size);
514 if (tmp < dev->hard_mtu) {
515 dev_err(&intf->dev,
516 "dev can't take %u byte packets (max %u)\n",
517 dev->hard_mtu, tmp);
518 goto fail_and_release;
519 }
520
521 /* REVISIT: peripheral "alignment" request is ignored ... */
522 dev_dbg(&intf->dev,
523 "hard mtu %u (%u from dev), rx buflen %Zu, align %d\n",
524 dev->hard_mtu, tmp, dev->rx_urb_size,
525 1 << le32_to_cpu(u.init_c->packet_alignment));
526
527 /* Get designated host ethernet address */
528 reply_len = ETH_ALEN;
529 retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS,
530 48, (void **) &bp, &reply_len);
531	if (unlikely(retval < 0)) {
532 dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval);
533 goto fail_and_release;
534 }
535 memcpy(net->dev_addr, bp, ETH_ALEN);
536
537 /* set a nonzero filter to enable data transfers */
538 memset(u.set, 0, sizeof *u.set);
539 u.set->msg_type = RNDIS_MSG_SET;
540 u.set->msg_len = ccpu2(4 + sizeof *u.set);
541 u.set->oid = OID_GEN_CURRENT_PACKET_FILTER;
542 u.set->len = ccpu2(4);
543 u.set->offset = ccpu2((sizeof *u.set) - 8);
544 *(__le32 *)(u.buf + sizeof *u.set) = ccpu2(DEFAULT_FILTER);
545
546 retval = rndis_command(dev, u.header);
547 if (unlikely(retval < 0)) {
548 dev_err(&intf->dev, "rndis set packet filter, %d\n", retval);
549 goto fail_and_release;
550 }
551
552 retval = 0;
553
554 kfree(u.buf);
555 return retval;
556
557fail_and_release:
558 usb_set_intfdata(info->data, NULL);
559 usb_driver_release_interface(driver_of(intf), info->data);
560 info->data = NULL;
561fail:
562 kfree(u.buf);
563 return retval;
564}
565
566static void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
567{
568 struct rndis_halt *halt;
569
570 /* try to clear any rndis state/activity (no i/o from stack!) */
571 halt = kzalloc(sizeof *halt, GFP_KERNEL);
572 if (halt) {
573 halt->msg_type = RNDIS_MSG_HALT;
574 halt->msg_len = ccpu2(sizeof *halt);
575 (void) rndis_command(dev, (void *)halt);
576 kfree(halt);
577 }
578
579 return usbnet_cdc_unbind(dev, intf);
580}
581
582/*
583 * DATA -- host must not write zlps
584 */
585static int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
586{
587 /* peripheral may have batched packets to us... */
588 while (likely(skb->len)) {
589 struct rndis_data_hdr *hdr = (void *)skb->data;
590 struct sk_buff *skb2;
591 u32 msg_len, data_offset, data_len;
592
593 msg_len = le32_to_cpu(hdr->msg_len);
594 data_offset = le32_to_cpu(hdr->data_offset);
595 data_len = le32_to_cpu(hdr->data_len);
596
597 /* don't choke if we see oob, per-packet data, etc */
598 if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
599 || skb->len < msg_len
600 || (data_offset + data_len + 8) > msg_len)) {
601 dev->stats.rx_frame_errors++;
602 devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
603 le32_to_cpu(hdr->msg_type),
604 msg_len, data_offset, data_len, skb->len);
605 return 0;
606 }
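		/* data_offset is counted from just after msg_type and
		 * msg_len, which is why 8 extra bytes are skipped here to
		 * reach the Ethernet frame itself.
		 */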
607 skb_pull(skb, 8 + data_offset);
608
609 /* at most one packet left? */
610 if (likely((data_len - skb->len) <= sizeof *hdr)) {
611 skb_trim(skb, data_len);
612 break;
613 }
614
615 /* try to return all the packets in the batch */
616 skb2 = skb_clone(skb, GFP_ATOMIC);
617 if (unlikely(!skb2))
618 break;
619 skb_pull(skb, msg_len - sizeof *hdr);
620 skb_trim(skb2, data_len);
621 usbnet_skb_return(dev, skb2);
622 }
623
624 /* caller will usbnet_skb_return the remaining packet */
625 return 1;
626}
627
628static struct sk_buff *
629rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
630{
631 struct rndis_data_hdr *hdr;
632 struct sk_buff *skb2;
633 unsigned len = skb->len;
634
635 if (likely(!skb_cloned(skb))) {
636 int room = skb_headroom(skb);
637
638 /* enough head room as-is? */
639 if (unlikely((sizeof *hdr) <= room))
640 goto fill;
641
642 /* enough room, but needs to be readjusted? */
643 room += skb_tailroom(skb);
644 if (likely((sizeof *hdr) <= room)) {
645 skb->data = memmove(skb->head + sizeof *hdr,
646 skb->data, len);
647 skb_set_tail_pointer(skb, len);
648 goto fill;
649 }
650 }
651
652 /* create a new skb, with the correct size (and tailpad) */
653 skb2 = skb_copy_expand(skb, sizeof *hdr, 1, flags);
654 dev_kfree_skb_any(skb);
655 if (unlikely(!skb2))
656 return skb2;
657 skb = skb2;
658
659 /* fill out the RNDIS header. we won't bother trying to batch
660 * packets; Linux minimizes wasted bandwidth through tx queues.
661 */
662fill:
663 hdr = (void *) __skb_push(skb, sizeof *hdr);
664 memset(hdr, 0, sizeof *hdr);
665 hdr->msg_type = RNDIS_MSG_PACKET;
666 hdr->msg_len = cpu_to_le32(skb->len);
667 hdr->data_offset = ccpu2(sizeof(*hdr) - 8);
668 hdr->data_len = cpu_to_le32(len);
669
670 /* FIXME make the last packet always be short ... */
671 return skb;
672}
673
674
675static const struct driver_info rndis_info = {
676 .description = "RNDIS device",
677 .flags = FLAG_ETHER | FLAG_FRAMING_RN | FLAG_NO_SETINT,
678 .bind = rndis_bind,
679 .unbind = rndis_unbind,
680 .status = rndis_status,
681 .rx_fixup = rndis_rx_fixup,
682 .tx_fixup = rndis_tx_fixup,
683};
684
685#undef ccpu2
686
687
688/*-------------------------------------------------------------------------*/
689
690static const struct usb_device_id products [] = {
691{
692 /* RNDIS is MSFT's un-official variant of CDC ACM */
693 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
694 .driver_info = (unsigned long) &rndis_info,
695}, {
696 /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
697 USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
698 .driver_info = (unsigned long) &rndis_info,
699},
700 { }, // END
701};
702MODULE_DEVICE_TABLE(usb, products);
703
704static struct usb_driver rndis_driver = {
705 .name = "rndis_host",
706 .id_table = products,
707 .probe = usbnet_probe,
708 .disconnect = usbnet_disconnect,
709 .suspend = usbnet_suspend,
710 .resume = usbnet_resume,
711};
712
713static int __init rndis_init(void)
714{
715 return usb_register(&rndis_driver);
716}
717module_init(rndis_init);
718
719static void __exit rndis_exit(void)
720{
721 usb_deregister(&rndis_driver);
722}
723module_exit(rndis_exit);
724
725MODULE_AUTHOR("David Brownell");
726MODULE_DESCRIPTION("USB Host side RNDIS driver");
727MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
new file mode 100644
index 000000000000..fa598f0340cf
--- /dev/null
+++ b/drivers/net/usb/rtl8150.c
@@ -0,0 +1,1004 @@
1/*
2 * Copyright (c) 2002 Petko Manolov (petkan@users.sourceforge.net)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * version 2 as published by the Free Software Foundation.
7 */
8
9#include <linux/init.h>
10#include <linux/signal.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/mii.h>
16#include <linux/ethtool.h>
17#include <linux/usb.h>
18#include <asm/uaccess.h>
19
20/* Version Information */
21#define DRIVER_VERSION "v0.6.2 (2004/08/27)"
22#define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>"
23#define DRIVER_DESC "rtl8150 based usb-ethernet driver"
24
25#define IDR 0x0120
26#define MAR 0x0126
27#define CR 0x012e
28#define TCR 0x012f
29#define RCR 0x0130
30#define TSR 0x0132
31#define RSR 0x0133
32#define CON0 0x0135
33#define CON1 0x0136
34#define MSR 0x0137
35#define PHYADD 0x0138
36#define PHYDAT 0x0139
37#define PHYCNT 0x013b
38#define GPPC 0x013d
39#define BMCR 0x0140
40#define BMSR 0x0142
41#define ANAR 0x0144
42#define ANLP 0x0146
43#define AER 0x0148
44#define CSCR 0x014C /* This one has the link status */
45#define CSCR_LINK_STATUS (1 << 3)
46
47#define IDR_EEPROM 0x1202
48
49#define PHY_READ 0
50#define PHY_WRITE 0x20
51#define PHY_GO 0x40
52
53#define MII_TIMEOUT 10
54#define INTBUFSIZE 8
55
56#define RTL8150_REQT_READ 0xc0
57#define RTL8150_REQT_WRITE 0x40
58#define RTL8150_REQ_GET_REGS 0x05
59#define RTL8150_REQ_SET_REGS 0x05
60
61
62/* Transmit status register errors */
63#define TSR_ECOL (1<<5)
64#define TSR_LCOL (1<<4)
65#define TSR_LOSS_CRS (1<<3)
66#define TSR_JBR (1<<2)
67#define TSR_ERRORS (TSR_ECOL | TSR_LCOL | TSR_LOSS_CRS | TSR_JBR)
68/* Receive status register errors */
69#define RSR_CRC (1<<2)
70#define RSR_FAE (1<<1)
71#define RSR_ERRORS (RSR_CRC | RSR_FAE)
72
73/* Media status register definitions */
74#define MSR_DUPLEX (1<<4)
75#define MSR_SPEED (1<<3)
76#define MSR_LINK (1<<2)
77
78/* Interrupt pipe data */
79#define INT_TSR 0x00
80#define INT_RSR 0x01
81#define INT_MSR 0x02
82#define INT_WAKSR 0x03
83#define INT_TXOK_CNT 0x04
84#define INT_RXLOST_CNT 0x05
85#define INT_CRERR_CNT 0x06
86#define INT_COL_CNT 0x07
87
114
115#define RTL8150_MTU 1540
116#define RTL8150_TX_TIMEOUT (HZ)
117#define RX_SKB_POOL_SIZE 4
118
119/* rtl8150 flags */
120#define RTL8150_HW_CRC 0
121#define RX_REG_SET 1
122#define RTL8150_UNPLUG 2
123#define RX_URB_FAIL 3
124
125/* Define these values to match your device */
126#define VENDOR_ID_REALTEK 0x0bda
127#define VENDOR_ID_MELCO 0x0411
128#define VENDOR_ID_MICRONET 0x3980
129#define VENDOR_ID_LONGSHINE 0x07b8
130#define VENDOR_ID_OQO 0x1557
131#define VENDOR_ID_ZYXEL 0x0586
132
133#define PRODUCT_ID_RTL8150 0x8150
134#define PRODUCT_ID_LUAKTX 0x0012
135#define PRODUCT_ID_LCS8138TX 0x401a
136#define PRODUCT_ID_SP128AR 0x0003
137#define PRODUCT_ID_PRESTIGE 0x401a
138
139#undef EEPROM_WRITE
140
141/* table of devices that work with this driver */
142static struct usb_device_id rtl8150_table[] = {
143 {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8150)},
144 {USB_DEVICE(VENDOR_ID_MELCO, PRODUCT_ID_LUAKTX)},
145 {USB_DEVICE(VENDOR_ID_MICRONET, PRODUCT_ID_SP128AR)},
146 {USB_DEVICE(VENDOR_ID_LONGSHINE, PRODUCT_ID_LCS8138TX)},
147 {USB_DEVICE(VENDOR_ID_OQO, PRODUCT_ID_RTL8150)},
148 {USB_DEVICE(VENDOR_ID_ZYXEL, PRODUCT_ID_PRESTIGE)},
149 {}
150};
151
152MODULE_DEVICE_TABLE(usb, rtl8150_table);
153
154struct rtl8150 {
155 unsigned long flags;
156 struct usb_device *udev;
157 struct tasklet_struct tl;
158 struct net_device_stats stats;
159 struct net_device *netdev;
160 struct urb *rx_urb, *tx_urb, *intr_urb, *ctrl_urb;
161 struct sk_buff *tx_skb, *rx_skb;
162 struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE];
163 spinlock_t rx_pool_lock;
164 struct usb_ctrlrequest dr;
165 int intr_interval;
166 __le16 rx_creg;
167 u8 *intr_buff;
168 u8 phy;
169};
170
171typedef struct rtl8150 rtl8150_t;
172
173static void fill_skb_pool(rtl8150_t *);
174static void free_skb_pool(rtl8150_t *);
175static inline struct sk_buff *pull_skb(rtl8150_t *);
176static void rtl8150_disconnect(struct usb_interface *intf);
177static int rtl8150_probe(struct usb_interface *intf,
178 const struct usb_device_id *id);
179static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message);
180static int rtl8150_resume(struct usb_interface *intf);
181
182static const char driver_name [] = "rtl8150";
183
184static struct usb_driver rtl8150_driver = {
185 .name = driver_name,
186 .probe = rtl8150_probe,
187 .disconnect = rtl8150_disconnect,
188 .id_table = rtl8150_table,
189 .suspend = rtl8150_suspend,
190 .resume = rtl8150_resume
191};
192
193/*
194**
195** device related part of the code
196**
197*/
198static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
199{
200 return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
201 RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
202 indx, 0, data, size, 500);
203}
204
205static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
206{
207 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
208 RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
209 indx, 0, data, size, 500);
210}
211
212static void ctrl_callback(struct urb *urb)
213{
214 rtl8150_t *dev;
215
216 switch (urb->status) {
217 case 0:
218 break;
219 case -EINPROGRESS:
220 break;
221 case -ENOENT:
222 break;
223 default:
224 warn("ctrl urb status %d", urb->status);
225 }
226 dev = urb->context;
227 clear_bit(RX_REG_SET, &dev->flags);
228}
229
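/* Write dev->rx_creg to the chip without sleeping: this is called from
 * rtl8150_set_multicast(), which may run in atomic context, so the update
 * goes out as an asynchronous control URB instead of the blocking
 * usb_control_msg() used by set_registers().
 */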
230static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size)
231{
232 int ret;
233
234 if (test_bit(RX_REG_SET, &dev->flags))
235 return -EAGAIN;
236
237 dev->dr.bRequestType = RTL8150_REQT_WRITE;
238 dev->dr.bRequest = RTL8150_REQ_SET_REGS;
239 dev->dr.wValue = cpu_to_le16(indx);
240 dev->dr.wIndex = 0;
241 dev->dr.wLength = cpu_to_le16(size);
242 dev->ctrl_urb->transfer_buffer_length = size;
243 usb_fill_control_urb(dev->ctrl_urb, dev->udev,
244 usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr,
245 &dev->rx_creg, size, ctrl_callback, dev);
246 if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) {
247 if (ret == -ENODEV)
248 netif_device_detach(dev->netdev);
249 err("control request submission failed: %d", ret);
250 } else
251 set_bit(RX_REG_SET, &dev->flags);
252
253 return ret;
254}
255
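/* Indirect MII access: write the PHY address (and, for writes, the data)
 * into PHYADD/PHYDAT, start the cycle by setting PHY_GO in PHYCNT, then
 * poll PHYCNT until the GO bit clears or MII_TIMEOUT polls have elapsed.
 */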
256static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
257{
258 int i;
259 u8 data[3], tmp;
260
261 data[0] = phy;
262 data[1] = data[2] = 0;
263 tmp = indx | PHY_READ | PHY_GO;
264 i = 0;
265
266 set_registers(dev, PHYADD, sizeof(data), data);
267 set_registers(dev, PHYCNT, 1, &tmp);
268 do {
269 get_registers(dev, PHYCNT, 1, data);
270 } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
271
272 if (i < MII_TIMEOUT) {
273 get_registers(dev, PHYDAT, 2, data);
274 *reg = data[0] | (data[1] << 8);
275 return 0;
276 } else
277 return 1;
278}
279
280static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg)
281{
282 int i;
283 u8 data[3], tmp;
284
285 data[0] = phy;
286 data[1] = reg & 0xff;
287 data[2] = (reg >> 8) & 0xff;
288 tmp = indx | PHY_WRITE | PHY_GO;
289 i = 0;
290
291 set_registers(dev, PHYADD, sizeof(data), data);
292 set_registers(dev, PHYCNT, 1, &tmp);
293 do {
294 get_registers(dev, PHYCNT, 1, data);
295 } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
296
297 if (i < MII_TIMEOUT)
298 return 0;
299 else
300 return 1;
301}
302
303static inline void set_ethernet_addr(rtl8150_t * dev)
304{
305 u8 node_id[6];
306
307 get_registers(dev, IDR, sizeof(node_id), node_id);
308 memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id));
309}
310
311static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
312{
313 struct sockaddr *addr = p;
314 rtl8150_t *dev = netdev_priv(netdev);
315 int i;
316
317 if (netif_running(netdev))
318 return -EBUSY;
319
320 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
321 dbg("%s: Setting MAC address to ", netdev->name);
322 for (i = 0; i < 5; i++)
323 dbg("%02X:", netdev->dev_addr[i]);
324 dbg("%02X\n", netdev->dev_addr[i]);
325 /* Set the IDR registers. */
326	set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
327#ifdef EEPROM_WRITE
328 {
329 u8 cr;
330 /* Get the CR contents. */
331 get_registers(dev, CR, 1, &cr);
332 /* Set the WEPROM bit (eeprom write enable). */
333 cr |= 0x20;
334 set_registers(dev, CR, 1, &cr);
335 /* Write the MAC address into eeprom. Eeprom writes must be word-sized,
336 so we need to split them up. */
337 for (i = 0; i * 2 < netdev->addr_len; i++) {
338 set_registers(dev, IDR_EEPROM + (i * 2), 2,
339 netdev->dev_addr + (i * 2));
340 }
341 /* Clear the WEPROM bit (preventing accidental eeprom writes). */
342 cr &= 0xdf;
343 set_registers(dev, CR, 1, &cr);
344 }
345#endif
346 return 0;
347}
348
349static int rtl8150_reset(rtl8150_t * dev)
350{
351 u8 data = 0x10;
352 int i = HZ;
353
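	/* Request a soft reset by writing the self-clearing bit 0x10 to the
	 * command register, then poll until the chip clears it again (give
	 * up after HZ polls).
	 */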
354 set_registers(dev, CR, 1, &data);
355 do {
356 get_registers(dev, CR, 1, &data);
357 } while ((data & 0x10) && --i);
358
359 return (i > 0) ? 1 : 0;
360}
361
362static int alloc_all_urbs(rtl8150_t * dev)
363{
364 dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
365 if (!dev->rx_urb)
366 return 0;
367 dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
368 if (!dev->tx_urb) {
369 usb_free_urb(dev->rx_urb);
370 return 0;
371 }
372 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
373 if (!dev->intr_urb) {
374 usb_free_urb(dev->rx_urb);
375 usb_free_urb(dev->tx_urb);
376 return 0;
377 }
378 dev->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
379	if (!dev->ctrl_urb) {
380 usb_free_urb(dev->rx_urb);
381 usb_free_urb(dev->tx_urb);
382 usb_free_urb(dev->intr_urb);
383 return 0;
384 }
385
386 return 1;
387}
388
389static void free_all_urbs(rtl8150_t * dev)
390{
391 usb_free_urb(dev->rx_urb);
392 usb_free_urb(dev->tx_urb);
393 usb_free_urb(dev->intr_urb);
394 usb_free_urb(dev->ctrl_urb);
395}
396
397static void unlink_all_urbs(rtl8150_t * dev)
398{
399 usb_kill_urb(dev->rx_urb);
400 usb_kill_urb(dev->tx_urb);
401 usb_kill_urb(dev->intr_urb);
402 usb_kill_urb(dev->ctrl_urb);
403}
404
405static inline struct sk_buff *pull_skb(rtl8150_t *dev)
406{
407 struct sk_buff *skb;
408 int i;
409
410 for (i = 0; i < RX_SKB_POOL_SIZE; i++) {
411 if (dev->rx_skb_pool[i]) {
412 skb = dev->rx_skb_pool[i];
413 dev->rx_skb_pool[i] = NULL;
414 return skb;
415 }
416 }
417 return NULL;
418}
419
420static void read_bulk_callback(struct urb *urb)
421{
422 rtl8150_t *dev;
423 unsigned pkt_len, res;
424 struct sk_buff *skb;
425 struct net_device *netdev;
426 u16 rx_stat;
427 int status;
428
429 dev = urb->context;
430 if (!dev)
431 return;
432 if (test_bit(RTL8150_UNPLUG, &dev->flags))
433 return;
434 netdev = dev->netdev;
435 if (!netif_device_present(netdev))
436 return;
437
438 switch (urb->status) {
439 case 0:
440 break;
441 case -ENOENT:
442 return; /* the urb is in unlink state */
443 case -ETIME:
444		warn("maybe a reset is needed?");
445 goto goon;
446 default:
447 warn("Rx status %d", urb->status);
448 goto goon;
449 }
450
451 if (!dev->rx_skb)
452 goto resched;
453 /* protect against short packets (tell me why we got some?!?) */
454 if (urb->actual_length < 4)
455 goto goon;
456
457 res = urb->actual_length;
458 rx_stat = le16_to_cpu(*(__le16 *)(urb->transfer_buffer + res - 4));
459 pkt_len = res - 4;
460
461 skb_put(dev->rx_skb, pkt_len);
462 dev->rx_skb->protocol = eth_type_trans(dev->rx_skb, netdev);
463 netif_rx(dev->rx_skb);
464 dev->stats.rx_packets++;
465 dev->stats.rx_bytes += pkt_len;
466
467 spin_lock(&dev->rx_pool_lock);
468 skb = pull_skb(dev);
469 spin_unlock(&dev->rx_pool_lock);
470 if (!skb)
471 goto resched;
472
473 dev->rx_skb = skb;
474goon:
475 usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
476 dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev);
477 status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC);
478 if (status == -ENODEV)
479 netif_device_detach(dev->netdev);
480 else if (status) {
481 set_bit(RX_URB_FAIL, &dev->flags);
482 goto resched;
483 } else {
484 clear_bit(RX_URB_FAIL, &dev->flags);
485 }
486
487 return;
488resched:
489 tasklet_schedule(&dev->tl);
490}
491
492static void rx_fixup(unsigned long data)
493{
494 rtl8150_t *dev;
495 struct sk_buff *skb;
496 int status;
497
498 dev = (rtl8150_t *)data;
499
500 spin_lock_irq(&dev->rx_pool_lock);
501 fill_skb_pool(dev);
502 spin_unlock_irq(&dev->rx_pool_lock);
503 if (test_bit(RX_URB_FAIL, &dev->flags))
504 if (dev->rx_skb)
505 goto try_again;
506 spin_lock_irq(&dev->rx_pool_lock);
507 skb = pull_skb(dev);
508 spin_unlock_irq(&dev->rx_pool_lock);
509 if (skb == NULL)
510 goto tlsched;
511 dev->rx_skb = skb;
512 usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
513 dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev);
514try_again:
515 status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC);
516 if (status == -ENODEV) {
517 netif_device_detach(dev->netdev);
518 } else if (status) {
519 set_bit(RX_URB_FAIL, &dev->flags);
520 goto tlsched;
521 } else {
522 clear_bit(RX_URB_FAIL, &dev->flags);
523 }
524
525 return;
526tlsched:
527 tasklet_schedule(&dev->tl);
528}
529
530static void write_bulk_callback(struct urb *urb)
531{
532 rtl8150_t *dev;
533
534 dev = urb->context;
535 if (!dev)
536 return;
537 dev_kfree_skb_irq(dev->tx_skb);
538 if (!netif_device_present(dev->netdev))
539 return;
540 if (urb->status)
541 info("%s: Tx status %d", dev->netdev->name, urb->status);
542 dev->netdev->trans_start = jiffies;
543 netif_wake_queue(dev->netdev);
544}
545
546static void intr_callback(struct urb *urb)
547{
548 rtl8150_t *dev;
549 __u8 *d;
550 int status;
551
552 dev = urb->context;
553 if (!dev)
554 return;
555 switch (urb->status) {
556 case 0: /* success */
557 break;
558 case -ECONNRESET: /* unlink */
559 case -ENOENT:
560 case -ESHUTDOWN:
561 return;
562 /* -EPIPE: should clear the halt */
563 default:
564 info("%s: intr status %d", dev->netdev->name, urb->status);
565 goto resubmit;
566 }
567
568 d = urb->transfer_buffer;
569 if (d[0] & TSR_ERRORS) {
570 dev->stats.tx_errors++;
571 if (d[INT_TSR] & (TSR_ECOL | TSR_JBR))
572 dev->stats.tx_aborted_errors++;
573 if (d[INT_TSR] & TSR_LCOL)
574 dev->stats.tx_window_errors++;
575 if (d[INT_TSR] & TSR_LOSS_CRS)
576 dev->stats.tx_carrier_errors++;
577 }
578 /* Report link status changes to the network stack */
579 if ((d[INT_MSR] & MSR_LINK) == 0) {
580 if (netif_carrier_ok(dev->netdev)) {
581 netif_carrier_off(dev->netdev);
582 dbg("%s: LINK LOST\n", __func__);
583 }
584 } else {
585 if (!netif_carrier_ok(dev->netdev)) {
586 netif_carrier_on(dev->netdev);
587 dbg("%s: LINK CAME BACK\n", __func__);
588 }
589 }
590
591resubmit:
592 status = usb_submit_urb (urb, GFP_ATOMIC);
593 if (status == -ENODEV)
594 netif_device_detach(dev->netdev);
595 else if (status)
596 err ("can't resubmit intr, %s-%s/input0, status %d",
597 dev->udev->bus->bus_name,
598 dev->udev->devpath, status);
599}
600
601static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message)
602{
603 rtl8150_t *dev = usb_get_intfdata(intf);
604
605 netif_device_detach(dev->netdev);
606
607 if (netif_running(dev->netdev)) {
608 usb_kill_urb(dev->rx_urb);
609 usb_kill_urb(dev->intr_urb);
610 }
611 return 0;
612}
613
614static int rtl8150_resume(struct usb_interface *intf)
615{
616 rtl8150_t *dev = usb_get_intfdata(intf);
617
618 netif_device_attach(dev->netdev);
619 if (netif_running(dev->netdev)) {
620 dev->rx_urb->status = 0;
621 dev->rx_urb->actual_length = 0;
622 read_bulk_callback(dev->rx_urb);
623
624 dev->intr_urb->status = 0;
625 dev->intr_urb->actual_length = 0;
626 intr_callback(dev->intr_urb);
627 }
628 return 0;
629}
630
631/*
632**
633** network related part of the code
634**
635*/
636
637static void fill_skb_pool(rtl8150_t *dev)
638{
639 struct sk_buff *skb;
640 int i;
641
642 for (i = 0; i < RX_SKB_POOL_SIZE; i++) {
643 if (dev->rx_skb_pool[i])
644 continue;
645 skb = dev_alloc_skb(RTL8150_MTU + 2);
646 if (!skb) {
647 return;
648 }
649 skb_reserve(skb, 2);
650 dev->rx_skb_pool[i] = skb;
651 }
652}
653
654static void free_skb_pool(rtl8150_t *dev)
655{
656 int i;
657
658 for (i = 0; i < RX_SKB_POOL_SIZE; i++)
659 if (dev->rx_skb_pool[i])
660 dev_kfree_skb(dev->rx_skb_pool[i]);
661}
662
663static int enable_net_traffic(rtl8150_t * dev)
664{
665 u8 cr, tcr, rcr, msr;
666
667 if (!rtl8150_reset(dev)) {
668 warn("%s - device reset failed", __FUNCTION__);
669 }
670 /* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */
671 rcr = 0x9e;
672 dev->rx_creg = cpu_to_le16(rcr);
673 tcr = 0xd8;
674 cr = 0x0c;
675 if (!(rcr & 0x80))
676 set_bit(RTL8150_HW_CRC, &dev->flags);
677 set_registers(dev, RCR, 1, &rcr);
678 set_registers(dev, TCR, 1, &tcr);
679 set_registers(dev, CR, 1, &cr);
680 get_registers(dev, MSR, 1, &msr);
681
682 return 0;
683}
684
685static void disable_net_traffic(rtl8150_t * dev)
686{
687 u8 cr;
688
689 get_registers(dev, CR, 1, &cr);
690 cr &= 0xf3;
691 set_registers(dev, CR, 1, &cr);
692}
693
694static struct net_device_stats *rtl8150_netdev_stats(struct net_device *dev)
695{
696 return &((rtl8150_t *)netdev_priv(dev))->stats;
697}
698
699static void rtl8150_tx_timeout(struct net_device *netdev)
700{
701 rtl8150_t *dev = netdev_priv(netdev);
702 warn("%s: Tx timeout.", netdev->name);
703 usb_unlink_urb(dev->tx_urb);
704 dev->stats.tx_errors++;
705}
706
707static void rtl8150_set_multicast(struct net_device *netdev)
708{
709 rtl8150_t *dev = netdev_priv(netdev);
710 netif_stop_queue(netdev);
711 if (netdev->flags & IFF_PROMISC) {
712 dev->rx_creg |= cpu_to_le16(0x0001);
713 info("%s: promiscuous mode", netdev->name);
714 } else if (netdev->mc_count ||
715 (netdev->flags & IFF_ALLMULTI)) {
716 dev->rx_creg &= cpu_to_le16(0xfffe);
717 dev->rx_creg |= cpu_to_le16(0x0002);
718 info("%s: allmulti set", netdev->name);
719 } else {
720 /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
721 dev->rx_creg &= cpu_to_le16(0x00fc);
722 }
723 async_set_registers(dev, RCR, 2);
724 netif_wake_queue(netdev);
725}
726
727static int rtl8150_start_xmit(struct sk_buff *skb, struct net_device *netdev)
728{
729 rtl8150_t *dev = netdev_priv(netdev);
730 int count, res;
731
732 netif_stop_queue(netdev);
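	/* Pad short frames to the 60 byte Ethernet minimum, and add one
	 * extra byte whenever the length is a multiple of 64 so the bulk
	 * transfer always ends in a short packet and needs no zero length
	 * packet to terminate it.
	 */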
733 count = (skb->len < 60) ? 60 : skb->len;
734 count = (count & 0x3f) ? count : count + 1;
735 dev->tx_skb = skb;
736 usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2),
737 skb->data, count, write_bulk_callback, dev);
738 if ((res = usb_submit_urb(dev->tx_urb, GFP_ATOMIC))) {
739 /* Can we get/handle EPIPE here? */
740 if (res == -ENODEV)
741 netif_device_detach(dev->netdev);
742 else {
743 warn("failed tx_urb %d\n", res);
744 dev->stats.tx_errors++;
745 netif_start_queue(netdev);
746 }
747 } else {
748 dev->stats.tx_packets++;
749 dev->stats.tx_bytes += skb->len;
750 netdev->trans_start = jiffies;
751 }
752
753 return 0;
754}
755
756
757static void set_carrier(struct net_device *netdev)
758{
759 rtl8150_t *dev = netdev_priv(netdev);
760 short tmp;
761
762 get_registers(dev, CSCR, 2, &tmp);
763 if (tmp & CSCR_LINK_STATUS)
764 netif_carrier_on(netdev);
765 else
766 netif_carrier_off(netdev);
767}
768
769static int rtl8150_open(struct net_device *netdev)
770{
771 rtl8150_t *dev = netdev_priv(netdev);
772 int res;
773
774 if (dev->rx_skb == NULL)
775 dev->rx_skb = pull_skb(dev);
776 if (!dev->rx_skb)
777 return -ENOMEM;
778
779 set_registers(dev, IDR, 6, netdev->dev_addr);
780
781 usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
782 dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev);
783 if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) {
784 if (res == -ENODEV)
785 netif_device_detach(dev->netdev);
786 warn("%s: rx_urb submit failed: %d", __FUNCTION__, res);
787 return res;
788 }
789 usb_fill_int_urb(dev->intr_urb, dev->udev, usb_rcvintpipe(dev->udev, 3),
790 dev->intr_buff, INTBUFSIZE, intr_callback,
791 dev, dev->intr_interval);
792 if ((res = usb_submit_urb(dev->intr_urb, GFP_KERNEL))) {
793 if (res == -ENODEV)
794 netif_device_detach(dev->netdev);
795 warn("%s: intr_urb submit failed: %d", __FUNCTION__, res);
796 usb_kill_urb(dev->rx_urb);
797 return res;
798 }
799 enable_net_traffic(dev);
800 set_carrier(netdev);
801 netif_start_queue(netdev);
802
803 return res;
804}
805
806static int rtl8150_close(struct net_device *netdev)
807{
808 rtl8150_t *dev = netdev_priv(netdev);
809 int res = 0;
810
811 netif_stop_queue(netdev);
812 if (!test_bit(RTL8150_UNPLUG, &dev->flags))
813 disable_net_traffic(dev);
814 unlink_all_urbs(dev);
815
816 return res;
817}
818
819static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
820{
821 rtl8150_t *dev = netdev_priv(netdev);
822
823 strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
824 strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN);
825 usb_make_path(dev->udev, info->bus_info, sizeof info->bus_info);
826}
827
828static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
829{
830 rtl8150_t *dev = netdev_priv(netdev);
831 short lpa, bmcr;
832
833 ecmd->supported = (SUPPORTED_10baseT_Half |
834 SUPPORTED_10baseT_Full |
835 SUPPORTED_100baseT_Half |
836 SUPPORTED_100baseT_Full |
837 SUPPORTED_Autoneg |
838 SUPPORTED_TP | SUPPORTED_MII);
839 ecmd->port = PORT_TP;
840 ecmd->transceiver = XCVR_INTERNAL;
841 ecmd->phy_address = dev->phy;
842 get_registers(dev, BMCR, 2, &bmcr);
843 get_registers(dev, ANLP, 2, &lpa);
844 if (bmcr & BMCR_ANENABLE) {
845 ecmd->autoneg = AUTONEG_ENABLE;
846 ecmd->speed = (lpa & (LPA_100HALF | LPA_100FULL)) ?
847 SPEED_100 : SPEED_10;
848 if (ecmd->speed == SPEED_100)
849 ecmd->duplex = (lpa & LPA_100FULL) ?
850 DUPLEX_FULL : DUPLEX_HALF;
851 else
852 ecmd->duplex = (lpa & LPA_10FULL) ?
853 DUPLEX_FULL : DUPLEX_HALF;
854 } else {
855 ecmd->autoneg = AUTONEG_DISABLE;
856 ecmd->speed = (bmcr & BMCR_SPEED100) ?
857 SPEED_100 : SPEED_10;
858 ecmd->duplex = (bmcr & BMCR_FULLDPLX) ?
859 DUPLEX_FULL : DUPLEX_HALF;
860 }
861 return 0;
862}
863
864static struct ethtool_ops ops = {
865 .get_drvinfo = rtl8150_get_drvinfo,
866 .get_settings = rtl8150_get_settings,
867 .get_link = ethtool_op_get_link
868};
869
870static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
871{
872 rtl8150_t *dev = netdev_priv(netdev);
873 u16 *data = (u16 *) & rq->ifr_ifru;
874 int res = 0;
875
876 switch (cmd) {
877 case SIOCDEVPRIVATE:
878 data[0] = dev->phy;
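		/* fall through -- also read the requested MII register */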
879 case SIOCDEVPRIVATE + 1:
880 read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]);
881 break;
882 case SIOCDEVPRIVATE + 2:
883 if (!capable(CAP_NET_ADMIN))
884 return -EPERM;
885 write_mii_word(dev, dev->phy, (data[1] & 0x1f), data[2]);
886 break;
887 default:
888 res = -EOPNOTSUPP;
889 }
890
891 return res;
892}
893
894static int rtl8150_probe(struct usb_interface *intf,
895 const struct usb_device_id *id)
896{
897 struct usb_device *udev = interface_to_usbdev(intf);
898 rtl8150_t *dev;
899 struct net_device *netdev;
900
901 netdev = alloc_etherdev(sizeof(rtl8150_t));
902 if (!netdev) {
903 err("Out of memory");
904 return -ENOMEM;
905 }
906
907 dev = netdev_priv(netdev);
908 memset(dev, 0, sizeof(rtl8150_t));
909
910 dev->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL);
911 if (!dev->intr_buff) {
912 free_netdev(netdev);
913 return -ENOMEM;
914 }
915
916 tasklet_init(&dev->tl, rx_fixup, (unsigned long)dev);
917 spin_lock_init(&dev->rx_pool_lock);
918
919 dev->udev = udev;
920 dev->netdev = netdev;
921 SET_MODULE_OWNER(netdev);
922 netdev->open = rtl8150_open;
923 netdev->stop = rtl8150_close;
924 netdev->do_ioctl = rtl8150_ioctl;
925 netdev->watchdog_timeo = RTL8150_TX_TIMEOUT;
926 netdev->tx_timeout = rtl8150_tx_timeout;
927 netdev->hard_start_xmit = rtl8150_start_xmit;
928 netdev->set_multicast_list = rtl8150_set_multicast;
929 netdev->set_mac_address = rtl8150_set_mac_address;
930 netdev->get_stats = rtl8150_netdev_stats;
931 netdev->mtu = RTL8150_MTU;
932 SET_ETHTOOL_OPS(netdev, &ops);
933 dev->intr_interval = 100; /* 100ms */
934
935 if (!alloc_all_urbs(dev)) {
936 err("out of memory");
937 goto out;
938 }
939 if (!rtl8150_reset(dev)) {
940 err("couldn't reset the device");
941 goto out1;
942 }
943 fill_skb_pool(dev);
944 set_ethernet_addr(dev);
945
946 usb_set_intfdata(intf, dev);
947 SET_NETDEV_DEV(netdev, &intf->dev);
948 if (register_netdev(netdev) != 0) {
949 err("couldn't register the device");
950 goto out2;
951 }
952
953 info("%s: rtl8150 is detected", netdev->name);
954
955 return 0;
956
957out2:
958 usb_set_intfdata(intf, NULL);
959 free_skb_pool(dev);
960out1:
961 free_all_urbs(dev);
962out:
963 kfree(dev->intr_buff);
964 free_netdev(netdev);
965 return -EIO;
966}
967
968static void rtl8150_disconnect(struct usb_interface *intf)
969{
970 rtl8150_t *dev = usb_get_intfdata(intf);
971
972 usb_set_intfdata(intf, NULL);
973 if (dev) {
974 set_bit(RTL8150_UNPLUG, &dev->flags);
975 tasklet_disable(&dev->tl);
976 tasklet_kill(&dev->tl);
977 unregister_netdev(dev->netdev);
978 unlink_all_urbs(dev);
979 free_all_urbs(dev);
980 free_skb_pool(dev);
981 if (dev->rx_skb)
982 dev_kfree_skb(dev->rx_skb);
983 kfree(dev->intr_buff);
984 free_netdev(dev->netdev);
985 }
986}
987
988static int __init usb_rtl8150_init(void)
989{
990 info(DRIVER_DESC " " DRIVER_VERSION);
991 return usb_register(&rtl8150_driver);
992}
993
994static void __exit usb_rtl8150_exit(void)
995{
996 usb_deregister(&rtl8150_driver);
997}
998
999module_init(usb_rtl8150_init);
1000module_exit(usb_rtl8150_exit);
1001
1002MODULE_AUTHOR(DRIVER_AUTHOR);
1003MODULE_DESCRIPTION(DRIVER_DESC);
1004MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
new file mode 100644
index 000000000000..f9cd42d058b0
--- /dev/null
+++ b/drivers/net/usb/usbnet.c
@@ -0,0 +1,1304 @@
1/*
2 * USB Network driver infrastructure
3 * Copyright (C) 2000-2005 by David Brownell
4 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * This is a generic "USB networking" framework that works with several
23 * kinds of full and high speed networking devices: host-to-host cables,
24 * smart usb peripherals, and actual Ethernet adapters.
25 *
26 * These devices usually differ in terms of control protocols (if they
27 * even have one!) and sometimes they define new framing to wrap or batch
28 * Ethernet packets. Otherwise, they talk to USB pretty much the same,
29 * so interface (un)binding, endpoint I/O queues, fault handling, and other
30 * issues can usefully be addressed by this framework.
31 */
32
33// #define DEBUG // error path messages, extra info
34// #define VERBOSE // more; success messages
35
36#include <linux/module.h>
37#include <linux/init.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/ethtool.h>
41#include <linux/workqueue.h>
42#include <linux/mii.h>
43#include <linux/usb.h>
44
45#include "usbnet.h"
46
47#define DRIVER_VERSION "22-Aug-2005"
48
49
50/*-------------------------------------------------------------------------*/
51
52/*
53 * At most nineteen USB 1.1 maximum-size bulk transactions fit in one 1 msec frame.
54 * Several dozen bytes of IPv4 data can fit in two such transactions.
55 * One maximum size Ethernet packet takes twenty four of them.
56 * For high speed, each frame comfortably fits almost 36 max size
57 * Ethernet packets (so queues should be bigger).
58 *
59 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
60 * let the USB host controller be busy for 5msec or more before an irq
61 * is required, under load. Jumbograms change the equation.
62 */
63#define RX_MAX_QUEUE_MEMORY (60 * 1518)
64#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
65 (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
66#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
67 (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
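To make that sizing concrete: the default rx_urb_size is the hard_mtu set up in usbnet_probe() below (a 1500 byte MTU plus 14 bytes of Ethernet header, i.e. 1514), so a high speed device queues 60 * 1518 / 1514 = 60 receive urbs, while full (or low) speed devices always fall back to a depth of 4. A minidriver that enlarges rx_urb_size for jumbo or batched frames shrinks the queue proportionally; at 16384 bytes per urb the same memory budget allows only 60 * 1518 / 16384 = 5 urbs in flight.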
68
69// reawaken network queue this soon after stopping; else watchdog barks
70#define TX_TIMEOUT_JIFFIES (5*HZ)
71
72// throttle rx/tx briefly after some faults, so khubd might disconnect()
73// us (it polls at HZ/4 usually) before we report too many false errors.
74#define THROTTLE_JIFFIES (HZ/8)
75
76// between wakeups
77#define UNLINK_TIMEOUT_MS 3
78
79/*-------------------------------------------------------------------------*/
80
81// randomly generated ethernet address
82static u8 node_id [ETH_ALEN];
83
84static const char driver_name [] = "usbnet";
85
86/* use ethtool to change the level for any given device */
87static int msg_level = -1;
88module_param (msg_level, int, 0);
89MODULE_PARM_DESC (msg_level, "Override default message level");
90
91/*-------------------------------------------------------------------------*/
92
93/* handles CDC Ethernet and many other network "bulk data" interfaces */
94int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
95{
96 int tmp;
97 struct usb_host_interface *alt = NULL;
98 struct usb_host_endpoint *in = NULL, *out = NULL;
99 struct usb_host_endpoint *status = NULL;
100
101 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
102 unsigned ep;
103
104 in = out = status = NULL;
105 alt = intf->altsetting + tmp;
106
107 /* take the first altsetting with in-bulk + out-bulk;
108 * remember any status endpoint, just in case;
109	 * ignore other endpoints and altsettings.
110 */
111 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
112 struct usb_host_endpoint *e;
113 int intr = 0;
114
115 e = alt->endpoint + ep;
116 switch (e->desc.bmAttributes) {
117 case USB_ENDPOINT_XFER_INT:
118 if (!usb_endpoint_dir_in(&e->desc))
119 continue;
120 intr = 1;
121 /* FALLTHROUGH */
122 case USB_ENDPOINT_XFER_BULK:
123 break;
124 default:
125 continue;
126 }
127 if (usb_endpoint_dir_in(&e->desc)) {
128 if (!intr && !in)
129 in = e;
130 else if (intr && !status)
131 status = e;
132 } else {
133 if (!out)
134 out = e;
135 }
136 }
137 if (in && out)
138 break;
139 }
140 if (!alt || !in || !out)
141 return -EINVAL;
142
143 if (alt->desc.bAlternateSetting != 0
144 || !(dev->driver_info->flags & FLAG_NO_SETINT)) {
145 tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
146 alt->desc.bAlternateSetting);
147 if (tmp < 0)
148 return tmp;
149 }
150
151 dev->in = usb_rcvbulkpipe (dev->udev,
152 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
153 dev->out = usb_sndbulkpipe (dev->udev,
154 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
155 dev->status = status;
156 return 0;
157}
158EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
159
160static void intr_complete (struct urb *urb);
161
162static int init_status (struct usbnet *dev, struct usb_interface *intf)
163{
164 char *buf = NULL;
165 unsigned pipe = 0;
166 unsigned maxp;
167 unsigned period;
168
169 if (!dev->driver_info->status)
170 return 0;
171
172 pipe = usb_rcvintpipe (dev->udev,
173 dev->status->desc.bEndpointAddress
174 & USB_ENDPOINT_NUMBER_MASK);
175 maxp = usb_maxpacket (dev->udev, pipe, 0);
176
177 /* avoid 1 msec chatter: min 8 msec poll rate */
178 period = max ((int) dev->status->desc.bInterval,
179 (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
180
181 buf = kmalloc (maxp, GFP_KERNEL);
182 if (buf) {
183 dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
184 if (!dev->interrupt) {
185 kfree (buf);
186 return -ENOMEM;
187 } else {
188 usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
189 buf, maxp, intr_complete, dev, period);
190 dev_dbg(&intf->dev,
191 "status ep%din, %d bytes period %d\n",
192 usb_pipeendpoint(pipe), maxp, period);
193 }
194 }
195 return 0;
196}
197
198/* Passes this packet up the stack, updating its accounting.
199 * Some link protocols batch packets, so their rx_fixup paths
200 * can return clones as well as just modify the original skb.
201 */
202void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
203{
204 int status;
205
206 skb->protocol = eth_type_trans (skb, dev->net);
207 dev->stats.rx_packets++;
208 dev->stats.rx_bytes += skb->len;
209
210 if (netif_msg_rx_status (dev))
211 devdbg (dev, "< rx, len %zu, type 0x%x",
212 skb->len + sizeof (struct ethhdr), skb->protocol);
213 memset (skb->cb, 0, sizeof (struct skb_data));
214 status = netif_rx (skb);
215 if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
216 devdbg (dev, "netif_rx status %d", status);
217}
218EXPORT_SYMBOL_GPL(usbnet_skb_return);
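The clone case mentioned above is how devices that batch several Ethernet frames into one bulk transfer are handled: rx_fixup() walks the urb buffer, hands each inner frame to usbnet_skb_return(), and lets the core pass the final one up itself. A rough sketch, assuming a made-up framing where every frame is prefixed by a 16-bit little-endian length (real minidrivers such as asix differ in detail; this is not part of the patch):

	static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
	{
		while (skb->len > 2) {
			u16 len = skb->data[0] | (skb->data[1] << 8);
			struct sk_buff *frame;

			skb_pull(skb, 2);
			if (len > skb->len)
				return 0;	/* malformed; drop what's left */
			if (len == skb->len)
				return 1;	/* last frame: core returns 'skb' itself */

			frame = skb_clone(skb, GFP_ATOMIC);
			if (!frame)
				return 0;
			skb_trim(frame, len);		/* clone covers just this frame */
			usbnet_skb_return(dev, frame);
			skb_pull(skb, len);		/* advance to the next length header */
		}
		return 0;				/* empty buffer or trailing garbage */
	}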
219
220
221/*-------------------------------------------------------------------------
222 *
223 * Network Device Driver (peer link to "Host Device", from USB host)
224 *
225 *-------------------------------------------------------------------------*/
226
227static int usbnet_change_mtu (struct net_device *net, int new_mtu)
228{
229 struct usbnet *dev = netdev_priv(net);
230 int ll_mtu = new_mtu + net->hard_header_len;
231 int old_hard_mtu = dev->hard_mtu;
232 int old_rx_urb_size = dev->rx_urb_size;
233
234 if (new_mtu <= 0)
235 return -EINVAL;
236 // no second zero-length packet read wanted after mtu-sized packets
237 if ((ll_mtu % dev->maxpacket) == 0)
238 return -EDOM;
239 net->mtu = new_mtu;
240
241 dev->hard_mtu = net->mtu + net->hard_header_len;
242 if (dev->rx_urb_size == old_hard_mtu) {
243 dev->rx_urb_size = dev->hard_mtu;
244 if (dev->rx_urb_size > old_rx_urb_size)
245 usbnet_unlink_rx_urbs(dev);
246 }
247
248 return 0;
249}
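A worked example of the -EDOM check above: on a high speed bulk endpoint maxpacket is 512 bytes, so requesting an MTU of 498 gives ll_mtu = 498 + 14 = 512, an exact multiple of the packet size. Every mtu-sized packet would then end exactly on a USB packet boundary and need a zero-length packet to terminate the transfer; rather than handle that extra read, the driver refuses such MTUs (the tx path below instead pads outgoing transfers by one byte for the same reason).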
250
251/*-------------------------------------------------------------------------*/
252
253static struct net_device_stats *usbnet_get_stats (struct net_device *net)
254{
255 struct usbnet *dev = netdev_priv(net);
256 return &dev->stats;
257}
258
259/*-------------------------------------------------------------------------*/
260
261/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
262 * completion callbacks. 2.5 should have fixed those bugs...
263 */
264
265static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
266{
267 unsigned long flags;
268
269 spin_lock_irqsave(&list->lock, flags);
270 __skb_unlink(skb, list);
271 spin_unlock(&list->lock);
272 spin_lock(&dev->done.lock);
273 __skb_queue_tail(&dev->done, skb);
274 if (dev->done.qlen == 1)
275 tasklet_schedule(&dev->bh);
276 spin_unlock_irqrestore(&dev->done.lock, flags);
277}
278
279/* some work can't be done in tasklets, so we use keventd
280 *
281 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
282 * but tasklet_schedule() doesn't. hope the failure is rare.
283 */
284void usbnet_defer_kevent (struct usbnet *dev, int work)
285{
286 set_bit (work, &dev->flags);
287 if (!schedule_work (&dev->kevent))
288 deverr (dev, "kevent %d may have been dropped", work);
289 else
290 devdbg (dev, "kevent %d scheduled", work);
291}
292EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
293
294/*-------------------------------------------------------------------------*/
295
296static void rx_complete (struct urb *urb);
297
298static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
299{
300 struct sk_buff *skb;
301 struct skb_data *entry;
302 int retval = 0;
303 unsigned long lockflags;
304 size_t size = dev->rx_urb_size;
305
306 if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
307 if (netif_msg_rx_err (dev))
308 devdbg (dev, "no rx skb");
309 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
310 usb_free_urb (urb);
311 return;
312 }
313 skb_reserve (skb, NET_IP_ALIGN);
314
315 entry = (struct skb_data *) skb->cb;
316 entry->urb = urb;
317 entry->dev = dev;
318 entry->state = rx_start;
319 entry->length = 0;
320
321 usb_fill_bulk_urb (urb, dev->udev, dev->in,
322 skb->data, size, rx_complete, skb);
323
324 spin_lock_irqsave (&dev->rxq.lock, lockflags);
325
326 if (netif_running (dev->net)
327 && netif_device_present (dev->net)
328 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
329 switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
330 case -EPIPE:
331 usbnet_defer_kevent (dev, EVENT_RX_HALT);
332 break;
333 case -ENOMEM:
334 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
335 break;
336 case -ENODEV:
337 if (netif_msg_ifdown (dev))
338 devdbg (dev, "device gone");
339 netif_device_detach (dev->net);
340 break;
341 default:
342 if (netif_msg_rx_err (dev))
343 devdbg (dev, "rx submit, %d", retval);
344 tasklet_schedule (&dev->bh);
345 break;
346 case 0:
347 __skb_queue_tail (&dev->rxq, skb);
348 }
349 } else {
350 if (netif_msg_ifdown (dev))
351 devdbg (dev, "rx: stopped");
352 retval = -ENOLINK;
353 }
354 spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
355 if (retval) {
356 dev_kfree_skb_any (skb);
357 usb_free_urb (urb);
358 }
359}
360
361
362/*-------------------------------------------------------------------------*/
363
364static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
365{
366 if (dev->driver_info->rx_fixup
367 && !dev->driver_info->rx_fixup (dev, skb))
368 goto error;
369 // else network stack removes extra byte if we forced a short packet
370
371 if (skb->len)
372 usbnet_skb_return (dev, skb);
373 else {
374 if (netif_msg_rx_err (dev))
375 devdbg (dev, "drop");
376error:
377 dev->stats.rx_errors++;
378 skb_queue_tail (&dev->done, skb);
379 }
380}
381
382/*-------------------------------------------------------------------------*/
383
384static void rx_complete (struct urb *urb)
385{
386 struct sk_buff *skb = (struct sk_buff *) urb->context;
387 struct skb_data *entry = (struct skb_data *) skb->cb;
388 struct usbnet *dev = entry->dev;
389 int urb_status = urb->status;
390
391 skb_put (skb, urb->actual_length);
392 entry->state = rx_done;
393 entry->urb = NULL;
394
395 switch (urb_status) {
396 // success
397 case 0:
398 if (skb->len < dev->net->hard_header_len) {
399 entry->state = rx_cleanup;
400 dev->stats.rx_errors++;
401 dev->stats.rx_length_errors++;
402 if (netif_msg_rx_err (dev))
403 devdbg (dev, "rx length %d", skb->len);
404 }
405 break;
406
407 // stalls need manual reset. this is rare ... except that
408 // when going through USB 2.0 TTs, unplug appears this way.
409 // we avoid the highspeed version of the ETIMEOUT/EILSEQ
410 // storm, recovering as needed.
411 case -EPIPE:
412 dev->stats.rx_errors++;
413 usbnet_defer_kevent (dev, EVENT_RX_HALT);
414 // FALLTHROUGH
415
416 // software-driven interface shutdown
417 case -ECONNRESET: // async unlink
418 case -ESHUTDOWN: // hardware gone
419 if (netif_msg_ifdown (dev))
420 devdbg (dev, "rx shutdown, code %d", urb_status);
421 goto block;
422
423 // we get controller i/o faults during khubd disconnect() delays.
424 // throttle down resubmits, to avoid log floods; just temporarily,
425 // so we still recover when the fault isn't a khubd delay.
426 case -EPROTO:
427 case -ETIME:
428 case -EILSEQ:
429 dev->stats.rx_errors++;
430 if (!timer_pending (&dev->delay)) {
431 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
432 if (netif_msg_link (dev))
433 devdbg (dev, "rx throttle %d", urb_status);
434 }
435block:
436 entry->state = rx_cleanup;
437 entry->urb = urb;
438 urb = NULL;
439 break;
440
441 // data overrun ... flush fifo?
442 case -EOVERFLOW:
443 dev->stats.rx_over_errors++;
444 // FALLTHROUGH
445
446 default:
447 entry->state = rx_cleanup;
448 dev->stats.rx_errors++;
449 if (netif_msg_rx_err (dev))
450 devdbg (dev, "rx status %d", urb_status);
451 break;
452 }
453
454 defer_bh(dev, skb, &dev->rxq);
455
456 if (urb) {
457 if (netif_running (dev->net)
458 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
459 rx_submit (dev, urb, GFP_ATOMIC);
460 return;
461 }
462 usb_free_urb (urb);
463 }
464 if (netif_msg_rx_err (dev))
465 devdbg (dev, "no read resubmitted");
466}
467
468static void intr_complete (struct urb *urb)
469{
470 struct usbnet *dev = urb->context;
471 int status = urb->status;
472
473 switch (status) {
474 /* success */
475 case 0:
476 dev->driver_info->status(dev, urb);
477 break;
478
479 /* software-driven interface shutdown */
480 case -ENOENT: // urb killed
481 case -ESHUTDOWN: // hardware gone
482 if (netif_msg_ifdown (dev))
483 devdbg (dev, "intr shutdown, code %d", status);
484 return;
485
486 /* NOTE: not throttling like RX/TX, since this endpoint
487 * already polls infrequently
488 */
489 default:
490 devdbg (dev, "intr status %d", status);
491 break;
492 }
493
494 if (!netif_running (dev->net))
495 return;
496
497 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
498 status = usb_submit_urb (urb, GFP_ATOMIC);
499 if (status != 0 && netif_msg_timer (dev))
500 deverr(dev, "intr resubmit --> %d", status);
501}
502
503/*-------------------------------------------------------------------------*/
504
505// unlink pending rx/tx; completion handlers do all other cleanup
506
507static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
508{
509 unsigned long flags;
510 struct sk_buff *skb, *skbnext;
511 int count = 0;
512
513 spin_lock_irqsave (&q->lock, flags);
514 for (skb = q->next; skb != (struct sk_buff *) q; skb = skbnext) {
515 struct skb_data *entry;
516 struct urb *urb;
517 int retval;
518
519 entry = (struct skb_data *) skb->cb;
520 urb = entry->urb;
521 skbnext = skb->next;
522
523 // during some PM-driven resume scenarios,
524 // these (async) unlinks complete immediately
525 retval = usb_unlink_urb (urb);
526 if (retval != -EINPROGRESS && retval != 0)
527 devdbg (dev, "unlink urb err, %d", retval);
528 else
529 count++;
530 }
531 spin_unlock_irqrestore (&q->lock, flags);
532 return count;
533}
534
535// Flush all pending rx urbs
536// minidrivers may need to do this when the MTU changes
537
538void usbnet_unlink_rx_urbs(struct usbnet *dev)
539{
540 if (netif_running(dev->net)) {
541 (void) unlink_urbs (dev, &dev->rxq);
542 tasklet_schedule(&dev->bh);
543 }
544}
545EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
546
547/*-------------------------------------------------------------------------*/
548
549// precondition: never called in_interrupt
550
551static int usbnet_stop (struct net_device *net)
552{
553 struct usbnet *dev = netdev_priv(net);
554 int temp;
555 DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
556 DECLARE_WAITQUEUE (wait, current);
557
558 netif_stop_queue (net);
559
560 if (netif_msg_ifdown (dev))
561 devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
562 dev->stats.rx_packets, dev->stats.tx_packets,
563 dev->stats.rx_errors, dev->stats.tx_errors
564 );
565
566 // ensure there are no more active urbs
567 add_wait_queue (&unlink_wakeup, &wait);
568 dev->wait = &unlink_wakeup;
569 temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
570
571 // maybe wait for deletions to finish.
572 while (!skb_queue_empty(&dev->rxq) &&
573 !skb_queue_empty(&dev->txq) &&
574 !skb_queue_empty(&dev->done)) {
575 msleep(UNLINK_TIMEOUT_MS);
576 if (netif_msg_ifdown (dev))
577 devdbg (dev, "waited for %d urb completions", temp);
578 }
579 dev->wait = NULL;
580 remove_wait_queue (&unlink_wakeup, &wait);
581
582 usb_kill_urb(dev->interrupt);
583
584 /* deferred work (task, timer, softirq) must also stop.
585 * can't flush_scheduled_work() until we drop rtnl (later),
586 * else workers could deadlock; so make workers a NOP.
587 */
588 dev->flags = 0;
589 del_timer_sync (&dev->delay);
590 tasklet_kill (&dev->bh);
591
592 return 0;
593}
594
595/*-------------------------------------------------------------------------*/
596
597// posts reads, and enables write queuing
598
599// precondition: never called in_interrupt
600
601static int usbnet_open (struct net_device *net)
602{
603 struct usbnet *dev = netdev_priv(net);
604 int retval = 0;
605 struct driver_info *info = dev->driver_info;
606
607 // put into "known safe" state
608 if (info->reset && (retval = info->reset (dev)) < 0) {
609 if (netif_msg_ifup (dev))
610 devinfo (dev,
611 "open reset fail (%d) usbnet usb-%s-%s, %s",
612 retval,
613 dev->udev->bus->bus_name, dev->udev->devpath,
614 info->description);
615 goto done;
616 }
617
618 // insist peer be connected
619 if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
620 if (netif_msg_ifup (dev))
621 devdbg (dev, "can't open; %d", retval);
622 goto done;
623 }
624
625 /* start any status interrupt transfer */
626 if (dev->interrupt) {
627 retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
628 if (retval < 0) {
629 if (netif_msg_ifup (dev))
630 deverr (dev, "intr submit %d", retval);
631 goto done;
632 }
633 }
634
635 netif_start_queue (net);
636 if (netif_msg_ifup (dev)) {
637 char *framing;
638
639 if (dev->driver_info->flags & FLAG_FRAMING_NC)
640 framing = "NetChip";
641 else if (dev->driver_info->flags & FLAG_FRAMING_GL)
642 framing = "GeneSys";
643 else if (dev->driver_info->flags & FLAG_FRAMING_Z)
644 framing = "Zaurus";
645 else if (dev->driver_info->flags & FLAG_FRAMING_RN)
646 framing = "RNDIS";
647 else if (dev->driver_info->flags & FLAG_FRAMING_AX)
648 framing = "ASIX";
649 else
650 framing = "simple";
651
652 devinfo (dev, "open: enable queueing "
653 "(rx %d, tx %d) mtu %d %s framing",
654 (int)RX_QLEN (dev), (int)TX_QLEN (dev), dev->net->mtu,
655 framing);
656 }
657
658 // delay posting reads until we're fully open
659 tasklet_schedule (&dev->bh);
660done:
661 return retval;
662}
663
664/*-------------------------------------------------------------------------*/
665
666/* ethtool methods; minidrivers may need to add some more, but
667 * they'll probably want to use this base set.
668 */
669
670#if defined(CONFIG_MII) || defined(CONFIG_MII_MODULE)
671#define HAVE_MII
672
673int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
674{
675 struct usbnet *dev = netdev_priv(net);
676
677 if (!dev->mii.mdio_read)
678 return -EOPNOTSUPP;
679
680 return mii_ethtool_gset(&dev->mii, cmd);
681}
682EXPORT_SYMBOL_GPL(usbnet_get_settings);
683
684int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
685{
686 struct usbnet *dev = netdev_priv(net);
687 int retval;
688
689 if (!dev->mii.mdio_write)
690 return -EOPNOTSUPP;
691
692 retval = mii_ethtool_sset(&dev->mii, cmd);
693
694 /* link speed/duplex might have changed */
695 if (dev->driver_info->link_reset)
696 dev->driver_info->link_reset(dev);
697
698 return retval;
699
700}
701EXPORT_SYMBOL_GPL(usbnet_set_settings);
702
703u32 usbnet_get_link (struct net_device *net)
704{
705 struct usbnet *dev = netdev_priv(net);
706
707 /* If a check_connect is defined, return its result */
708 if (dev->driver_info->check_connect)
709 return dev->driver_info->check_connect (dev) == 0;
710
711 /* if the device has mii operations, use those */
712 if (dev->mii.mdio_read)
713 return mii_link_ok(&dev->mii);
714
715 /* Otherwise, say we're up (to avoid breaking scripts) */
716 return 1;
717}
718EXPORT_SYMBOL_GPL(usbnet_get_link);
719
720int usbnet_nway_reset(struct net_device *net)
721{
722 struct usbnet *dev = netdev_priv(net);
723
724 if (!dev->mii.mdio_write)
725 return -EOPNOTSUPP;
726
727 return mii_nway_restart(&dev->mii);
728}
729EXPORT_SYMBOL_GPL(usbnet_nway_reset);
730
731#endif /* HAVE_MII */
732
733void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
734{
735 struct usbnet *dev = netdev_priv(net);
736
737 strncpy (info->driver, dev->driver_name, sizeof info->driver);
738 strncpy (info->version, DRIVER_VERSION, sizeof info->version);
739 strncpy (info->fw_version, dev->driver_info->description,
740 sizeof info->fw_version);
741 usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
742}
743EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
744
745u32 usbnet_get_msglevel (struct net_device *net)
746{
747 struct usbnet *dev = netdev_priv(net);
748
749 return dev->msg_enable;
750}
751EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
752
753void usbnet_set_msglevel (struct net_device *net, u32 level)
754{
755 struct usbnet *dev = netdev_priv(net);
756
757 dev->msg_enable = level;
758}
759EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
760
761/* drivers may override default ethtool_ops in their bind() routine */
762static struct ethtool_ops usbnet_ethtool_ops = {
763#ifdef HAVE_MII
764 .get_settings = usbnet_get_settings,
765 .set_settings = usbnet_set_settings,
766 .get_link = usbnet_get_link,
767 .nway_reset = usbnet_nway_reset,
768#endif
769 .get_drvinfo = usbnet_get_drvinfo,
770 .get_msglevel = usbnet_get_msglevel,
771 .set_msglevel = usbnet_set_msglevel,
772};
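When a minidriver does override these defaults, the usual pattern (roughly what minidrivers like asix do) is to build its own ethtool_ops, reusing the exported usbnet helpers for everything generic, and install it from bind(); in this sketch the link-test callback and the bind routine are hypothetical, device-specific functions:

	static struct ethtool_ops example_ethtool_ops = {
		.get_drvinfo	= usbnet_get_drvinfo,
		.get_link	= example_get_link,	/* hypothetical: read a PHY/link register */
		.get_msglevel	= usbnet_get_msglevel,
		.set_msglevel	= usbnet_set_msglevel,
	};

	static int example_bind(struct usbnet *dev, struct usb_interface *intf)
	{
		int status = usbnet_get_endpoints(dev, intf);

		if (status == 0)
			dev->net->ethtool_ops = &example_ethtool_ops;
		return status;
	}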
773
774/*-------------------------------------------------------------------------*/
775
776/* work that cannot be done in interrupt context uses keventd.
777 *
778 * NOTE: with 2.5 we could do more of this using completion callbacks,
779 * especially now that control transfers can be queued.
780 */
781static void
782kevent (struct work_struct *work)
783{
784 struct usbnet *dev =
785 container_of(work, struct usbnet, kevent);
786 int status;
787
788 /* usb_clear_halt() needs a thread context */
789 if (test_bit (EVENT_TX_HALT, &dev->flags)) {
790 unlink_urbs (dev, &dev->txq);
791 status = usb_clear_halt (dev->udev, dev->out);
792 if (status < 0
793 && status != -EPIPE
794 && status != -ESHUTDOWN) {
795 if (netif_msg_tx_err (dev))
796 deverr (dev, "can't clear tx halt, status %d",
797 status);
798 } else {
799 clear_bit (EVENT_TX_HALT, &dev->flags);
800 if (status != -ESHUTDOWN)
801 netif_wake_queue (dev->net);
802 }
803 }
804 if (test_bit (EVENT_RX_HALT, &dev->flags)) {
805 unlink_urbs (dev, &dev->rxq);
806 status = usb_clear_halt (dev->udev, dev->in);
807 if (status < 0
808 && status != -EPIPE
809 && status != -ESHUTDOWN) {
810 if (netif_msg_rx_err (dev))
811 deverr (dev, "can't clear rx halt, status %d",
812 status);
813 } else {
814 clear_bit (EVENT_RX_HALT, &dev->flags);
815 tasklet_schedule (&dev->bh);
816 }
817 }
818
819 /* tasklet could resubmit itself forever if memory is tight */
820 if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
821 struct urb *urb = NULL;
822
823 if (netif_running (dev->net))
824 urb = usb_alloc_urb (0, GFP_KERNEL);
825 else
826 clear_bit (EVENT_RX_MEMORY, &dev->flags);
827 if (urb != NULL) {
828 clear_bit (EVENT_RX_MEMORY, &dev->flags);
829 rx_submit (dev, urb, GFP_KERNEL);
830 tasklet_schedule (&dev->bh);
831 }
832 }
833
834 if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
835 struct driver_info *info = dev->driver_info;
836 int retval = 0;
837
838 clear_bit (EVENT_LINK_RESET, &dev->flags);
839 if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
840 devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
841 retval,
842 dev->udev->bus->bus_name, dev->udev->devpath,
843 info->description);
844 }
845 }
846
847 if (dev->flags)
848 devdbg (dev, "kevent done, flags = 0x%lx",
849 dev->flags);
850}
851
852/*-------------------------------------------------------------------------*/
853
854static void tx_complete (struct urb *urb)
855{
856 struct sk_buff *skb = (struct sk_buff *) urb->context;
857 struct skb_data *entry = (struct skb_data *) skb->cb;
858 struct usbnet *dev = entry->dev;
859
860 if (urb->status == 0) {
861 dev->stats.tx_packets++;
862 dev->stats.tx_bytes += entry->length;
863 } else {
864 dev->stats.tx_errors++;
865
866 switch (urb->status) {
867 case -EPIPE:
868 usbnet_defer_kevent (dev, EVENT_TX_HALT);
869 break;
870
871 /* software-driven interface shutdown */
872 case -ECONNRESET: // async unlink
873 case -ESHUTDOWN: // hardware gone
874 break;
875
876 // like rx, tx gets controller i/o faults during khubd delays
877 // and so it uses the same throttling mechanism.
878 case -EPROTO:
879 case -ETIME:
880 case -EILSEQ:
881 if (!timer_pending (&dev->delay)) {
882 mod_timer (&dev->delay,
883 jiffies + THROTTLE_JIFFIES);
884 if (netif_msg_link (dev))
885 devdbg (dev, "tx throttle %d",
886 urb->status);
887 }
888 netif_stop_queue (dev->net);
889 break;
890 default:
891 if (netif_msg_tx_err (dev))
892 devdbg (dev, "tx err %d", entry->urb->status);
893 break;
894 }
895 }
896
897 urb->dev = NULL;
898 entry->state = tx_done;
899 defer_bh(dev, skb, &dev->txq);
900}
901
902/*-------------------------------------------------------------------------*/
903
904static void usbnet_tx_timeout (struct net_device *net)
905{
906 struct usbnet *dev = netdev_priv(net);
907
908 unlink_urbs (dev, &dev->txq);
909 tasklet_schedule (&dev->bh);
910
911 // FIXME: device recovery -- reset?
912}
913
914/*-------------------------------------------------------------------------*/
915
916static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
917{
918 struct usbnet *dev = netdev_priv(net);
919 int length;
920 int retval = NET_XMIT_SUCCESS;
921 struct urb *urb = NULL;
922 struct skb_data *entry;
923 struct driver_info *info = dev->driver_info;
924 unsigned long flags;
925
926 // some devices want funky USB-level framing, for
927 // win32 driver (usually) and/or hardware quirks
928 if (info->tx_fixup) {
929 skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
930 if (!skb) {
931 if (netif_msg_tx_err (dev))
932 devdbg (dev, "can't tx_fixup skb");
933 goto drop;
934 }
935 }
936 length = skb->len;
937
938 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
939 if (netif_msg_tx_err (dev))
940 devdbg (dev, "no urb");
941 goto drop;
942 }
943
944 entry = (struct skb_data *) skb->cb;
945 entry->urb = urb;
946 entry->dev = dev;
947 entry->state = tx_start;
948 entry->length = length;
949
950 usb_fill_bulk_urb (urb, dev->udev, dev->out,
951 skb->data, skb->len, tx_complete, skb);
952
953 /* don't assume the hardware handles USB_ZERO_PACKET
954 * NOTE: strictly conforming cdc-ether devices should expect
955 * the ZLP here, but ignore the one-byte packet.
956 *
957 * FIXME zero that byte, if it doesn't require a new skb.
958 */
959 if ((length % dev->maxpacket) == 0)
960 urb->transfer_buffer_length++;
961
962 spin_lock_irqsave (&dev->txq.lock, flags);
963
964 switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
965 case -EPIPE:
966 netif_stop_queue (net);
967 usbnet_defer_kevent (dev, EVENT_TX_HALT);
968 break;
969 default:
970 if (netif_msg_tx_err (dev))
971 devdbg (dev, "tx: submit urb err %d", retval);
972 break;
973 case 0:
974 net->trans_start = jiffies;
975 __skb_queue_tail (&dev->txq, skb);
976 if (dev->txq.qlen >= TX_QLEN (dev))
977 netif_stop_queue (net);
978 }
979 spin_unlock_irqrestore (&dev->txq.lock, flags);
980
981 if (retval) {
982 if (netif_msg_tx_err (dev))
983 devdbg (dev, "drop, code %d", retval);
984drop:
985 retval = NET_XMIT_SUCCESS;
986 dev->stats.tx_dropped++;
987 if (skb)
988 dev_kfree_skb_any (skb);
989 usb_free_urb (urb);
990 } else if (netif_msg_tx_queued (dev)) {
991 devdbg (dev, "> tx, len %d, type 0x%x",
992 length, skb->protocol);
993 }
994 return retval;
995}
996
997
998/*-------------------------------------------------------------------------*/
999
1000// tasklet (work deferred from completions, in_irq) or timer
1001
1002static void usbnet_bh (unsigned long param)
1003{
1004 struct usbnet *dev = (struct usbnet *) param;
1005 struct sk_buff *skb;
1006 struct skb_data *entry;
1007
1008 while ((skb = skb_dequeue (&dev->done))) {
1009 entry = (struct skb_data *) skb->cb;
1010 switch (entry->state) {
1011 case rx_done:
1012 entry->state = rx_cleanup;
1013 rx_process (dev, skb);
1014 continue;
1015 case tx_done:
1016 case rx_cleanup:
1017 usb_free_urb (entry->urb);
1018 dev_kfree_skb (skb);
1019 continue;
1020 default:
1021 devdbg (dev, "bogus skb state %d", entry->state);
1022 }
1023 }
1024
1025 // waiting for all pending urbs to complete?
1026 if (dev->wait) {
1027 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
1028 wake_up (dev->wait);
1029 }
1030
1031 // or are we maybe short a few urbs?
1032 } else if (netif_running (dev->net)
1033 && netif_device_present (dev->net)
1034 && !timer_pending (&dev->delay)
1035 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
1036 int temp = dev->rxq.qlen;
1037 int qlen = RX_QLEN (dev);
1038
1039 if (temp < qlen) {
1040 struct urb *urb;
1041 int i;
1042
1043 // don't refill the queue all at once
1044 for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
1045 urb = usb_alloc_urb (0, GFP_ATOMIC);
1046 if (urb != NULL)
1047 rx_submit (dev, urb, GFP_ATOMIC);
1048 }
1049 if (temp != dev->rxq.qlen && netif_msg_link (dev))
1050 devdbg (dev, "rxqlen %d --> %d",
1051 temp, dev->rxq.qlen);
1052 if (dev->rxq.qlen < qlen)
1053 tasklet_schedule (&dev->bh);
1054 }
1055 if (dev->txq.qlen < TX_QLEN (dev))
1056 netif_wake_queue (dev->net);
1057 }
1058}
1059
1060
1061
1062/*-------------------------------------------------------------------------
1063 *
1064 * USB Device Driver support
1065 *
1066 *-------------------------------------------------------------------------*/
1067
1068// precondition: never called in_interrupt
1069
1070void usbnet_disconnect (struct usb_interface *intf)
1071{
1072 struct usbnet *dev;
1073 struct usb_device *xdev;
1074 struct net_device *net;
1075
1076 dev = usb_get_intfdata(intf);
1077 usb_set_intfdata(intf, NULL);
1078 if (!dev)
1079 return;
1080
1081 xdev = interface_to_usbdev (intf);
1082
1083 if (netif_msg_probe (dev))
1084 devinfo (dev, "unregister '%s' usb-%s-%s, %s",
1085 intf->dev.driver->name,
1086 xdev->bus->bus_name, xdev->devpath,
1087 dev->driver_info->description);
1088
1089 net = dev->net;
1090 unregister_netdev (net);
1091
1092 /* we don't hold rtnl here ... */
1093 flush_scheduled_work ();
1094
1095 if (dev->driver_info->unbind)
1096 dev->driver_info->unbind (dev, intf);
1097
1098 free_netdev(net);
1099 usb_put_dev (xdev);
1100}
1101EXPORT_SYMBOL_GPL(usbnet_disconnect);
1102
1103
1104/*-------------------------------------------------------------------------*/
1105
1106// precondition: never called in_interrupt
1107
1108int
1109usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1110{
1111 struct usbnet *dev;
1112 struct net_device *net;
1113 struct usb_host_interface *interface;
1114 struct driver_info *info;
1115 struct usb_device *xdev;
1116 int status;
1117 const char *name;
1118
1119 name = udev->dev.driver->name;
1120 info = (struct driver_info *) prod->driver_info;
1121 if (!info) {
1122 dev_dbg (&udev->dev, "blacklisted by %s\n", name);
1123 return -ENODEV;
1124 }
1125 xdev = interface_to_usbdev (udev);
1126 interface = udev->cur_altsetting;
1127
1128 usb_get_dev (xdev);
1129
1130 status = -ENOMEM;
1131
1132 // set up our own records
1133 net = alloc_etherdev(sizeof(*dev));
1134 if (!net) {
1135 dbg ("can't kmalloc dev");
1136 goto out;
1137 }
1138
1139 dev = netdev_priv(net);
1140 dev->udev = xdev;
1141 dev->driver_info = info;
1142 dev->driver_name = name;
1143 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1144 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1145 skb_queue_head_init (&dev->rxq);
1146 skb_queue_head_init (&dev->txq);
1147 skb_queue_head_init (&dev->done);
1148 dev->bh.func = usbnet_bh;
1149 dev->bh.data = (unsigned long) dev;
1150 INIT_WORK (&dev->kevent, kevent);
1151 dev->delay.function = usbnet_bh;
1152 dev->delay.data = (unsigned long) dev;
1153 init_timer (&dev->delay);
1154 mutex_init (&dev->phy_mutex);
1155
1156 SET_MODULE_OWNER (net);
1157 dev->net = net;
1158 strcpy (net->name, "usb%d");
1159 memcpy (net->dev_addr, node_id, sizeof node_id);
1160
1161 /* rx and tx sides can use different message sizes;
1162 * bind() should set rx_urb_size in that case.
1163 */
1164 dev->hard_mtu = net->mtu + net->hard_header_len;
1165#if 0
1166// dma_supported() is deeply broken on almost all architectures
1167 // possible with some EHCI controllers
1168 if (dma_supported (&udev->dev, DMA_64BIT_MASK))
1169 net->features |= NETIF_F_HIGHDMA;
1170#endif
1171
1172 net->change_mtu = usbnet_change_mtu;
1173 net->get_stats = usbnet_get_stats;
1174 net->hard_start_xmit = usbnet_start_xmit;
1175 net->open = usbnet_open;
1176 net->stop = usbnet_stop;
1177 net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
1178 net->tx_timeout = usbnet_tx_timeout;
1179 net->ethtool_ops = &usbnet_ethtool_ops;
1180
1181 // allow device-specific bind/init procedures
1182 // NOTE net->name still not usable ...
1183 if (info->bind) {
1184 status = info->bind (dev, udev);
1185 if (status < 0)
1186 goto out1;
1187
1188 // heuristic: "usb%d" for links we know are two-host,
1189 // else "eth%d" when there's reasonable doubt. userspace
1190 // can rename the link if it knows better.
1191 if ((dev->driver_info->flags & FLAG_ETHER) != 0
1192 && (net->dev_addr [0] & 0x02) == 0)
1193 strcpy (net->name, "eth%d");
1194
1195 /* maybe the remote can't receive an Ethernet MTU */
1196 if (net->mtu > (dev->hard_mtu - net->hard_header_len))
1197 net->mtu = dev->hard_mtu - net->hard_header_len;
1198 } else if (!info->in || !info->out)
1199 status = usbnet_get_endpoints (dev, udev);
1200 else {
1201 dev->in = usb_rcvbulkpipe (xdev, info->in);
1202 dev->out = usb_sndbulkpipe (xdev, info->out);
1203 if (!(info->flags & FLAG_NO_SETINT))
1204 status = usb_set_interface (xdev,
1205 interface->desc.bInterfaceNumber,
1206 interface->desc.bAlternateSetting);
1207 else
1208 status = 0;
1209
1210 }
1211 if (status == 0 && dev->status)
1212 status = init_status (dev, udev);
1213 if (status < 0)
1214 goto out3;
1215
1216 if (!dev->rx_urb_size)
1217 dev->rx_urb_size = dev->hard_mtu;
1218 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1219
1220 SET_NETDEV_DEV(net, &udev->dev);
1221 status = register_netdev (net);
1222 if (status)
1223 goto out3;
1224 if (netif_msg_probe (dev))
1225 devinfo (dev, "register '%s' at usb-%s-%s, %s, "
1226 "%02x:%02x:%02x:%02x:%02x:%02x",
1227 udev->dev.driver->name,
1228 xdev->bus->bus_name, xdev->devpath,
1229 dev->driver_info->description,
1230 net->dev_addr [0], net->dev_addr [1],
1231 net->dev_addr [2], net->dev_addr [3],
1232 net->dev_addr [4], net->dev_addr [5]);
1233
1234 // ok, it's ready to go.
1235 usb_set_intfdata (udev, dev);
1236
1237 // start as if the link is up
1238 netif_device_attach (net);
1239
1240 return 0;
1241
1242out3:
1243 if (info->unbind)
1244 info->unbind (dev, udev);
1245out1:
1246 free_netdev(net);
1247out:
1248 usb_put_dev(xdev);
1249 return status;
1250}
1251EXPORT_SYMBOL_GPL(usbnet_probe);
1252
1253/*-------------------------------------------------------------------------*/
1254
1255/* FIXME these suspend/resume methods assume non-CDC style
1256 * devices, with only one interface.
1257 */
1258
1259int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
1260{
1261 struct usbnet *dev = usb_get_intfdata(intf);
1262
1263	 /* accelerate emptying of the rx and tx queues, to avoid
1264 * having everything error out.
1265 */
1266 netif_device_detach (dev->net);
1267 (void) unlink_urbs (dev, &dev->rxq);
1268 (void) unlink_urbs (dev, &dev->txq);
1269 return 0;
1270}
1271EXPORT_SYMBOL_GPL(usbnet_suspend);
1272
1273int usbnet_resume (struct usb_interface *intf)
1274{
1275 struct usbnet *dev = usb_get_intfdata(intf);
1276
1277 netif_device_attach (dev->net);
1278 tasklet_schedule (&dev->bh);
1279 return 0;
1280}
1281EXPORT_SYMBOL_GPL(usbnet_resume);
1282
1283
1284/*-------------------------------------------------------------------------*/
1285
1286static int __init usbnet_init(void)
1287{
1288 /* compiler should optimize this out */
1289 BUILD_BUG_ON (sizeof (((struct sk_buff *)0)->cb)
1290 < sizeof (struct skb_data));
1291
1292 random_ether_addr(node_id);
1293 return 0;
1294}
1295module_init(usbnet_init);
1296
1297static void __exit usbnet_exit(void)
1298{
1299}
1300module_exit(usbnet_exit);
1301
1302MODULE_AUTHOR("David Brownell");
1303MODULE_DESCRIPTION("USB network driver framework");
1304MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/usbnet.h b/drivers/net/usb/usbnet.h
new file mode 100644
index 000000000000..82db5a8e528e
--- /dev/null
+++ b/drivers/net/usb/usbnet.h
@@ -0,0 +1,200 @@
1/*
2 * USB Networking Link Interface
3 *
4 * Copyright (C) 2000-2005 by David Brownell <dbrownell@users.sourceforge.net>
5 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22
23#ifndef __USBNET_H
24#define __USBNET_H
25
26
27/* interface from usbnet core to each USB networking link we handle */
28struct usbnet {
29 /* housekeeping */
30 struct usb_device *udev;
31 struct driver_info *driver_info;
32 const char *driver_name;
33 wait_queue_head_t *wait;
34 struct mutex phy_mutex;
35
36 /* i/o info: pipes etc */
37 unsigned in, out;
38 struct usb_host_endpoint *status;
39 unsigned maxpacket;
40 struct timer_list delay;
41
42 /* protocol/interface state */
43 struct net_device *net;
44 struct net_device_stats stats;
45 int msg_enable;
46 unsigned long data [5];
47 u32 xid;
48 u32 hard_mtu; /* count any extra framing */
49 size_t rx_urb_size; /* size for rx urbs */
50 struct mii_if_info mii;
51
52 /* various kinds of pending driver work */
53 struct sk_buff_head rxq;
54 struct sk_buff_head txq;
55 struct sk_buff_head done;
56 struct urb *interrupt;
57 struct tasklet_struct bh;
58
59 struct work_struct kevent;
60 unsigned long flags;
61# define EVENT_TX_HALT 0
62# define EVENT_RX_HALT 1
63# define EVENT_RX_MEMORY 2
64# define EVENT_STS_SPLIT 3
65# define EVENT_LINK_RESET 4
66};
67
68static inline struct usb_driver *driver_of(struct usb_interface *intf)
69{
70 return to_usb_driver(intf->dev.driver);
71}
72
73/* interface from the device/framing level "minidriver" to core */
74struct driver_info {
75 char *description;
76
77 int flags;
78/* framing is CDC Ethernet, not writing ZLPs (hw issues), or optionally: */
79#define FLAG_FRAMING_NC 0x0001 /* guard against device dropouts */
80#define FLAG_FRAMING_GL 0x0002 /* genelink batches packets */
81#define FLAG_FRAMING_Z 0x0004 /* zaurus adds a trailer */
82#define FLAG_FRAMING_RN 0x0008 /* RNDIS batches, plus huge header */
83
84#define FLAG_NO_SETINT 0x0010 /* device can't set_interface() */
85#define FLAG_ETHER 0x0020 /* maybe use "eth%d" names */
86
87#define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */
88
89 /* init device ... can sleep, or cause probe() failure */
90 int (*bind)(struct usbnet *, struct usb_interface *);
91
92 /* cleanup device ... can sleep, but can't fail */
93 void (*unbind)(struct usbnet *, struct usb_interface *);
94
95 /* reset device ... can sleep */
96 int (*reset)(struct usbnet *);
97
98 /* see if peer is connected ... can sleep */
99 int (*check_connect)(struct usbnet *);
100
101 /* for status polling */
102 void (*status)(struct usbnet *, struct urb *);
103
104 /* link reset handling, called from defer_kevent */
105 int (*link_reset)(struct usbnet *);
106
107 /* fixup rx packet (strip framing) */
108 int (*rx_fixup)(struct usbnet *dev, struct sk_buff *skb);
109
110 /* fixup tx packet (add framing) */
111 struct sk_buff *(*tx_fixup)(struct usbnet *dev,
112 struct sk_buff *skb, gfp_t flags);
113
114 /* for new devices, use the descriptor-reading code instead */
115 int in; /* rx endpoint */
116 int out; /* tx endpoint */
117
118 unsigned long data; /* Misc driver specific data */
119};
120
121/* Minidrivers are just drivers using the "usbnet" core as a powerful
122 * network-specific subroutine library ... that happens to do pretty
123 * much everything except custom framing and chip-specific stuff.
124 */
125extern int usbnet_probe(struct usb_interface *, const struct usb_device_id *);
126extern int usbnet_suspend (struct usb_interface *, pm_message_t );
127extern int usbnet_resume (struct usb_interface *);
128extern void usbnet_disconnect(struct usb_interface *);
129
130
131/* Drivers that reuse some of the standard USB CDC infrastructure
132 * (notably, using multiple interfaces according to the CDC
133 * union descriptor) get some helper code.
134 */
135struct cdc_state {
136 struct usb_cdc_header_desc *header;
137 struct usb_cdc_union_desc *u;
138 struct usb_cdc_ether_desc *ether;
139 struct usb_interface *control;
140 struct usb_interface *data;
141};
142
143extern int usbnet_generic_cdc_bind (struct usbnet *, struct usb_interface *);
144extern void usbnet_cdc_unbind (struct usbnet *, struct usb_interface *);
145
146/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */
147#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
148 |USB_CDC_PACKET_TYPE_ALL_MULTICAST \
149 |USB_CDC_PACKET_TYPE_PROMISCUOUS \
150 |USB_CDC_PACKET_TYPE_DIRECTED)
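A CDC-style minidriver would typically program that filter into the device with the class-specific SetEthernetPacketFilter request once the data interface is up. A hedged sketch only: the helper name is made up, the request and timeout constants come from the USB CDC definitions and the USB core, and dev->data is assumed to hold the cdc_state filled in by usbnet_generic_cdc_bind():

	static int example_set_packet_filter(struct usbnet *dev, u16 filter)
	{
		struct cdc_state *info = (void *) &dev->data;

		return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			USB_CDC_SET_ETHERNET_PACKET_FILTER,
			USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			filter,		/* e.g. DEFAULT_FILTER */
			info->control->cur_altsetting->desc.bInterfaceNumber,
			NULL, 0, USB_CTRL_SET_TIMEOUT);
	}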
151
152
153/* we record the state for each of our queued skbs */
154enum skb_state {
155 illegal = 0,
156 tx_start, tx_done,
157 rx_start, rx_done, rx_cleanup
158};
159
160struct skb_data { /* skb->cb is one of these */
161 struct urb *urb;
162 struct usbnet *dev;
163 enum skb_state state;
164 size_t length;
165};
166
167
168extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *);
169extern void usbnet_defer_kevent (struct usbnet *, int);
170extern void usbnet_skb_return (struct usbnet *, struct sk_buff *);
171extern void usbnet_unlink_rx_urbs(struct usbnet *);
172
173extern int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd);
174extern int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd);
175extern u32 usbnet_get_link (struct net_device *net);
176extern u32 usbnet_get_msglevel (struct net_device *);
177extern void usbnet_set_msglevel (struct net_device *, u32);
178extern void usbnet_get_drvinfo (struct net_device *, struct ethtool_drvinfo *);
179extern int usbnet_nway_reset(struct net_device *net);
180
181/* messaging support includes the interface name, so it must not be
182 * used before it has one ... notably, in minidriver bind() calls.
183 */
184#ifdef DEBUG
185#define devdbg(usbnet, fmt, arg...) \
186 printk(KERN_DEBUG "%s: " fmt "\n" , (usbnet)->net->name , ## arg)
187#else
188#define devdbg(usbnet, fmt, arg...) do {} while(0)
189#endif
190
191#define deverr(usbnet, fmt, arg...) \
192 printk(KERN_ERR "%s: " fmt "\n" , (usbnet)->net->name , ## arg)
193#define devwarn(usbnet, fmt, arg...) \
194 printk(KERN_WARNING "%s: " fmt "\n" , (usbnet)->net->name , ## arg)
195
196#define devinfo(usbnet, fmt, arg...) \
197 printk(KERN_INFO "%s: " fmt "\n" , (usbnet)->net->name , ## arg); \
198
199
200#endif /* __USBNET_H */
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
new file mode 100644
index 000000000000..9f98e8ce487a
--- /dev/null
+++ b/drivers/net/usb/zaurus.c
@@ -0,0 +1,385 @@
1/*
2 * Copyright (C) 2002 Pavel Machek <pavel@ucw.cz>
3 * Copyright (C) 2002-2005 by David Brownell
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20// #define DEBUG // error path messages, extra info
21// #define VERBOSE // more; success messages
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/ethtool.h>
27#include <linux/workqueue.h>
28#include <linux/mii.h>
29#include <linux/crc32.h>
30#include <linux/usb.h>
31#include <linux/usb/cdc.h>
32
33#include "usbnet.h"
34
35
36/*
37 * All known Zaurii lie about their standards conformance. At least
38 * the earliest SA-1100 models lie by saying they support CDC Ethernet.
39 * Some later models (especially PXA-25x and PXA-27x based ones) lie
40 * and say they support CDC MDLM (for access to cell phone modems).
41 *
42 * There are non-Zaurus products that use these same protocols too.
43 *
44 * The annoying thing is that at the same time Sharp was developing
45 * that annoying standards-breaking software, the Linux community had
46 * a simple "CDC Subset" working reliably on the same SA-1100 hardware.
47 * That is, the same functionality but not violating standards.
48 *
49 * The CDC Ethernet nonconformance points are troublesome to hosts
50 * with a true CDC Ethernet implementation:
51 * - Framing appends a CRC, which the spec says drivers "must not" do;
52 * - Transfers data in altsetting zero, instead of altsetting 1;
53 * - All these peripherals use the same ethernet address.
54 *
55 * The CDC MDLM nonconformance is less immediately troublesome, since all
56 * MDLM implementations are quasi-proprietary anyway.
57 */
58
59static struct sk_buff *
60zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
61{
62 int padlen;
63 struct sk_buff *skb2;
64
65 padlen = 2;
66 if (!skb_cloned(skb)) {
67 int tailroom = skb_tailroom(skb);
68 if ((padlen + 4) <= tailroom)
69 goto done;
70 }
71 skb2 = skb_copy_expand(skb, 0, 4 + padlen, flags);
72 dev_kfree_skb_any(skb);
73 skb = skb2;
74 if (skb) {
75 u32 fcs;
76done:
77 fcs = crc32_le(~0, skb->data, skb->len);
78 fcs = ~fcs;
79
80 *skb_put (skb, 1) = fcs & 0xff;
81 *skb_put (skb, 1) = (fcs>> 8) & 0xff;
82 *skb_put (skb, 1) = (fcs>>16) & 0xff;
83 *skb_put (skb, 1) = (fcs>>24) & 0xff;
84 }
85 return skb;
86}
87
88static int zaurus_bind(struct usbnet *dev, struct usb_interface *intf)
89{
90 /* Belcarra's funky framing has other options; mostly
91 * TRAILERS (!) with 4 bytes CRC, and maybe 2 pad bytes.
92 */
93 dev->net->hard_header_len += 6;
94 dev->rx_urb_size = dev->net->hard_header_len + dev->net->mtu;
95 return usbnet_generic_cdc_bind(dev, intf);
96}
97
98/* PDA style devices are always connected if present */
99static int always_connected (struct usbnet *dev)
100{
101 return 0;
102}
103
104static const struct driver_info zaurus_sl5x00_info = {
105 .description = "Sharp Zaurus SL-5x00",
106 .flags = FLAG_FRAMING_Z,
107 .check_connect = always_connected,
108 .bind = zaurus_bind,
109 .unbind = usbnet_cdc_unbind,
110 .tx_fixup = zaurus_tx_fixup,
111};
112#define ZAURUS_STRONGARM_INFO ((unsigned long)&zaurus_sl5x00_info)
113
114static const struct driver_info zaurus_pxa_info = {
115 .description = "Sharp Zaurus, PXA-2xx based",
116 .flags = FLAG_FRAMING_Z,
117 .check_connect = always_connected,
118 .bind = zaurus_bind,
119 .unbind = usbnet_cdc_unbind,
120 .tx_fixup = zaurus_tx_fixup,
121};
122#define ZAURUS_PXA_INFO ((unsigned long)&zaurus_pxa_info)
123
124static const struct driver_info olympus_mxl_info = {
125 .description = "Olympus R1000",
126 .flags = FLAG_FRAMING_Z,
127 .check_connect = always_connected,
128 .bind = zaurus_bind,
129 .unbind = usbnet_cdc_unbind,
130 .tx_fixup = zaurus_tx_fixup,
131};
132#define OLYMPUS_MXL_INFO ((unsigned long)&olympus_mxl_info)
133
134
135/* Some more recent products using Lineo/Belcarra code will wrongly claim
136 * CDC MDLM conformance. They aren't conformant: data endpoints live
137 * in the control interface, there's no data interface, and it's not used
138 * to talk to a cell phone radio. But at least we can detect these two
139 * pseudo-classes, rather than growing this product list with entries for
140 * each new nonconformant product (sigh).
141 */
142static const u8 safe_guid[16] = {
143 0x5d, 0x34, 0xcf, 0x66, 0x11, 0x18, 0x11, 0xd6,
144 0xa2, 0x1a, 0x00, 0x01, 0x02, 0xca, 0x9a, 0x7f,
145};
146static const u8 blan_guid[16] = {
147 0x74, 0xf0, 0x3d, 0xbd, 0x1e, 0xc1, 0x44, 0x70,
148 0xa3, 0x67, 0x71, 0x34, 0xc9, 0xf5, 0x54, 0x37,
149};
150
151static int blan_mdlm_bind(struct usbnet *dev, struct usb_interface *intf)
152{
153 u8 *buf = intf->cur_altsetting->extra;
154 int len = intf->cur_altsetting->extralen;
155 struct usb_cdc_mdlm_desc *desc = NULL;
156 struct usb_cdc_mdlm_detail_desc *detail = NULL;
157
158 while (len > 3) {
159 if (buf [1] != USB_DT_CS_INTERFACE)
160 goto next_desc;
161
162 /* use bDescriptorSubType, and just verify that we get a
163 * "BLAN" (or "SAFE") descriptor.
164 */
165 switch (buf [2]) {
166 case USB_CDC_MDLM_TYPE:
167 if (desc) {
168 dev_dbg(&intf->dev, "extra MDLM\n");
169 goto bad_desc;
170 }
171 desc = (void *) buf;
172 if (desc->bLength != sizeof *desc) {
173 dev_dbg(&intf->dev, "MDLM len %u\n",
174 desc->bLength);
175 goto bad_desc;
176 }
177 /* expect bcdVersion 1.0, ignore */
178 if (memcmp(&desc->bGUID, blan_guid, 16)
179 && memcmp(&desc->bGUID, safe_guid, 16) ) {
180 /* hey, this one might _really_ be MDLM! */
181 dev_dbg(&intf->dev, "MDLM guid\n");
182 goto bad_desc;
183 }
184 break;
185 case USB_CDC_MDLM_DETAIL_TYPE:
186 if (detail) {
187 dev_dbg(&intf->dev, "extra MDLM detail\n");
188 goto bad_desc;
189 }
190 detail = (void *) buf;
191 switch (detail->bGuidDescriptorType) {
192 case 0: /* "SAFE" */
193 if (detail->bLength != (sizeof *detail + 2))
194 goto bad_detail;
195 break;
196 case 1: /* "BLAN" */
197 if (detail->bLength != (sizeof *detail + 3))
198 goto bad_detail;
199 break;
200 default:
201 goto bad_detail;
202 }
203
204 /* assuming we either noticed BLAN already, or will
205 * find it soon, there are some data bytes here:
206 * - bmNetworkCapabilities (unused)
207 * - bmDataCapabilities (bits, see below)
208 * - bPad (ignored, for PADAFTER -- BLAN-only)
209 * bits are:
210 * - 0x01 -- Zaurus framing (add CRC)
211 * - 0x02 -- PADBEFORE (CRC includes some padding)
212 * - 0x04 -- PADAFTER (some padding after CRC)
213 * - 0x08 -- "fermat" packet mangling (for hw bugs)
214 * the PADBEFORE appears not to matter; we interop
215 * with devices that use it and those that don't.
216 */
217 if ((detail->bDetailData[1] & ~0x02) != 0x01) {
218 /* bmDataCapabilities == 0 would be fine too,
219 * but framing is minidriver-coupled for now.
220 */
221bad_detail:
222 dev_dbg(&intf->dev,
223 "bad MDLM detail, %d %d %d\n",
224 detail->bLength,
225 detail->bDetailData[0],
226 detail->bDetailData[2]);
227 goto bad_desc;
228 }
229
230 /* same extra framing as for non-BLAN mode */
231 dev->net->hard_header_len += 6;
232 dev->rx_urb_size = dev->net->hard_header_len
233 + dev->net->mtu;
234 break;
235 }
236next_desc:
237 len -= buf [0]; /* bLength */
238 buf += buf [0];
239 }
240
241 if (!desc || !detail) {
242 dev_dbg(&intf->dev, "missing cdc mdlm %s%sdescriptor\n",
243 desc ? "" : "func ",
244 detail ? "" : "detail ");
245 goto bad_desc;
246 }
247
248 /* There's probably a CDC Ethernet descriptor there, but we can't
249 * rely on the Ethernet address it provides since not all vendors
250 * bother to make it unique. Likewise there's no point in tracking
251	 * the CDC event notifications.
252 */
253 return usbnet_get_endpoints(dev, intf);
254
255bad_desc:
256 dev_info(&dev->udev->dev, "unsupported MDLM descriptors\n");
257 return -ENODEV;
258}
259
260static const struct driver_info bogus_mdlm_info = {
261 .description = "pseudo-MDLM (BLAN) device",
262 .flags = FLAG_FRAMING_Z,
263 .check_connect = always_connected,
264 .tx_fixup = zaurus_tx_fixup,
265 .bind = blan_mdlm_bind,
266};
267
268static const struct usb_device_id products [] = {
269#define ZAURUS_MASTER_INTERFACE \
270 .bInterfaceClass = USB_CLASS_COMM, \
271 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
272 .bInterfaceProtocol = USB_CDC_PROTO_NONE
273
274/* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
275{
276 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
277 | USB_DEVICE_ID_MATCH_DEVICE,
278 .idVendor = 0x04DD,
279 .idProduct = 0x8004,
280 ZAURUS_MASTER_INTERFACE,
281 .driver_info = ZAURUS_STRONGARM_INFO,
282},
283
284/* PXA-2xx based models are also lying-about-cdc. If you add any
285 * more devices that claim to be CDC Ethernet, make sure they get
286 * added to the blacklist in cdc_ether too.
287 *
288 * NOTE: OpenZaurus versions with 2.6 kernels won't use these entries,
289 * unlike the older ones with 2.4 "embedix" kernels.
290 */
291{
292 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
293 | USB_DEVICE_ID_MATCH_DEVICE,
294 .idVendor = 0x04DD,
295 .idProduct = 0x8005, /* A-300 */
296 ZAURUS_MASTER_INTERFACE,
297 .driver_info = ZAURUS_PXA_INFO,
298}, {
299 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
300 | USB_DEVICE_ID_MATCH_DEVICE,
301 .idVendor = 0x04DD,
302 .idProduct = 0x8006, /* B-500/SL-5600 */
303 ZAURUS_MASTER_INTERFACE,
304 .driver_info = ZAURUS_PXA_INFO,
305}, {
306 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
307 | USB_DEVICE_ID_MATCH_DEVICE,
308 .idVendor = 0x04DD,
309 .idProduct = 0x8007, /* C-700 */
310 ZAURUS_MASTER_INTERFACE,
311 .driver_info = ZAURUS_PXA_INFO,
312}, {
313 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
314 | USB_DEVICE_ID_MATCH_DEVICE,
315 .idVendor = 0x04DD,
316 .idProduct = 0x9031, /* C-750 C-760 */
317 ZAURUS_MASTER_INTERFACE,
318 .driver_info = ZAURUS_PXA_INFO,
319}, {
320 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
321 | USB_DEVICE_ID_MATCH_DEVICE,
322 .idVendor = 0x04DD,
323 .idProduct = 0x9032, /* SL-6000 */
324 ZAURUS_MASTER_INTERFACE,
325 .driver_info = ZAURUS_PXA_INFO,
326}, {
327 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
328 | USB_DEVICE_ID_MATCH_DEVICE,
329 .idVendor = 0x04DD,
330 /* reported with some C860 units */
331 .idProduct = 0x9050, /* C-860 */
332 ZAURUS_MASTER_INTERFACE,
333 .driver_info = ZAURUS_PXA_INFO,
334},
335
336
337/* At least some of the newest PXA units have very different lies about
338 * their standards support: they claim to be cell phones offering
339 * direct access to their radios! (No, they don't conform to CDC MDLM.)
340 */
341{
342 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
343 USB_CDC_PROTO_NONE),
344 .driver_info = (unsigned long) &bogus_mdlm_info,
345},
346
347/* Olympus has some models with a Zaurus-compatible option.
348 * R-1000 uses a FreeScale i.MXL cpu (ARMv4T)
349 */
350{
351 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
352 | USB_DEVICE_ID_MATCH_DEVICE,
353 .idVendor = 0x07B4,
354 .idProduct = 0x0F02, /* R-1000 */
355 ZAURUS_MASTER_INTERFACE,
356 .driver_info = OLYMPUS_MXL_INFO,
357},
358 { }, // END
359};
360MODULE_DEVICE_TABLE(usb, products);
361
362static struct usb_driver zaurus_driver = {
363 .name = "zaurus",
364 .id_table = products,
365 .probe = usbnet_probe,
366 .disconnect = usbnet_disconnect,
367 .suspend = usbnet_suspend,
368 .resume = usbnet_resume,
369};
370
371static int __init zaurus_init(void)
372{
373 return usb_register(&zaurus_driver);
374}
375module_init(zaurus_init);
376
377static void __exit zaurus_exit(void)
378{
379 usb_deregister(&zaurus_driver);
380}
381module_exit(zaurus_exit);
382
383MODULE_AUTHOR("Pavel Machek, David Brownell");
384MODULE_DESCRIPTION("Sharp Zaurus PDA, and compatible products");
385MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 38fac3bbcd82..7d5b8c2cc614 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -149,7 +149,7 @@ static int airport_hard_reset(struct orinoco_private *priv)
 	/* Vitally important. If we don't do this it seems we get an
 	 * interrupt somewhere during the power cycle, since
 	 * hw_unavailable is already set it doesn't get ACKed, we get
-	 * into an interrupt loop and the the PMU decides to turn us
+	 * into an interrupt loop and the PMU decides to turn us
 	 * off. */
 	disable_irq(dev->irq);
 
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index f8483c179e4c..10e07e865426 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -658,12 +658,6 @@ struct bcm43xx_pio {
 
 #define BCM43xx_MAX_80211_CORES	2
 
-#ifdef CONFIG_BCM947XX
-#define core_offset(bcm) (bcm)->current_core_offset
-#else
-#define core_offset(bcm) 0
-#endif
-
 /* Generic information about a core. */
 struct bcm43xx_coreinfo {
 	u8 available:1,
@@ -789,10 +783,6 @@ struct bcm43xx_private {
 
 	/* The currently active core. */
 	struct bcm43xx_coreinfo *current_core;
-#ifdef CONFIG_BCM947XX
-	/** current core memory offset */
-	u32 current_core_offset;
-#endif
 	struct bcm43xx_coreinfo *active_80211_core;
 	/* coreinfo structs for all possible cores follow.
 	 * Note that a core might not exist.
@@ -943,25 +933,25 @@ struct bcm43xx_lopair * bcm43xx_get_lopair(struct bcm43xx_phyinfo *phy,
 static inline
 u16 bcm43xx_read16(struct bcm43xx_private *bcm, u16 offset)
 {
-        return ioread16(bcm->mmio_addr + core_offset(bcm) + offset);
+        return ioread16(bcm->mmio_addr + offset);
 }
 
 static inline
 void bcm43xx_write16(struct bcm43xx_private *bcm, u16 offset, u16 value)
 {
-        iowrite16(value, bcm->mmio_addr + core_offset(bcm) + offset);
+        iowrite16(value, bcm->mmio_addr + offset);
 }
 
 static inline
 u32 bcm43xx_read32(struct bcm43xx_private *bcm, u16 offset)
 {
-        return ioread32(bcm->mmio_addr + core_offset(bcm) + offset);
+        return ioread32(bcm->mmio_addr + offset);
 }
 
 static inline
 void bcm43xx_write32(struct bcm43xx_private *bcm, u16 offset, u32 value)
 {
-        iowrite32(value, bcm->mmio_addr + core_offset(bcm) + offset);
+        iowrite32(value, bcm->mmio_addr + offset);
 }
 
 static inline
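
The hunk above drops the core_offset() indirection, so the bcm43xx_read*/bcm43xx_write* accessors now address bcm->mmio_addr directly. A minimal sketch of how callers typically use these accessors; the helper name is hypothetical, the register constant appears elsewhere in this patch:

/* hypothetical helper (not in the driver): read-modify-write of a 32-bit
 * register through the accessors shown in the hunk above */
static void example_set_status_bits(struct bcm43xx_private *bcm, u32 mask)
{
        u32 val;

        val = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
        bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, val | mask);
}
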
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index e3d2e61a31ee..1f7731fcfbd5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -660,10 +660,6 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
         ring->routing = BCM43xx_DMA32_CLIENTTRANS;
         if (dma64)
                 ring->routing = BCM43xx_DMA64_CLIENTTRANS;
-#ifdef CONFIG_BCM947XX
-        if (bcm->pci_dev->bus->number == 0)
-                ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
-#endif
 
         ring->bcm = bcm;
         ring->nr_slots = nr_slots;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 5e96bca6730a..ef6b253a92ce 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -61,10 +61,6 @@ MODULE_AUTHOR("Stefano Brivio");
61MODULE_AUTHOR("Michael Buesch"); 61MODULE_AUTHOR("Michael Buesch");
62MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
63 63
64#ifdef CONFIG_BCM947XX
65extern char *nvram_get(char *name);
66#endif
67
68#if defined(CONFIG_BCM43XX_DMA) && defined(CONFIG_BCM43XX_PIO) 64#if defined(CONFIG_BCM43XX_DMA) && defined(CONFIG_BCM43XX_PIO)
69static int modparam_pio; 65static int modparam_pio;
70module_param_named(pio, modparam_pio, int, 0444); 66module_param_named(pio, modparam_pio, int, 0444);
@@ -142,10 +138,6 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for using multiple fi
         { PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
         /* Broadcom 43XG 802.11b/g */
         { PCI_VENDOR_ID_BROADCOM, 0x4325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-#ifdef CONFIG_BCM947XX
-        /* SB bus on BCM947xx */
-        { PCI_VENDOR_ID_BROADCOM, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-#endif
         { 0 },
 };
 MODULE_DEVICE_TABLE(pci, bcm43xx_pci_tbl);
@@ -786,9 +778,6 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
 {
         u16 value;
         u16 *sprom;
-#ifdef CONFIG_BCM947XX
-        char *c;
-#endif
 
         sprom = kzalloc(BCM43xx_SPROM_SIZE * sizeof(u16),
                         GFP_KERNEL);
@@ -796,28 +785,7 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
                 printk(KERN_ERR PFX "sprom_extract OOM\n");
                 return -ENOMEM;
         }
-#ifdef CONFIG_BCM947XX
-        sprom[BCM43xx_SPROM_BOARDFLAGS2] = atoi(nvram_get("boardflags2"));
-        sprom[BCM43xx_SPROM_BOARDFLAGS] = atoi(nvram_get("boardflags"));
-
-        if ((c = nvram_get("il0macaddr")) != NULL)
-                e_aton(c, (char *) &(sprom[BCM43xx_SPROM_IL0MACADDR]));
-
-        if ((c = nvram_get("et1macaddr")) != NULL)
-                e_aton(c, (char *) &(sprom[BCM43xx_SPROM_ET1MACADDR]));
-
-        sprom[BCM43xx_SPROM_PA0B0] = atoi(nvram_get("pa0b0"));
-        sprom[BCM43xx_SPROM_PA0B1] = atoi(nvram_get("pa0b1"));
-        sprom[BCM43xx_SPROM_PA0B2] = atoi(nvram_get("pa0b2"));
-
-        sprom[BCM43xx_SPROM_PA1B0] = atoi(nvram_get("pa1b0"));
-        sprom[BCM43xx_SPROM_PA1B1] = atoi(nvram_get("pa1b1"));
-        sprom[BCM43xx_SPROM_PA1B2] = atoi(nvram_get("pa1b2"));
-
-        sprom[BCM43xx_SPROM_BOARDREV] = atoi(nvram_get("boardrev"));
-#else
         bcm43xx_sprom_read(bcm, sprom);
-#endif
 
         /* boardflags2 */
         value = sprom[BCM43xx_SPROM_BOARDFLAGS2];
@@ -1225,12 +1193,6 @@ static int _switch_core(struct bcm43xx_private *bcm, int core)
                         goto error;
                 udelay(10);
         }
-#ifdef CONFIG_BCM947XX
-        if (bcm->pci_dev->bus->number == 0)
-                bcm->current_core_offset = 0x1000 * core;
-        else
-                bcm->current_core_offset = 0;
-#endif
 
         return 0;
 error:
@@ -1387,19 +1349,6 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
 
         if ((bcm43xx_core_enabled(bcm)) &&
             !bcm43xx_using_pio(bcm)) {
-//FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here?
-#if 0
-#ifndef CONFIG_BCM947XX
-                /* reset all used DMA controllers. */
-                bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
-                bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA2_BASE);
-                bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA3_BASE);
-                bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
-                bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
-                if (bcm->current_core->rev < 5)
-                        bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
-#endif
-#endif
         }
         if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
                 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
@@ -2140,32 +2089,11 @@ out:
         return err;
 }
 
-#ifdef CONFIG_BCM947XX
-static struct pci_device_id bcm43xx_47xx_ids[] = {
-        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4324) },
-        { 0 }
-};
-#endif
-
 static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
 {
         int err;
 
         bcm->irq = bcm->pci_dev->irq;
-#ifdef CONFIG_BCM947XX
-        if (bcm->pci_dev->bus->number == 0) {
-                struct pci_dev *d;
-                struct pci_device_id *id;
-                for (id = bcm43xx_47xx_ids; id->vendor; id++) {
-                        d = pci_get_device(id->vendor, id->device, NULL);
-                        if (d != NULL) {
-                                bcm->irq = d->irq;
-                                pci_dev_put(d);
-                                break;
-                        }
-                }
-        }
-#endif
         err = request_irq(bcm->irq, bcm43xx_interrupt_handler,
                           IRQF_SHARED, KBUILD_MODNAME, bcm);
         if (err)
@@ -2645,10 +2573,6 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
                         chip_id_16 = 0x4610;
                 else if ((pci_device >= 0x4710) && (pci_device <= 0x4715))
                         chip_id_16 = 0x4710;
-#ifdef CONFIG_BCM947XX
-                else if ((pci_device >= 0x4320) && (pci_device <= 0x4325))
-                        chip_id_16 = 0x4309;
-#endif
                 else {
                         printk(KERN_ERR PFX "Could not determine Chip ID\n");
                         return -ENODEV;
@@ -4144,11 +4068,6 @@ static int __devinit bcm43xx_init_one(struct pci_dev *pdev,
         struct bcm43xx_private *bcm;
         int err;
 
-#ifdef CONFIG_BCM947XX
-        if ((pdev->bus->number == 0) && (pdev->device != 0x0800))
-                return -ENODEV;
-#endif
-
 #ifdef DEBUG_SINGLE_DEVICE_ONLY
         if (strcmp(pci_name(pdev), DEBUG_SINGLE_DEVICE_ONLY))
                 return -ENODEV;
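
One of the CONFIG_BCM947XX blocks removed above walked a private pci_device_id list with pci_get_device() to borrow the IRQ of a companion device. For reference, the reference-counting contract of that API is the part worth keeping in mind: every successful pci_get_device() returns a referenced pci_dev that must be released with pci_dev_put(). A minimal sketch with a hypothetical helper name:

#include <linux/pci.h>

/* hypothetical helper: report the IRQ of the first device in `ids` that is
 * present; the reference taken by pci_get_device() is dropped again with
 * pci_dev_put() before returning */
static int example_irq_from_table(const struct pci_device_id *ids,
                                  unsigned int *irq)
{
        const struct pci_device_id *id;
        struct pci_dev *d;

        for (id = ids; id->vendor; id++) {
                d = pci_get_device(id->vendor, id->device, NULL);
                if (d) {
                        *irq = d->irq;
                        pci_dev_put(d);
                        return 0;
                }
        }
        return -ENODEV;
}
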
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index f76357178e4d..c8f3c532bab5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -33,25 +33,6 @@
 
 #include "bcm43xx.h"
 
-#ifdef CONFIG_BCM947XX
-#define atoi(str) simple_strtoul(((str != NULL) ? str : ""), NULL, 0)
-
-static inline void e_aton(char *str, char *dest)
-{
-        int i = 0;
-        u16 *d = (u16 *) dest;
-
-        for (;;) {
-                dest[i++] = (char) simple_strtoul(str, NULL, 16);
-                str += 2;
-                if (!*str++ || i == 6)
-                        break;
-        }
-        for (i = 0; i < 3; i++)
-                d[i] = cpu_to_be16(d[i]);
-}
-#endif
-
 #define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes]
 #define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes)
 /* Magic helper macro to pad structures. Ignore those above. It's magic. */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 841b3c136ad9..283be4a70524 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -3054,7 +3054,7 @@ static const iw_handler prism54_handler[] = {
         (iw_handler) prism54_set_wap,   /* SIOCSIWAP */
         (iw_handler) prism54_get_wap,   /* SIOCGIWAP */
         (iw_handler) NULL,              /* -- hole -- */
-        (iw_handler) NULL,              /* SIOCGIWAPLIST depreciated */
+        (iw_handler) NULL,              /* SIOCGIWAPLIST deprecated */
         (iw_handler) prism54_set_scan,  /* SIOCSIWSCAN */
         (iw_handler) prism54_get_scan,  /* SIOCGIWSCAN */
         (iw_handler) prism54_set_essid, /* SIOCSIWESSID */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index a037b11dac9d..084795355b74 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -115,7 +115,7 @@ isl_upload_firmware(islpci_private *priv)
                             ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
                         u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;
 
-                        /* set the cards base address for writting the data */
+                        /* set the card's base address for writing the data */
                         isl38xx_w32_flush(device_base, reg,
                                           ISL38XX_DIR_MEM_BASE_REG);
                         wmb();  /* be paranoid */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 67b867f837ca..5740d4d4267c 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -176,7 +176,7 @@ psa_write(struct net_device * dev,
         volatile u_char __iomem *verify = lp->mem + PSA_ADDR +
                 (psaoff(0, psa_comp_number) << 1);
 
-        /* Authorize writting to PSA */
+        /* Authorize writing to PSA */
         hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN);
 
         while(n-- > 0)
@@ -1676,7 +1676,7 @@ wv_set_frequency(u_long base, /* i/o port of the card */
         fee_write(base, 0x60,
                   dac, 2);
 
-        /* We now should verify here that the EEprom writting was ok */
+        /* We now should verify here that the EEprom writing was ok */
 
         /* ReRead the first area */
         fee_read(base, 0x00,
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 4d1c4905c749..4b9de0093a7b 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -120,7 +120,7 @@
  * the Wavelan itself (NCR -> AT&T -> Lucent).
  *
  * All started with Anders Klemets <klemets@paul.rutgers.edu>,
- * writting a Wavelan ISA driver for the MACH microkernel. Girish
+ * writing a Wavelan ISA driver for the MACH microkernel. Girish
  * Welling <welling@paul.rutgers.edu> had also worked on it.
  * Keith Moore modify this for the Pcmcia hardware.
  *
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index e04cffc8adf3..8459549d0cee 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -40,6 +40,7 @@ static struct usb_device_id usb_ids[] = {
         { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
         { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
         { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
+        { USB_DEVICE(0x0df6, 0x9075), .driver_info = DEVICE_ZD1211 },
         { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
         { USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 },
         { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
@@ -67,8 +68,11 @@ static struct usb_device_id usb_ids[] = {
         { USB_DEVICE(0x0586, 0x3410), .driver_info = DEVICE_ZD1211B },
         { USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B },
         { USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B },
+        { USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
+        { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
         /* "Driverless" devices that need ejecting */
         { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
+        { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
         {}
 };
 
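
The zd_usb.c hunks only extend the usb_device_id table; each new entry tags the device as DEVICE_ZD1211, DEVICE_ZD1211B or DEVICE_INSTALLER through .driver_info. A minimal sketch of how a USB driver's probe callback reads that tag back; the function is hypothetical and is not the real zd1211rw probe:

/* hypothetical probe: the table entry that matched is passed in, and the
 * value stored in .driver_info above comes back as id->driver_info */
static int example_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
{
        unsigned long type = id->driver_info;

        dev_info(&intf->dev, "matched device type %lu\n", type);
        return 0;
}
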
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 3f4a7cf9efea..f2a90a7fa2d6 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -109,7 +109,6 @@ static int gx_fix;
 /* These identify the driver base version and may not be removed. */
 static char version[] __devinitdata =
 KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
-KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
 KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
 
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");