author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-07 22:21:56 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-07 22:21:56 -0500
commit		7677ced48e2bbbb8d847d34f37e5d96d2b0e41e4
tree		0a859f403c02eb854d9ffa11bd17f77056891d07
parent		21d37bbc65e39a26856de6b14be371ff24e0d03f
parent		ac38dfc39e7684f55174742e5f0d6c5a0093bbf6
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (116 commits)
  sk98lin: planned removal
  AT91: MACB support
  sky2: version 1.12
  sky2: add new chip ids
  sky2: Yukon Extreme support
  sky2: safer transmit timeout
  sky2: TSO support for EC_U
  sky2: use dev_err for error reports
  sky2: add Wake On Lan support
  fix unaligned exception in /drivers/net/wireless/orinoco.c
  Remove unused kernel config option DLCI_COUNT
  z85230: spinlock logic
  mips: declance: Driver model for the PMAD-A
  Spidernet: Rework RX linked list
  NET: turn local_save_flags() + local_irq_disable() into local_irq_save()
  NET-3c59x: turn local_save_flags() + local_irq_disable() into local_irq_save()
  hp100: convert pci_module_init() to pci_register_driver()
  NetXen: Added ethtool support for user level tools.
  NetXen: Firmware crb init changes.
  maintainers: add atl1 maintainers
  ...
-rw-r--r--  Documentation/feature-removal-schedule.txt | 7
-rw-r--r--  MAINTAINERS | 18
-rw-r--r--  drivers/net/3c59x.c | 3
-rw-r--r--  drivers/net/Kconfig | 65
-rw-r--r--  drivers/net/Makefile | 6
-rw-r--r--  drivers/net/Space.c | 4
-rw-r--r--  drivers/net/amd8111e.c | 3
-rw-r--r--  drivers/net/b44.c | 8
-rw-r--r--  drivers/net/b44.h | 10
-rw-r--r--  drivers/net/bmac.c | 20
-rw-r--r--  drivers/net/bnx2.c | 13
-rw-r--r--  drivers/net/bonding/bond_main.c | 23
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 15
-rw-r--r--  drivers/net/bonding/bonding.h | 9
-rw-r--r--  drivers/net/chelsio/common.h | 2
-rw-r--r--  drivers/net/chelsio/cpl5_cmd.h | 18
-rw-r--r--  drivers/net/chelsio/cxgb2.c | 149
-rw-r--r--  drivers/net/chelsio/elmer0.h | 40
-rw-r--r--  drivers/net/chelsio/espi.c | 44
-rw-r--r--  drivers/net/chelsio/fpga_defs.h | 6
-rw-r--r--  drivers/net/chelsio/gmac.h | 11
-rw-r--r--  drivers/net/chelsio/ixf1010.c | 100
-rw-r--r--  drivers/net/chelsio/mv88e1xxx.c | 27
-rw-r--r--  drivers/net/chelsio/my3126.c | 16
-rw-r--r--  drivers/net/chelsio/pm3393.c | 91
-rw-r--r--  drivers/net/chelsio/sge.c | 328
-rw-r--r--  drivers/net/chelsio/subr.c | 89
-rw-r--r--  drivers/net/chelsio/tp.c | 62
-rw-r--r--  drivers/net/chelsio/vsc7326.c | 139
-rw-r--r--  drivers/net/chelsio/vsc7326_reg.h | 139
-rw-r--r--  drivers/net/chelsio/vsc8244.c | 41
-rw-r--r--  drivers/net/cxgb3/Makefile | 8
-rw-r--r--  drivers/net/cxgb3/adapter.h | 279
-rw-r--r--  drivers/net/cxgb3/ael1002.c | 251
-rw-r--r--  drivers/net/cxgb3/common.h | 729
-rw-r--r--  drivers/net/cxgb3/cxgb3_ctl_defs.h | 164
-rw-r--r--  drivers/net/cxgb3/cxgb3_defs.h | 99
-rw-r--r--  drivers/net/cxgb3/cxgb3_ioctl.h | 185
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 2515
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c | 1222
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.h | 193
-rw-r--r--  drivers/net/cxgb3/firmware_exports.h | 177
-rw-r--r--  drivers/net/cxgb3/l2t.c | 450
-rw-r--r--  drivers/net/cxgb3/l2t.h | 143
-rw-r--r--  drivers/net/cxgb3/mc5.c | 473
-rw-r--r--  drivers/net/cxgb3/regs.h | 2195
-rw-r--r--  drivers/net/cxgb3/sge.c | 2681
-rw-r--r--  drivers/net/cxgb3/sge_defs.h | 251
-rw-r--r--  drivers/net/cxgb3/t3_cpl.h | 1444
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 3375
-rw-r--r--  drivers/net/cxgb3/t3cdev.h | 73
-rw-r--r--  drivers/net/cxgb3/version.h | 39
-rw-r--r--  drivers/net/cxgb3/vsc8211.c | 228
-rw-r--r--  drivers/net/cxgb3/xgmac.c | 409
-rw-r--r--  drivers/net/declance.c | 164
-rw-r--r--  drivers/net/e1000/e1000.h | 7
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 6
-rw-r--r--  drivers/net/e1000/e1000_main.c | 128
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 4
-rw-r--r--  drivers/net/e1000/e1000_param.c | 15
-rw-r--r--  drivers/net/forcedeth.c | 1342
-rw-r--r--  drivers/net/hp100.c | 2
-rw-r--r--  drivers/net/ixgb/ixgb.h | 2
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 6
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 4
-rw-r--r--  drivers/net/macb.c | 25
-rw-r--r--  drivers/net/macb.h | 8
-rw-r--r--  drivers/net/mace.c | 16
-rw-r--r--  drivers/net/macmace.c | 18
-rw-r--r--  drivers/net/macsonic.c | 6
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 10
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 17
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 96
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 279
-rw-r--r--  drivers/net/oaknet.c | 666
-rw-r--r--  drivers/net/pasemi_mac.c | 1019
-rw-r--r--  drivers/net/pasemi_mac.h | 460
-rwxr-xr-x [-rw-r--r--]  drivers/net/qla3xxx.c | 363
-rwxr-xr-x [-rw-r--r--]  drivers/net/qla3xxx.h | 88
-rw-r--r--  drivers/net/s2io-regs.h | 7
-rw-r--r--  drivers/net/s2io.c | 1178
-rw-r--r--  drivers/net/s2io.h | 223
-rw-r--r--  drivers/net/sc92031.c | 1620
-rw-r--r--  drivers/net/sk_mca.c | 1216
-rw-r--r--  drivers/net/sk_mca.h | 170
-rw-r--r--  drivers/net/skfp/can.c | 83
-rw-r--r--  drivers/net/skfp/drvfbi.c | 24
-rw-r--r--  drivers/net/skfp/fplustm.c | 4
-rw-r--r--  drivers/net/skfp/smt.c | 10
-rw-r--r--  drivers/net/skge.c | 235
-rw-r--r--  drivers/net/skge.h | 2
-rw-r--r--  drivers/net/sky2.c | 543
-rw-r--r--  drivers/net/sky2.h | 85
-rw-r--r--  drivers/net/spider_net.c | 313
-rw-r--r--  drivers/net/spider_net.h | 20
-rw-r--r--  drivers/net/spider_net_ethtool.c | 4
-rw-r--r--  drivers/net/tg3.c | 32
-rw-r--r--  drivers/net/ucc_geth.c | 23
-rw-r--r--  drivers/net/wan/Kconfig | 24
-rw-r--r--  drivers/net/wan/Makefile | 1
-rw-r--r--  drivers/net/wan/hdlc.c | 3
-rw-r--r--  drivers/net/wan/pc300too.c | 565
-rw-r--r--  drivers/net/wan/z85230.c | 14
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h | 7
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_leds.c | 11
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 36
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_radio.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_radio.h | 16
-rw-r--r--  drivers/net/wireless/ipw2200.c | 4
-rw-r--r--  drivers/net/wireless/orinoco.c | 2
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 13
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.h | 4
-rw-r--r--  drivers/net/wireless/prism54/islpci_hotplug.c | 3
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c | 126
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.h | 158
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_def.h | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_ieee80211.h | 1
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf.h | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_types.h | 71
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 128
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.h | 6
-rw-r--r--  drivers/usb/net/asix.c | 4
-rw-r--r--  drivers/usb/net/pegasus.h | 4
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 6
125 files changed, 25584 insertions, 5263 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index b3d1ce7e3ba0..2dc5e5da8f88 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -333,3 +333,10 @@ Why: Unmaintained for years, superceded by JFFS2 for years.
 Who:	Jeff Garzik <jeff@garzik.org>
 
 ---------------------------
+
+What:	sk98lin network driver
+When:	July 2007
+Why:	In kernel tree version of driver is unmaintained. Sk98lin driver
+	replaced by the skge driver.
+Who:	Stephen Hemminger <shemminger@osdl.org>
+
diff --git a/MAINTAINERS b/MAINTAINERS
index 96135d285eb0..fe35f3ac4cd3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -598,6 +598,16 @@ M: ecashin@coraid.com
 W:	http://www.coraid.com/support/linux
 S:	Supported
 
+ATL1 ETHERNET DRIVER
+P:	Jay Cliburn
+M:	jcliburn@gmail.com
+P:	Chris Snook
+M:	csnook@redhat.com
+L:	atl1-devel@lists.sourceforge.net
+W:	http://sourceforge.net/projects/atl1
+W:	http://atl1.sourceforge.net
+S:	Maintained
+
 ATM
 P:	Chas Williams
 M:	chas@cmf.nrl.navy.mil
@@ -2485,6 +2495,12 @@ L: orinoco-devel@lists.sourceforge.net
 W:	http://www.nongnu.org/orinoco/
 S:	Maintained
 
+PA SEMI ETHERNET DRIVER
+P:	Olof Johansson
+M:	olof@lixom.net
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 PARALLEL PORT SUPPORT
 P:	Phil Blundell
 M:	philb@gnu.org
@@ -2654,7 +2670,7 @@ S: Supported
 
 PRISM54 WIRELESS DRIVER
 P:	Prism54 Development Team
-M:	prism54-private@prism54.org
+M:	developers@islsm.org
 L:	netdev@vger.kernel.org
 W:	http://prism54.org
 S:	Maintained
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80bdcf846234..716a47210aa3 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -792,8 +792,7 @@ static void poll_vortex(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
 	local_irq_restore(flags);
 }
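
The 3c59x and amd8111e hunks in this series make the same substitution: an open-coded local_save_flags() followed by local_irq_disable() becomes a single local_irq_save(), which records the flags and disables local interrupts in one call; local_irq_restore() is unchanged. A minimal sketch of the pattern, with a hypothetical driver poll routine and interrupt handler rather than code from either file:

	#include <linux/irqflags.h>
	#include <linux/netdevice.h>

	/* Illustrative netpoll-style handler; example_interrupt() stands in
	 * for the driver's normal interrupt routine. */
	static void example_poll_controller(struct net_device *dev)
	{
		unsigned long flags;

		local_irq_save(flags);	/* was: local_save_flags(); local_irq_disable(); */
		example_interrupt(dev->irq, dev);
		local_irq_restore(flags);
	}
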
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8aa8dd02b910..ad92b6a76ee6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -190,7 +190,7 @@ config MII
 
 config MACB
 	tristate "Atmel MACB support"
-	depends on NET_ETHERNET && AVR32
+	depends on NET_ETHERNET && (AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263)
 	select MII
 	help
 	  The Atmel MACB ethernet interface is found on many AT32 and AT91
@@ -235,16 +235,6 @@ config BMAC
 	  To compile this driver as a module, choose M here: the module
 	  will be called bmac.
 
-config OAKNET
-	tristate "National DP83902AV (Oak ethernet) support"
-	depends on NET_ETHERNET && PPC && BROKEN
-	select CRC32
-	help
-	  Say Y if your machine has this type of Ethernet network card.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called oaknet.
-
 config ARIADNE
 	tristate "Ariadne support"
 	depends on NET_ETHERNET && ZORRO
@@ -1155,21 +1145,6 @@ config SEEQ8005
 	  <file:Documentation/networking/net-modules.txt>. The module
 	  will be called seeq8005.
 
-config SKMC
-	tristate "SKnet MCA support"
-	depends on NET_ETHERNET && MCA && BROKEN
-	---help---
-	  These are Micro Channel Ethernet adapters. You need to say Y to "MCA
-	  support" in order to use this driver.  Supported cards are the SKnet
-	  Junior MC2 and the SKnet MC2(+).  The driver automatically
-	  distinguishes between the two cards. Note that using multiple boards
-	  of different type hasn't been tested with this driver.  Say Y if you
-	  have one of these Ethernet adapters.
-
-	  To compile this driver as a module, choose M here and read
-	  <file:Documentation/networking/net-modules.txt>. The module
-	  will be called sk_mca.
-
 config NE2_MCA
 	tristate "NE/2 (ne2000 MCA version) support"
 	depends on NET_ETHERNET && MCA_LEGACY
@@ -1788,6 +1763,18 @@ config LAN_SAA9730
 	  workstations.
 	  See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>.
 
+config SC92031
+	tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
+	depends on NET_PCI && PCI && EXPERIMENTAL
+	select CRC32
+	---help---
+	  This is a driver for the Fast Ethernet PCI network cards based on
+	  the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
+	  have one of these, say Y here.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sc92031.  This is recommended.
+
 config NET_POCKET
 	bool "Pocket and portable adapters"
 	depends on NET_ETHERNET && PARPORT
@@ -2392,6 +2379,24 @@ config CHELSIO_T1_NAPI
 	  NAPI is a driver API designed to reduce CPU and interrupt load
 	  when the driver is receiving lots of packets from the card.
 
+config CHELSIO_T3
+	tristate "Chelsio Communications T3 10Gb Ethernet support"
+	depends on PCI
+	help
+	  This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
+	  adapters.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.htm>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb3.
+
 config EHEA
 	tristate "eHEA Ethernet support"
 	depends on IBMEBUS
@@ -2488,6 +2493,13 @@ config NETXEN_NIC
 	help
 	  This enables the support for NetXen's Gigabit Ethernet card.
 
+config PASEMI_MAC
+	tristate "PA Semi 1/10Gbit MAC"
+	depends on PPC64 && PCI
+	help
+	  This driver supports the on-chip 1/10Gbit Ethernet controller on
+	  PA Semi's PWRficient line of chips.
+
 endmenu
 
 source "drivers/net/tokenring/Kconfig"
@@ -2541,6 +2553,7 @@ config DEFXX
 config SKFP
 	tristate "SysKonnect FDDI PCI support"
 	depends on FDDI && PCI
+	select BITREVERSE
 	---help---
 	  Say Y here if you have a SysKonnect FDDI PCI adapter.
 	  The following adapters are supported by this driver:
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4c0d4e5ce42b..0878e3df5174 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
 obj-$(CONFIG_CHELSIO_T1) += chelsio/
+obj-$(CONFIG_CHELSIO_T3) += cxgb3/
 obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
@@ -36,8 +37,6 @@ obj-$(CONFIG_CASSINI) += cassini.o
 obj-$(CONFIG_MACE) += mace.o
 obj-$(CONFIG_BMAC) += bmac.o
 
-obj-$(CONFIG_OAKNET) += oaknet.o 8390.o
-
 obj-$(CONFIG_DGRS) += dgrs.o
 obj-$(CONFIG_VORTEX) += 3c59x.o
 obj-$(CONFIG_TYPHOON) += typhoon.o
@@ -137,7 +136,6 @@ obj-$(CONFIG_AT1700) += at1700.o
 obj-$(CONFIG_EL1) += 3c501.o
 obj-$(CONFIG_EL16) += 3c507.o
 obj-$(CONFIG_ELMC) += 3c523.o
-obj-$(CONFIG_SKMC) += sk_mca.o
 obj-$(CONFIG_IBMLANA) += ibmlana.o
 obj-$(CONFIG_ELMC_II) += 3c527.o
 obj-$(CONFIG_EL3) += 3c509.o
@@ -160,6 +158,7 @@ obj-$(CONFIG_APRICOT) += 82596.o
 obj-$(CONFIG_LASI_82596) += lasi_82596.o
 obj-$(CONFIG_MVME16x_NET) += 82596.o
 obj-$(CONFIG_BVME6000_NET) += 82596.o
+obj-$(CONFIG_SC92031) += sc92031.o
 
 # This is also a 82596 and should probably be merged
 obj-$(CONFIG_LP486E) += lp486e.o
@@ -196,6 +195,7 @@ obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
 
 obj-$(CONFIG_MACB) += macb.o
 
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 9305eb9b1b98..dd8ed456c8b2 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -59,7 +59,6 @@ extern struct net_device *wavelan_probe(int unit);
 extern struct net_device *arlan_probe(int unit);
 extern struct net_device *el16_probe(int unit);
 extern struct net_device *elmc_probe(int unit);
-extern struct net_device *skmca_probe(int unit);
 extern struct net_device *elplus_probe(int unit);
 extern struct net_device *ac3200_probe(int unit);
 extern struct net_device *es_probe(int unit);
@@ -153,9 +152,6 @@ static struct devprobe2 mca_probes[] __initdata = {
 #ifdef CONFIG_ELMC_II		/* 3c527 */
 	{mc32_probe, 0},
 #endif
-#ifdef CONFIG_SKMC		/* SKnet Microchannel */
-	{skmca_probe, 0},
-#endif
 	{NULL, 0},
 };
 
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 18896f24d407..9c399aaefbdd 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1334,8 +1334,7 @@ err_no_interrupt:
 static void amd8111e_poll(struct net_device *dev)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	amd8111e_interrupt(0, dev);
 	local_irq_restore(flags);
 }
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 303a8d94ad4b..5ff7882297d6 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -721,7 +721,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	struct ring_info *src_map, *dest_map;
 	struct rx_header *rh;
 	int dest_idx;
-	u32 ctrl;
+	__le32 ctrl;
 
 	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
 	dest_desc = &bp->rx_ring[dest_idx];
@@ -783,7 +783,7 @@ static int b44_rx(struct b44 *bp, int budget)
 					    RX_PKT_BUF_SZ,
 					    PCI_DMA_FROMDEVICE);
 		rh = (struct rx_header *) skb->data;
-		len = cpu_to_le16(rh->len);
+		len = le16_to_cpu(rh->len);
 		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
 		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
 		drop_it:
@@ -799,7 +799,7 @@ static int b44_rx(struct b44 *bp, int budget)
 			do {
 				udelay(2);
 				barrier();
-				len = cpu_to_le16(rh->len);
+				len = le16_to_cpu(rh->len);
 			} while (len == 0 && i++ < 5);
 			if (len == 0)
 				goto drop_it;
@@ -2061,7 +2061,7 @@ out:
 static int b44_read_eeprom(struct b44 *bp, u8 *data)
 {
 	long i;
-	u16 *ptr = (u16 *) data;
+	__le16 *ptr = (__le16 *) data;
 
 	for (i = 0; i < 128; i += 2)
 		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 4944507fad23..18fc13336628 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -308,8 +308,8 @@
 #define  MII_TLEDCTRL_ENABLE	0x0040
 
 struct dma_desc {
-	u32	ctrl;
-	u32	addr;
+	__le32	ctrl;
+	__le32	addr;
 };
 
 /* There are only 12 bits in the DMA engine for descriptor offsetting
@@ -327,9 +327,9 @@ struct dma_desc {
 #define RX_COPY_THRESHOLD	256
 
 struct rx_header {
-	u16	len;
-	u16	flags;
-	u16	pad[12];
+	__le16	len;
+	__le16	flags;
+	__le16	pad[12];
 };
 #define RX_HEADER_LEN	28
 
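
The b44 hunks above do two related things: the DMA descriptor and rx_header fields become __le16/__le32 so sparse can check endianness, and the two places that converted a device-written length with cpu_to_le16() now use le16_to_cpu(), the correct direction for data read from the hardware. A short sketch of the convention, with struct and function names invented for illustration:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct example_rx_header {
		__le16 len;	/* stored little-endian by the device */
		__le16 flags;
	};

	static inline u16 example_rx_len(const struct example_rx_header *rh)
	{
		return le16_to_cpu(rh->len);	/* device order -> CPU order on read */
	}

	static inline void example_set_len(struct example_rx_header *rh, u16 len)
	{
		rh->len = cpu_to_le16(len);	/* CPU order -> device order on write */
	}
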
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 4528ce9c4e43..c143304dcff5 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/crc32.h>
+#include <linux/bitrev.h>
 #include <asm/prom.h>
 #include <asm/dbdma.h>
 #include <asm/io.h>
@@ -140,7 +141,6 @@ static unsigned char *bmac_emergency_rxbuf;
 	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
 	+ sizeof(struct sk_buff_head))
 
-static unsigned char bitrev(unsigned char b);
 static int bmac_open(struct net_device *dev);
 static int bmac_close(struct net_device *dev);
 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
@@ -586,18 +586,6 @@ bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
 		     virt_to_bus(addr), 0);
 }
 
-/* Bit-reverse one byte of an ethernet hardware address. */
-static unsigned char
-bitrev(unsigned char b)
-{
-	int d = 0, i;
-
-	for (i = 0; i < 8; ++i, b >>= 1)
-		d = (d << 1) | (b & 1);
-	return d;
-}
-
-
 static void
 bmac_init_tx_ring(struct bmac_data *bp)
 {
@@ -1224,8 +1212,8 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
 	{
 		reset_and_select_srom(dev);
 		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
-		ea[2*i]   = bitrev(data & 0x0ff);
-		ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
+		ea[2*i]   = bitrev8(data & 0x0ff);
+		ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
 	}
 }
 
@@ -1315,7 +1303,7 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
 
 	rev = addr[0] == 0 && addr[1] == 0xA0;
 	for (j = 0; j < 6; ++j)
-		dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
+		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
 
 	/* Enable chip without interrupts for now */
 	bmac_enable_and_reset_chip(dev);
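
The bmac change drops the file-local bitrev() helper in favour of the shared bitrev8() from <linux/bitrev.h>; the SKFP entry in the Kconfig hunk above gains "select BITREVERSE" for the same shared lookup table. A small usage sketch (the fixup routine itself is invented for illustration):

	#include <linux/bitrev.h>

	/* Reverse the bit order of each byte of a 6-byte Ethernet address,
	 * as bmac_get_station_address() above does for data read from SROM. */
	static void example_bitrev_addr(u8 *ea)
	{
		int i;

		for (i = 0; i < 6; i++)
			ea[i] = bitrev8(ea[i]);
	}
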
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ee7b75b976b5..c416c18007da 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -39,12 +39,9 @@
 #include <linux/if_vlan.h>
 #define BCM_VLAN 1
 #endif
-#ifdef NETIF_F_TSO
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/checksum.h>
-#define BCM_TSO 1
-#endif
 #include <linux/workqueue.h>
 #include <linux/crc32.h>
 #include <linux/prefetch.h>
@@ -1728,7 +1725,7 @@ bnx2_tx_int(struct bnx2 *bp)
 
 		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
 		skb = tx_buf->skb;
-#ifdef BCM_TSO
+
 		/* partial BD completions possible with TSO packets */
 		if (skb_is_gso(skb)) {
 			u16 last_idx, last_ring_idx;
@@ -1744,7 +1741,7 @@ bnx2_tx_int(struct bnx2 *bp)
 					break;
 			}
 		}
-#endif
+
 		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
 			skb_headlen(skb), PCI_DMA_TODEVICE);
 
@@ -4514,7 +4511,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vlan_tag_flags |=
 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
 	}
-#ifdef BCM_TSO
 	if ((mss = skb_shinfo(skb)->gso_size) &&
 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
 		u32 tcp_opt_len, ip_tcp_len;
@@ -4547,7 +4543,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 	else
-#endif
 	{
 		mss = 0;
 	}
@@ -5544,10 +5539,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
 	.set_tx_csum		= ethtool_op_set_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
-#ifdef BCM_TSO
 	.get_tso		= ethtool_op_get_tso,
 	.set_tso		= bnx2_set_tso,
-#endif
 	.self_test_count	= bnx2_self_test_count,
 	.self_test		= bnx2_self_test,
 	.get_strings		= bnx2_get_strings,
@@ -6104,9 +6097,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #ifdef BCM_VLAN
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 #endif
-#ifdef BCM_TSO
 	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
-#endif
 
 	netif_carrier_off(bp->dev);
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6482aed4bb7c..d3801a00d3d5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4704,6 +4704,7 @@ static int bond_check_params(struct bond_params *params)
 static struct lock_class_key bonding_netdev_xmit_lock_key;
 
 /* Create a new bond based on the specified name and bonding parameters.
+ * If name is NULL, obtain a suitable "bond%d" name for us.
  * Caller must NOT hold rtnl_lock; we need to release it here before we
  * set up our sysfs entries.
  */
@@ -4713,7 +4714,8 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
 	int res;
 
 	rtnl_lock();
-	bond_dev = alloc_netdev(sizeof(struct bonding), name, ether_setup);
+	bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
+				ether_setup);
 	if (!bond_dev) {
 		printk(KERN_ERR DRV_NAME
 		       ": %s: eek! can't alloc netdev!\n",
@@ -4722,6 +4724,12 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
 		goto out_rtnl;
 	}
 
+	if (!name) {
+		res = dev_alloc_name(bond_dev, "bond%d");
+		if (res < 0)
+			goto out_netdev;
+	}
+
 	/* bond_init() must be called after dev_alloc_name() (for the
 	 * /proc files), but before register_netdevice(), because we
 	 * need to set function pointers.
@@ -4748,14 +4756,19 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
 
 	rtnl_unlock(); /* allows sysfs registration of net device */
 	res = bond_create_sysfs_entry(bond_dev->priv);
-	goto done;
+	if (res < 0) {
+		rtnl_lock();
+		goto out_bond;
+	}
+
+	return 0;
+
 out_bond:
 	bond_deinit(bond_dev);
 out_netdev:
 	free_netdev(bond_dev);
 out_rtnl:
 	rtnl_unlock();
-done:
 	return res;
 }
 
@@ -4763,7 +4776,6 @@ static int __init bonding_init(void)
 {
 	int i;
 	int res;
-	char new_bond_name[8];  /* Enough room for 999 bonds at init. */
 
 	printk(KERN_INFO "%s", version);
 
@@ -4776,8 +4788,7 @@ static int __init bonding_init(void)
 	bond_create_proc_dir();
 #endif
 	for (i = 0; i < max_bonds; i++) {
-		sprintf(new_bond_name, "bond%d",i);
-		res = bond_create(new_bond_name,&bonding_defaults, NULL);
+		res = bond_create(NULL, &bonding_defaults, NULL);
 		if (res)
 			goto err;
 	}
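
bond_create() can now be called with a NULL name: it allocates the netdev with an empty name and asks dev_alloc_name() for the next free "bond%d" before bond_init() and register_netdevice() run, which is why bonding_init() no longer formats names itself. A condensed sketch of that allocation flow, with error handling trimmed and the wrapper name assumed:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	static struct net_device *example_create(const char *name, int priv_size)
	{
		struct net_device *dev;

		/* alloc_netdev() copies the name; pass "" when it will be
		 * chosen below. */
		dev = alloc_netdev(priv_size, name ? name : "", ether_setup);
		if (!dev)
			return NULL;

		/* Let the core pick the first unused bond%d if none was given. */
		if (!name && dev_alloc_name(dev, "bond%d") < 0) {
			free_netdev(dev);
			return NULL;
		}
		return dev;
	}
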
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ced9ed8f995a..8e317e115532 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1372,6 +1372,21 @@ int bond_create_sysfs(void)
 		return -ENODEV;
 
 	ret = class_create_file(netdev_class, &class_attr_bonding_masters);
+	/*
+	 * Permit multiple loads of the module by ignoring failures to
+	 * create the bonding_masters sysfs file.  Bonding devices
+	 * created by second or subsequent loads of the module will
+	 * not be listed in, or controllable by, bonding_masters, but
+	 * will have the usual "bonding" sysfs directory.
+	 *
+	 * This is done to preserve backwards compatibility for
+	 * initscripts/sysconfig, which load bonding multiple times to
+	 * configure multiple bonding devices.
+	 */
+	if (ret == -EEXIST) {
+		netdev_class = NULL;
+		return 0;
+	}
 
 	return ret;
 
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 0978c9ac6d2b..41aa78bf1f78 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
22#include "bond_3ad.h" 22#include "bond_3ad.h"
23#include "bond_alb.h" 23#include "bond_alb.h"
24 24
25#define DRV_VERSION "3.1.1" 25#define DRV_VERSION "3.1.2"
26#define DRV_RELDATE "September 26, 2006" 26#define DRV_RELDATE "January 20, 2007"
27#define DRV_NAME "bonding" 27#define DRV_NAME "bonding"
28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
29 29
@@ -237,12 +237,13 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
 #define BOND_ARP_VALIDATE_ALL		(BOND_ARP_VALIDATE_ACTIVE | \
 					 BOND_ARP_VALIDATE_BACKUP)
 
-extern inline int slave_do_arp_validate(struct bonding *bond, struct slave *slave)
+static inline int slave_do_arp_validate(struct bonding *bond,
+					struct slave *slave)
 {
 	return bond->params.arp_validate & (1 << slave->state);
 }
 
-extern inline unsigned long slave_last_rx(struct bonding *bond,
+static inline unsigned long slave_last_rx(struct bonding *bond,
 					struct slave *slave)
 {
 	if (slave_do_arp_validate(bond, slave))
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 74758d2c7af8..787f2f2820fe 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -324,7 +324,7 @@ struct board_info {
324 unsigned char mdio_phybaseaddr; 324 unsigned char mdio_phybaseaddr;
325 struct gmac *gmac; 325 struct gmac *gmac;
326 struct gphy *gphy; 326 struct gphy *gphy;
327 struct mdio_ops *mdio_ops; 327 struct mdio_ops *mdio_ops;
328 const char *desc; 328 const char *desc;
329}; 329};
330 330
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
index 35f565be4fd3..e36d45b78cc7 100644
--- a/drivers/net/chelsio/cpl5_cmd.h
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -103,7 +103,7 @@ enum CPL_opcode {
103 CPL_MIGRATE_C2T_RPL = 0xDD, 103 CPL_MIGRATE_C2T_RPL = 0xDD,
104 CPL_ERROR = 0xD7, 104 CPL_ERROR = 0xD7,
105 105
106 /* internal: driver -> TOM */ 106 /* internal: driver -> TOM */
107 CPL_MSS_CHANGE = 0xE1 107 CPL_MSS_CHANGE = 0xE1
108}; 108};
109 109
@@ -159,8 +159,8 @@ enum { // TX_PKT_LSO ethernet types
159}; 159};
160 160
161union opcode_tid { 161union opcode_tid {
162 u32 opcode_tid; 162 u32 opcode_tid;
163 u8 opcode; 163 u8 opcode;
164}; 164};
165 165
166#define S_OPCODE 24 166#define S_OPCODE 24
@@ -234,7 +234,7 @@ struct cpl_pass_accept_req {
234 u32 local_ip; 234 u32 local_ip;
235 u32 peer_ip; 235 u32 peer_ip;
236 u32 tos_tid; 236 u32 tos_tid;
237 struct tcp_options tcp_options; 237 struct tcp_options tcp_options;
238 u8 dst_mac[6]; 238 u8 dst_mac[6];
239 u16 vlan_tag; 239 u16 vlan_tag;
240 u8 src_mac[6]; 240 u8 src_mac[6];
@@ -250,12 +250,12 @@ struct cpl_pass_accept_rpl {
250 u32 peer_ip; 250 u32 peer_ip;
251 u32 opt0h; 251 u32 opt0h;
252 union { 252 union {
253 u32 opt0l; 253 u32 opt0l;
254 struct { 254 struct {
255 u8 rsvd[3]; 255 u8 rsvd[3];
256 u8 status; 256 u8 status;
257 };
257 }; 258 };
258 };
259}; 259};
260 260
261struct cpl_act_open_req { 261struct cpl_act_open_req {
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index fd5d821f3f2a..7d0f24f69777 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -69,14 +69,14 @@ static inline void cancel_mac_stats_update(struct adapter *ap)
69 cancel_delayed_work(&ap->stats_update_task); 69 cancel_delayed_work(&ap->stats_update_task);
70} 70}
71 71
72#define MAX_CMDQ_ENTRIES 16384 72#define MAX_CMDQ_ENTRIES 16384
73#define MAX_CMDQ1_ENTRIES 1024 73#define MAX_CMDQ1_ENTRIES 1024
74#define MAX_RX_BUFFERS 16384 74#define MAX_RX_BUFFERS 16384
75#define MAX_RX_JUMBO_BUFFERS 16384 75#define MAX_RX_JUMBO_BUFFERS 16384
76#define MAX_TX_BUFFERS_HIGH 16384U 76#define MAX_TX_BUFFERS_HIGH 16384U
77#define MAX_TX_BUFFERS_LOW 1536U 77#define MAX_TX_BUFFERS_LOW 1536U
78#define MAX_TX_BUFFERS 1460U 78#define MAX_TX_BUFFERS 1460U
79#define MIN_FL_ENTRIES 32 79#define MIN_FL_ENTRIES 32
80 80
81#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 81#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ 82 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
@@ -143,7 +143,7 @@ static void link_report(struct port_info *p)
143 case SPEED_100: s = "100Mbps"; break; 143 case SPEED_100: s = "100Mbps"; break;
144 } 144 }
145 145
146 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", 146 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
147 p->dev->name, s, 147 p->dev->name, s,
148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); 148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149 } 149 }
@@ -233,7 +233,7 @@ static int cxgb_up(struct adapter *adapter)
233 233
234 t1_sge_start(adapter->sge); 234 t1_sge_start(adapter->sge);
235 t1_interrupts_enable(adapter); 235 t1_interrupts_enable(adapter);
236 out_err: 236out_err:
237 return err; 237 return err;
238} 238}
239 239
@@ -454,51 +454,21 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
454 const struct cmac_statistics *s; 454 const struct cmac_statistics *s;
455 const struct sge_intr_counts *t; 455 const struct sge_intr_counts *t;
456 struct sge_port_stats ss; 456 struct sge_port_stats ss;
457 unsigned int len;
457 458
458 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); 459 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
459 460
460 *data++ = s->TxOctetsOK; 461 len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
461 *data++ = s->TxOctetsBad; 462 memcpy(data, &s->TxOctetsOK, len);
462 *data++ = s->TxUnicastFramesOK; 463 data += len;
463 *data++ = s->TxMulticastFramesOK; 464
464 *data++ = s->TxBroadcastFramesOK; 465 len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
465 *data++ = s->TxPauseFrames; 466 memcpy(data, &s->RxOctetsOK, len);
466 *data++ = s->TxFramesWithDeferredXmissions; 467 data += len;
467 *data++ = s->TxLateCollisions;
468 *data++ = s->TxTotalCollisions;
469 *data++ = s->TxFramesAbortedDueToXSCollisions;
470 *data++ = s->TxUnderrun;
471 *data++ = s->TxLengthErrors;
472 *data++ = s->TxInternalMACXmitError;
473 *data++ = s->TxFramesWithExcessiveDeferral;
474 *data++ = s->TxFCSErrors;
475
476 *data++ = s->RxOctetsOK;
477 *data++ = s->RxOctetsBad;
478 *data++ = s->RxUnicastFramesOK;
479 *data++ = s->RxMulticastFramesOK;
480 *data++ = s->RxBroadcastFramesOK;
481 *data++ = s->RxPauseFrames;
482 *data++ = s->RxFCSErrors;
483 *data++ = s->RxAlignErrors;
484 *data++ = s->RxSymbolErrors;
485 *data++ = s->RxDataErrors;
486 *data++ = s->RxSequenceErrors;
487 *data++ = s->RxRuntErrors;
488 *data++ = s->RxJabberErrors;
489 *data++ = s->RxInternalMACRcvError;
490 *data++ = s->RxInRangeLengthErrors;
491 *data++ = s->RxOutOfRangeLengthField;
492 *data++ = s->RxFrameTooLongErrors;
493 468
494 t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); 469 t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
495 *data++ = ss.rx_packets; 470 memcpy(data, &ss, sizeof(ss));
496 *data++ = ss.rx_cso_good; 471 data += sizeof(ss);
497 *data++ = ss.tx_packets;
498 *data++ = ss.tx_cso;
499 *data++ = ss.tx_tso;
500 *data++ = ss.vlan_xtract;
501 *data++ = ss.vlan_insert;
502 472
503 t = t1_sge_get_intr_counts(adapter->sge); 473 t = t1_sge_get_intr_counts(adapter->sge);
504 *data++ = t->rx_drops; 474 *data++ = t->rx_drops;
@@ -749,7 +719,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
749 return -EINVAL; 719 return -EINVAL;
750 720
751 if (adapter->flags & FULL_INIT_DONE) 721 if (adapter->flags & FULL_INIT_DONE)
752 return -EBUSY; 722 return -EBUSY;
753 723
754 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; 724 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
755 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; 725 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
@@ -764,7 +734,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
764 struct adapter *adapter = dev->priv; 734 struct adapter *adapter = dev->priv;
765 735
766 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; 736 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
767 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; 737 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
768 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; 738 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
769 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); 739 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
770 return 0; 740 return 0;
@@ -782,9 +752,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
782 752
783static int get_eeprom_len(struct net_device *dev) 753static int get_eeprom_len(struct net_device *dev)
784{ 754{
785 struct adapter *adapter = dev->priv; 755 struct adapter *adapter = dev->priv;
786 756
787 return t1_is_asic(adapter) ? EEPROM_SIZE : 0; 757 return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
788} 758}
789 759
790#define EEPROM_MAGIC(ap) \ 760#define EEPROM_MAGIC(ap) \
@@ -848,7 +818,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
848 u32 val; 818 u32 val;
849 819
850 if (!phy->mdio_read) 820 if (!phy->mdio_read)
851 return -EOPNOTSUPP; 821 return -EOPNOTSUPP;
852 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f, 822 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
853 &val); 823 &val);
854 data->val_out = val; 824 data->val_out = val;
@@ -860,7 +830,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
860 if (!capable(CAP_NET_ADMIN)) 830 if (!capable(CAP_NET_ADMIN))
861 return -EPERM; 831 return -EPERM;
862 if (!phy->mdio_write) 832 if (!phy->mdio_write)
863 return -EOPNOTSUPP; 833 return -EOPNOTSUPP;
864 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f, 834 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
865 data->val_in); 835 data->val_in);
866 break; 836 break;
@@ -879,9 +849,9 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
879 struct cmac *mac = adapter->port[dev->if_port].mac; 849 struct cmac *mac = adapter->port[dev->if_port].mac;
880 850
881 if (!mac->ops->set_mtu) 851 if (!mac->ops->set_mtu)
882 return -EOPNOTSUPP; 852 return -EOPNOTSUPP;
883 if (new_mtu < 68) 853 if (new_mtu < 68)
884 return -EINVAL; 854 return -EINVAL;
885 if ((ret = mac->ops->set_mtu(mac, new_mtu))) 855 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
886 return ret; 856 return ret;
887 dev->mtu = new_mtu; 857 dev->mtu = new_mtu;
@@ -1211,9 +1181,9 @@ static int __devinit init_one(struct pci_dev *pdev,
1211 1181
1212 return 0; 1182 return 0;
1213 1183
1214 out_release_adapter_res: 1184out_release_adapter_res:
1215 t1_free_sw_modules(adapter); 1185 t1_free_sw_modules(adapter);
1216 out_free_dev: 1186out_free_dev:
1217 if (adapter) { 1187 if (adapter) {
1218 if (adapter->regs) 1188 if (adapter->regs)
1219 iounmap(adapter->regs); 1189 iounmap(adapter->regs);
@@ -1222,7 +1192,7 @@ static int __devinit init_one(struct pci_dev *pdev,
1222 free_netdev(adapter->port[i].dev); 1192 free_netdev(adapter->port[i].dev);
1223 } 1193 }
1224 pci_release_regions(pdev); 1194 pci_release_regions(pdev);
1225 out_disable_pdev: 1195out_disable_pdev:
1226 pci_disable_device(pdev); 1196 pci_disable_device(pdev);
1227 pci_set_drvdata(pdev, NULL); 1197 pci_set_drvdata(pdev, NULL);
1228 return err; 1198 return err;
@@ -1273,28 +1243,27 @@ static int t1_clock(struct adapter *adapter, int mode)
1273 int M_MEM_VAL; 1243 int M_MEM_VAL;
1274 1244
1275 enum { 1245 enum {
1276 M_CORE_BITS = 9, 1246 M_CORE_BITS = 9,
1277 T_CORE_VAL = 0, 1247 T_CORE_VAL = 0,
1278 T_CORE_BITS = 2, 1248 T_CORE_BITS = 2,
1279 N_CORE_VAL = 0, 1249 N_CORE_VAL = 0,
1280 N_CORE_BITS = 2, 1250 N_CORE_BITS = 2,
1281 M_MEM_BITS = 9, 1251 M_MEM_BITS = 9,
1282 T_MEM_VAL = 0, 1252 T_MEM_VAL = 0,
1283 T_MEM_BITS = 2, 1253 T_MEM_BITS = 2,
1284 N_MEM_VAL = 0, 1254 N_MEM_VAL = 0,
1285 N_MEM_BITS = 2, 1255 N_MEM_BITS = 2,
1286 NP_LOAD = 1 << 17, 1256 NP_LOAD = 1 << 17,
1287 S_LOAD_MEM = 1 << 5, 1257 S_LOAD_MEM = 1 << 5,
1288 S_LOAD_CORE = 1 << 6, 1258 S_LOAD_CORE = 1 << 6,
1289 S_CLOCK = 1 << 3 1259 S_CLOCK = 1 << 3
1290 }; 1260 };
1291 1261
1292 if (!t1_is_T1B(adapter)) 1262 if (!t1_is_T1B(adapter))
1293 return -ENODEV; /* Can't re-clock this chip. */ 1263 return -ENODEV; /* Can't re-clock this chip. */
1294 1264
1295 if (mode & 2) { 1265 if (mode & 2)
1296 return 0; /* show current mode. */ 1266 return 0; /* show current mode. */
1297 }
1298 1267
1299 if ((adapter->t1powersave & 1) == (mode & 1)) 1268 if ((adapter->t1powersave & 1) == (mode & 1))
1300 return -EALREADY; /* ASIC already running in mode. */ 1269 return -EALREADY; /* ASIC already running in mode. */
@@ -1386,26 +1355,26 @@ static inline void t1_sw_reset(struct pci_dev *pdev)
1386static void __devexit remove_one(struct pci_dev *pdev) 1355static void __devexit remove_one(struct pci_dev *pdev)
1387{ 1356{
1388 struct net_device *dev = pci_get_drvdata(pdev); 1357 struct net_device *dev = pci_get_drvdata(pdev);
1358 struct adapter *adapter = dev->priv;
1359 int i;
1389 1360
1390 if (dev) { 1361 for_each_port(adapter, i) {
1391 int i; 1362 if (test_bit(i, &adapter->registered_device_map))
1392 struct adapter *adapter = dev->priv; 1363 unregister_netdev(adapter->port[i].dev);
1393 1364 }
1394 for_each_port(adapter, i)
1395 if (test_bit(i, &adapter->registered_device_map))
1396 unregister_netdev(adapter->port[i].dev);
1397 1365
1398 t1_free_sw_modules(adapter); 1366 t1_free_sw_modules(adapter);
1399 iounmap(adapter->regs); 1367 iounmap(adapter->regs);
1400 while (--i >= 0)
1401 if (adapter->port[i].dev)
1402 free_netdev(adapter->port[i].dev);
1403 1368
1404 pci_release_regions(pdev); 1369 while (--i >= 0) {
1405 pci_disable_device(pdev); 1370 if (adapter->port[i].dev)
1406 pci_set_drvdata(pdev, NULL); 1371 free_netdev(adapter->port[i].dev);
1407 t1_sw_reset(pdev);
1408 } 1372 }
1373
1374 pci_release_regions(pdev);
1375 pci_disable_device(pdev);
1376 pci_set_drvdata(pdev, NULL);
1377 t1_sw_reset(pdev);
1409} 1378}
1410 1379
1411static struct pci_driver driver = { 1380static struct pci_driver driver = {
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
index 9ebecaa97d31..eef655c827d9 100644
--- a/drivers/net/chelsio/elmer0.h
+++ b/drivers/net/chelsio/elmer0.h
@@ -46,14 +46,14 @@ enum {
46}; 46};
47 47
48/* ELMER0 registers */ 48/* ELMER0 registers */
49#define A_ELMER0_VERSION 0x100000 49#define A_ELMER0_VERSION 0x100000
50#define A_ELMER0_PHY_CFG 0x100004 50#define A_ELMER0_PHY_CFG 0x100004
51#define A_ELMER0_INT_ENABLE 0x100008 51#define A_ELMER0_INT_ENABLE 0x100008
52#define A_ELMER0_INT_CAUSE 0x10000c 52#define A_ELMER0_INT_CAUSE 0x10000c
53#define A_ELMER0_GPI_CFG 0x100010 53#define A_ELMER0_GPI_CFG 0x100010
54#define A_ELMER0_GPI_STAT 0x100014 54#define A_ELMER0_GPI_STAT 0x100014
55#define A_ELMER0_GPO 0x100018 55#define A_ELMER0_GPO 0x100018
56#define A_ELMER0_PORT0_MI1_CFG 0x400000 56#define A_ELMER0_PORT0_MI1_CFG 0x400000
57 57
58#define S_MI1_MDI_ENABLE 0 58#define S_MI1_MDI_ENABLE 0
59#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE) 59#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
@@ -111,18 +111,18 @@ enum {
111#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY) 111#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
112#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U) 112#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
113 113
114#define A_ELMER0_PORT1_MI1_CFG 0x500000 114#define A_ELMER0_PORT1_MI1_CFG 0x500000
115#define A_ELMER0_PORT1_MI1_ADDR 0x500004 115#define A_ELMER0_PORT1_MI1_ADDR 0x500004
116#define A_ELMER0_PORT1_MI1_DATA 0x500008 116#define A_ELMER0_PORT1_MI1_DATA 0x500008
117#define A_ELMER0_PORT1_MI1_OP 0x50000c 117#define A_ELMER0_PORT1_MI1_OP 0x50000c
118#define A_ELMER0_PORT2_MI1_CFG 0x600000 118#define A_ELMER0_PORT2_MI1_CFG 0x600000
119#define A_ELMER0_PORT2_MI1_ADDR 0x600004 119#define A_ELMER0_PORT2_MI1_ADDR 0x600004
120#define A_ELMER0_PORT2_MI1_DATA 0x600008 120#define A_ELMER0_PORT2_MI1_DATA 0x600008
121#define A_ELMER0_PORT2_MI1_OP 0x60000c 121#define A_ELMER0_PORT2_MI1_OP 0x60000c
122#define A_ELMER0_PORT3_MI1_CFG 0x700000 122#define A_ELMER0_PORT3_MI1_CFG 0x700000
123#define A_ELMER0_PORT3_MI1_ADDR 0x700004 123#define A_ELMER0_PORT3_MI1_ADDR 0x700004
124#define A_ELMER0_PORT3_MI1_DATA 0x700008 124#define A_ELMER0_PORT3_MI1_DATA 0x700008
125#define A_ELMER0_PORT3_MI1_OP 0x70000c 125#define A_ELMER0_PORT3_MI1_OP 0x70000c
126 126
127/* Simple bit definition for GPI and GP0 registers. */ 127/* Simple bit definition for GPI and GP0 registers. */
128#define ELMER0_GP_BIT0 0x0001 128#define ELMER0_GP_BIT0 0x0001
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 4192f0f5b3ee..d7c5406a6c3f 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -202,9 +202,9 @@ static void espi_setup_for_pm3393(adapter_t *adapter)
202 202
203static void espi_setup_for_vsc7321(adapter_t *adapter) 203static void espi_setup_for_vsc7321(adapter_t *adapter)
204{ 204{
205 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); 205 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
206 writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1); 206 writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
207 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); 207 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
208 writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); 208 writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
209 writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); 209 writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
210 writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); 210 writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
@@ -247,10 +247,10 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
247 writel(V_OUT_OF_SYNC_COUNT(4) | 247 writel(V_OUT_OF_SYNC_COUNT(4) |
248 V_DIP2_PARITY_ERR_THRES(3) | 248 V_DIP2_PARITY_ERR_THRES(3) |
249 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); 249 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
250 writel(nports == 4 ? 0x200040 : 0x1000080, 250 writel(nports == 4 ? 0x200040 : 0x1000080,
251 adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); 251 adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
252 } else 252 } else
253 writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); 253 writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
254 254
255 if (mac_type == CHBT_MAC_PM3393) 255 if (mac_type == CHBT_MAC_PM3393)
256 espi_setup_for_pm3393(adapter); 256 espi_setup_for_pm3393(adapter);
@@ -301,7 +301,8 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
301{ 301{
302 struct peespi *espi = adapter->espi; 302 struct peespi *espi = adapter->espi;
303 303
304 if (!is_T2(adapter)) return; 304 if (!is_T2(adapter))
305 return;
305 spin_lock(&espi->lock); 306 spin_lock(&espi->lock);
306 espi->misc_ctrl = (val & ~MON_MASK) | 307 espi->misc_ctrl = (val & ~MON_MASK) |
307 (espi->misc_ctrl & MON_MASK); 308 (espi->misc_ctrl & MON_MASK);
@@ -340,32 +341,31 @@ u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
340 * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in 341 * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in
341 * one shot, since there is no per port counter on the out side. 342 * one shot, since there is no per port counter on the out side.
342 */ 343 */
343int 344int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
344t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
345{ 345{
346 struct peespi *espi = adapter->espi; 346 struct peespi *espi = adapter->espi;
347 u8 i, nport = (u8)adapter->params.nports; 347 u8 i, nport = (u8)adapter->params.nports;
348 348
349 if (!wait) { 349 if (!wait) {
350 if (!spin_trylock(&espi->lock)) 350 if (!spin_trylock(&espi->lock))
351 return -1; 351 return -1;
352 } else 352 } else
353 spin_lock(&espi->lock); 353 spin_lock(&espi->lock);
354 354
355 if ( (espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION ) { 355 if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
356 espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) | 356 espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
357 F_MONITORED_DIRECTION; 357 F_MONITORED_DIRECTION;
358 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); 358 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
359 } 359 }
360 for (i = 0 ; i < nport; i++, valp++) { 360 for (i = 0 ; i < nport; i++, valp++) {
361 if (i) { 361 if (i) {
362 writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i), 362 writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
363 adapter->regs + A_ESPI_MISC_CONTROL); 363 adapter->regs + A_ESPI_MISC_CONTROL);
364 } 364 }
365 *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3); 365 *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
366 } 366 }
367 367
368 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); 368 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
369 spin_unlock(&espi->lock); 369 spin_unlock(&espi->lock);
370 return 0; 370 return 0;
371} 371}
diff --git a/drivers/net/chelsio/fpga_defs.h b/drivers/net/chelsio/fpga_defs.h
index 17a3c2ba36a3..ccdb2bc9ae98 100644
--- a/drivers/net/chelsio/fpga_defs.h
+++ b/drivers/net/chelsio/fpga_defs.h
@@ -98,9 +98,9 @@
98#define A_MI0_DATA_INT 0xb10 98#define A_MI0_DATA_INT 0xb10
99 99
100/* GMAC registers */ 100/* GMAC registers */
101#define A_GMAC_MACID_LO 0x28 101#define A_GMAC_MACID_LO 0x28
102#define A_GMAC_MACID_HI 0x2c 102#define A_GMAC_MACID_HI 0x2c
103#define A_GMAC_CSR 0x30 103#define A_GMAC_CSR 0x30
104 104
105#define S_INTERFACE 0 105#define S_INTERFACE 0
106#define M_INTERFACE 0x3 106#define M_INTERFACE 0x3
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
index a2b8ad9b5535..006a2eb2d362 100644
--- a/drivers/net/chelsio/gmac.h
+++ b/drivers/net/chelsio/gmac.h
@@ -42,8 +42,15 @@
42 42
43#include "common.h" 43#include "common.h"
44 44
45enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL }; 45enum {
46enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 }; 46 MAC_STATS_UPDATE_FAST,
47 MAC_STATS_UPDATE_FULL
48};
49
50enum {
51 MAC_DIRECTION_RX = 1,
52 MAC_DIRECTION_TX = 2
53};
47 54
48struct cmac_statistics { 55struct cmac_statistics {
49 /* Transmit */ 56 /* Transmit */
diff --git a/drivers/net/chelsio/ixf1010.c b/drivers/net/chelsio/ixf1010.c
index 5b8f144e83d4..10b2a9a19006 100644
--- a/drivers/net/chelsio/ixf1010.c
+++ b/drivers/net/chelsio/ixf1010.c
@@ -145,48 +145,61 @@ static void disable_port(struct cmac *mac)
145 t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val); 145 t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val);
146} 146}
147 147
148#define RMON_UPDATE(mac, name, stat_name) \
149 t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
150 (mac)->stats.stat_name += val;
151
152/* 148/*
153 * Read the current values of the RMON counters and add them to the cumulative 149 * Read the current values of the RMON counters and add them to the cumulative
154 * port statistics. The HW RMON counters are cleared by this operation. 150 * port statistics. The HW RMON counters are cleared by this operation.
155 */ 151 */
156static void port_stats_update(struct cmac *mac) 152static void port_stats_update(struct cmac *mac)
157{ 153{
158 u32 val; 154 static struct {
155 unsigned int reg;
156 unsigned int offset;
157 } hw_stats[] = {
158
159#define HW_STAT(name, stat_name) \
160 { REG_##name, \
161 (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
162
163 /* Rx stats */
164 HW_STAT(RxOctetsTotalOK, RxOctetsOK),
165 HW_STAT(RxOctetsBad, RxOctetsBad),
166 HW_STAT(RxUCPkts, RxUnicastFramesOK),
167 HW_STAT(RxMCPkts, RxMulticastFramesOK),
168 HW_STAT(RxBCPkts, RxBroadcastFramesOK),
169 HW_STAT(RxJumboPkts, RxJumboFramesOK),
170 HW_STAT(RxFCSErrors, RxFCSErrors),
171 HW_STAT(RxAlignErrors, RxAlignErrors),
172 HW_STAT(RxLongErrors, RxFrameTooLongErrors),
173 HW_STAT(RxVeryLongErrors, RxFrameTooLongErrors),
174 HW_STAT(RxPauseMacControlCounter, RxPauseFrames),
175 HW_STAT(RxDataErrors, RxDataErrors),
176 HW_STAT(RxJabberErrors, RxJabberErrors),
177 HW_STAT(RxRuntErrors, RxRuntErrors),
178 HW_STAT(RxShortErrors, RxRuntErrors),
179 HW_STAT(RxSequenceErrors, RxSequenceErrors),
180 HW_STAT(RxSymbolErrors, RxSymbolErrors),
181
182 /* Tx stats (skip collision stats as we are full-duplex only) */
183 HW_STAT(TxOctetsTotalOK, TxOctetsOK),
184 HW_STAT(TxOctetsBad, TxOctetsBad),
185 HW_STAT(TxUCPkts, TxUnicastFramesOK),
186 HW_STAT(TxMCPkts, TxMulticastFramesOK),
187 HW_STAT(TxBCPkts, TxBroadcastFramesOK),
188 HW_STAT(TxJumboPkts, TxJumboFramesOK),
189 HW_STAT(TxPauseFrames, TxPauseFrames),
190 HW_STAT(TxExcessiveLengthDrop, TxLengthErrors),
191 HW_STAT(TxUnderrun, TxUnderrun),
192 HW_STAT(TxCRCErrors, TxFCSErrors)
193 }, *p = hw_stats;
194 u64 *stats = (u64 *) &mac->stats;
195 unsigned int i;
196
197 for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
198 u32 val;
159 199
160 /* Rx stats */ 200 t1_tpi_read(mac->adapter, MACREG(mac, p->reg), &val);
161 RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK); 201 stats[p->offset] += val;
162 RMON_UPDATE(mac, RxOctetsBad, RxOctetsBad); 202 }
163 RMON_UPDATE(mac, RxUCPkts, RxUnicastFramesOK);
164 RMON_UPDATE(mac, RxMCPkts, RxMulticastFramesOK);
165 RMON_UPDATE(mac, RxBCPkts, RxBroadcastFramesOK);
166 RMON_UPDATE(mac, RxJumboPkts, RxJumboFramesOK);
167 RMON_UPDATE(mac, RxFCSErrors, RxFCSErrors);
168 RMON_UPDATE(mac, RxAlignErrors, RxAlignErrors);
169 RMON_UPDATE(mac, RxLongErrors, RxFrameTooLongErrors);
170 RMON_UPDATE(mac, RxVeryLongErrors, RxFrameTooLongErrors);
171 RMON_UPDATE(mac, RxPauseMacControlCounter, RxPauseFrames);
172 RMON_UPDATE(mac, RxDataErrors, RxDataErrors);
173 RMON_UPDATE(mac, RxJabberErrors, RxJabberErrors);
174 RMON_UPDATE(mac, RxRuntErrors, RxRuntErrors);
175 RMON_UPDATE(mac, RxShortErrors, RxRuntErrors);
176 RMON_UPDATE(mac, RxSequenceErrors, RxSequenceErrors);
177 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
178
179 /* Tx stats (skip collision stats as we are full-duplex only) */
180 RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK);
181 RMON_UPDATE(mac, TxOctetsBad, TxOctetsBad);
182 RMON_UPDATE(mac, TxUCPkts, TxUnicastFramesOK);
183 RMON_UPDATE(mac, TxMCPkts, TxMulticastFramesOK);
184 RMON_UPDATE(mac, TxBCPkts, TxBroadcastFramesOK);
185 RMON_UPDATE(mac, TxJumboPkts, TxJumboFramesOK);
186 RMON_UPDATE(mac, TxPauseFrames, TxPauseFrames);
187 RMON_UPDATE(mac, TxExcessiveLengthDrop, TxLengthErrors);
188 RMON_UPDATE(mac, TxUnderrun, TxUnderrun);
189 RMON_UPDATE(mac, TxCRCErrors, TxFCSErrors);
190} 203}
191 204
192/* No-op interrupt operation as this MAC does not support interrupts */ 205/* No-op interrupt operation as this MAC does not support interrupts */
@@ -273,7 +286,8 @@ static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
273static int mac_set_mtu(struct cmac *mac, int mtu) 286static int mac_set_mtu(struct cmac *mac, int mtu)
274{ 287{
275 	/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */ 288 	/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
276 if (mtu > (MAX_FRAME_SIZE - 14 - 4)) return -EINVAL; 289 if (mtu > (MAX_FRAME_SIZE - 14 - 4))
290 return -EINVAL;
277 t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE), 291 t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE),
278 mtu + 14 + 4); 292 mtu + 14 + 4);
279 return 0; 293 return 0;
@@ -357,8 +371,8 @@ static void enable_port(struct cmac *mac)
357 val |= (1 << index); 371 val |= (1 << index);
358 t1_tpi_write(adapter, REG_PORT_ENABLE, val); 372 t1_tpi_write(adapter, REG_PORT_ENABLE, val);
359 373
360 index <<= 2; 374 index <<= 2;
361 if (is_T2(adapter)) { 375 if (is_T2(adapter)) {
362 /* T204: set the Fifo water level & threshold */ 376 /* T204: set the Fifo water level & threshold */
363 t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740); 377 t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740);
364 t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730); 378 t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730);
@@ -389,6 +403,10 @@ static int mac_disable(struct cmac *mac, int which)
389 return 0; 403 return 0;
390} 404}
391 405
406#define RMON_UPDATE(mac, name, stat_name) \
407 t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
408 (mac)->stats.stat_name += val;
409
392/* 410/*
393 * This function is called periodically to accumulate the current values of the 411 * This function is called periodically to accumulate the current values of the
394 * RMON counters into the port statistics. Since the counters are only 32 bits 412 * RMON counters into the port statistics. Since the counters are only 32 bits
@@ -460,10 +478,12 @@ static struct cmac *ixf1010_mac_create(adapter_t *adapter, int index)
460 struct cmac *mac; 478 struct cmac *mac;
461 u32 val; 479 u32 val;
462 480
463 if (index > 9) return NULL; 481 if (index > 9)
482 return NULL;
464 483
465 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); 484 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
466 if (!mac) return NULL; 485 if (!mac)
486 return NULL;
467 487
468 mac->ops = &ixf1010_ops; 488 mac->ops = &ixf1010_ops;
469 mac->instance = (cmac_instance *)(mac + 1); 489 mac->instance = (cmac_instance *)(mac + 1);
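The ixf1010 hunks above replace the RMON_UPDATE macro with a table of {register, counter offset} pairs that a single loop walks. Below is a minimal standalone sketch of that table-driven accumulation pattern; it uses offsetof() and hypothetical register/stat names rather than the driver's own (the driver derives the offsets via pointer arithmetic on a NULL struct instead).

#include <stddef.h>
#include <stdint.h>

struct port_stats {
	uint64_t rx_octets;
	uint64_t rx_frames;
	uint64_t tx_octets;
	uint64_t tx_frames;
};

/* Hypothetical access to a 32-bit, clear-on-read hardware counter. */
extern uint32_t hw_read_counter(unsigned int reg);

#define REG_RX_OCTETS	0x00
#define REG_RX_FRAMES	0x04
#define REG_TX_OCTETS	0x08
#define REG_TX_FRAMES	0x0c

static void port_stats_update(struct port_stats *s)
{
	/* One entry per hardware counter: the register to read and where
	 * its 64-bit accumulator lives inside struct port_stats. */
	static const struct {
		unsigned int reg;
		size_t offset;
	} map[] = {
		{ REG_RX_OCTETS, offsetof(struct port_stats, rx_octets) },
		{ REG_RX_FRAMES, offsetof(struct port_stats, rx_frames) },
		{ REG_TX_OCTETS, offsetof(struct port_stats, tx_octets) },
		{ REG_TX_FRAMES, offsetof(struct port_stats, tx_frames) },
	};
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		uint64_t *ctr = (uint64_t *)((char *)s + map[i].offset);

		*ctr += hw_read_counter(map[i].reg);
	}
}

Keeping the register-to-counter mapping in data means adding a counter is a one-line table change rather than another macro invocation.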
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c
index 28ac93ff7c4f..5867e3b0a887 100644
--- a/drivers/net/chelsio/mv88e1xxx.c
+++ b/drivers/net/chelsio/mv88e1xxx.c
@@ -73,9 +73,8 @@ static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
73 73
74 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 74 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
75 elmer |= ELMER0_GP_BIT1; 75 elmer |= ELMER0_GP_BIT1;
76 if (is_T2(cphy->adapter)) { 76 if (is_T2(cphy->adapter))
77 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 77 elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4;
78 }
79 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 78 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
80 } 79 }
81 return 0; 80 return 0;
@@ -92,9 +91,8 @@ static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
92 91
93 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 92 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
94 elmer &= ~ELMER0_GP_BIT1; 93 elmer &= ~ELMER0_GP_BIT1;
95 if (is_T2(cphy->adapter)) { 94 if (is_T2(cphy->adapter))
96 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); 95 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
97 }
98 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 96 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
99 } 97 }
100 return 0; 98 return 0;
@@ -112,9 +110,8 @@ static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
112 if (t1_is_asic(cphy->adapter)) { 110 if (t1_is_asic(cphy->adapter)) {
113 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); 111 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
114 elmer |= ELMER0_GP_BIT1; 112 elmer |= ELMER0_GP_BIT1;
115 if (is_T2(cphy->adapter)) { 113 if (is_T2(cphy->adapter))
116 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 114 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
117 }
118 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); 115 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
119 } 116 }
120 return 0; 117 return 0;
@@ -300,7 +297,7 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
300 297
301 /* 298 /*
302 * Loop until cause reads zero. Need to handle bouncing interrupts. 299 * Loop until cause reads zero. Need to handle bouncing interrupts.
303 */ 300 */
304 while (1) { 301 while (1) {
305 u32 cause; 302 u32 cause;
306 303
@@ -308,15 +305,16 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
308 MV88E1XXX_INTERRUPT_STATUS_REGISTER, 305 MV88E1XXX_INTERRUPT_STATUS_REGISTER,
309 &cause); 306 &cause);
310 cause &= INTR_ENABLE_MASK; 307 cause &= INTR_ENABLE_MASK;
311 if (!cause) break; 308 if (!cause)
309 break;
312 310
313 if (cause & MV88E1XXX_INTR_LINK_CHNG) { 311 if (cause & MV88E1XXX_INTR_LINK_CHNG) {
314 (void) simple_mdio_read(cphy, 312 (void) simple_mdio_read(cphy,
315 MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); 313 MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
316 314
317 if (status & MV88E1XXX_INTR_LINK_CHNG) { 315 if (status & MV88E1XXX_INTR_LINK_CHNG)
318 cphy->state |= PHY_LINK_UP; 316 cphy->state |= PHY_LINK_UP;
319 } else { 317 else {
320 cphy->state &= ~PHY_LINK_UP; 318 cphy->state &= ~PHY_LINK_UP;
321 if (cphy->state & PHY_AUTONEG_EN) 319 if (cphy->state & PHY_AUTONEG_EN)
322 cphy->state &= ~PHY_AUTONEG_RDY; 320 cphy->state &= ~PHY_AUTONEG_RDY;
@@ -360,7 +358,8 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
360{ 358{
361 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); 359 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
362 360
363 if (!cphy) return NULL; 361 if (!cphy)
362 return NULL;
364 363
365 cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops); 364 cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);
366 365
@@ -377,11 +376,11 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
377 } 376 }
378 (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */ 377 (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */
379 378
380 /* LED */ 379 /* LED */
381 if (is_T2(adapter)) { 380 if (is_T2(adapter)) {
382 (void) simple_mdio_write(cphy, 381 (void) simple_mdio_write(cphy,
383 MV88E1XXX_LED_CONTROL_REGISTER, 0x1); 382 MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
384 } 383 }
385 384
386 return cphy; 385 return cphy;
387} 386}
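The interrupt-handler hunk above keeps the existing loop that re-reads the PHY interrupt status register until no enabled cause bits remain, so interrupts that bounce while earlier ones are being serviced are not lost. A hedged sketch of that loop shape, with hypothetical MDIO helpers and register numbers (not this driver's API):

#include <stdint.h>

#define INTR_LINK_CHANGE	(1u << 10)		/* illustrative bit position */
#define INTR_ENABLE_MASK	INTR_LINK_CHANGE	/* or'ed set of enabled causes */
#define PHY_INTR_STATUS_REG	19			/* illustrative register number */

/* Hypothetical MDIO accessor; not this driver's API. */
extern uint32_t mdio_read(int phy_addr, int reg);

static void phy_interrupt_handler(int phy_addr)
{
	for (;;) {
		uint32_t cause = mdio_read(phy_addr, PHY_INTR_STATUS_REG);

		cause &= INTR_ENABLE_MASK;
		if (!cause)		/* nothing pending any more: done */
			break;

		if (cause & INTR_LINK_CHANGE) {
			/* re-read link state and report it up the stack */
		}
		/* handle any other enabled causes here */
	}
}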
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 82fed1dd5005..87dde3e60046 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -10,25 +10,25 @@ static int my3126_reset(struct cphy *cphy, int wait)
10 * This can be done through registers. It is not required since 10 * This can be done through registers. It is not required since
11 * a full chip reset is used. 11 * a full chip reset is used.
12 */ 12 */
13 return (0); 13 return 0;
14} 14}
15 15
16static int my3126_interrupt_enable(struct cphy *cphy) 16static int my3126_interrupt_enable(struct cphy *cphy)
17{ 17{
18 schedule_delayed_work(&cphy->phy_update, HZ/30); 18 schedule_delayed_work(&cphy->phy_update, HZ/30);
19 t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo); 19 t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
20 return (0); 20 return 0;
21} 21}
22 22
23static int my3126_interrupt_disable(struct cphy *cphy) 23static int my3126_interrupt_disable(struct cphy *cphy)
24{ 24{
25 cancel_rearming_delayed_work(&cphy->phy_update); 25 cancel_rearming_delayed_work(&cphy->phy_update);
26 return (0); 26 return 0;
27} 27}
28 28
29static int my3126_interrupt_clear(struct cphy *cphy) 29static int my3126_interrupt_clear(struct cphy *cphy)
30{ 30{
31 return (0); 31 return 0;
32} 32}
33 33
34#define OFFSET(REG_ADDR) (REG_ADDR << 2) 34#define OFFSET(REG_ADDR) (REG_ADDR << 2)
@@ -102,7 +102,7 @@ static void my3216_poll(struct work_struct *work)
102 102
103static int my3126_set_loopback(struct cphy *cphy, int on) 103static int my3126_set_loopback(struct cphy *cphy, int on)
104{ 104{
105 return (0); 105 return 0;
106} 106}
107 107
108/* To check the activity LED */ 108/* To check the activity LED */
@@ -146,7 +146,7 @@ static int my3126_get_link_status(struct cphy *cphy,
146 if (fc) 146 if (fc)
147 *fc = PAUSE_RX | PAUSE_TX; 147 *fc = PAUSE_RX | PAUSE_TX;
148 148
149 return (0); 149 return 0;
150} 150}
151 151
152static void my3126_destroy(struct cphy *cphy) 152static void my3126_destroy(struct cphy *cphy)
@@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
177 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); 177 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
178 cphy->bmsr = 0; 178 cphy->bmsr = 0;
179 179
180 return (cphy); 180 return cphy;
181} 181}
182 182
183/* Chip Reset */ 183/* Chip Reset */
@@ -198,7 +198,7 @@ static int my3126_phy_reset(adapter_t * adapter)
198 val |= 0x8000; 198 val |= 0x8000;
199 t1_tpi_write(adapter, A_ELMER0_GPO, val); 199 t1_tpi_write(adapter, A_ELMER0_GPO, val);
200 udelay(100); 200 udelay(100);
201 return (0); 201 return 0;
202} 202}
203 203
204struct gphy t1_my3126_ops = { 204struct gphy t1_my3126_ops = {
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 63cabeb98afe..69129edeefd6 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -446,17 +446,51 @@ static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
446 *val += 1ull << 40; 446 *val += 1ull << 40;
447} 447}
448 448
449#define RMON_UPDATE(mac, name, stat_name) \
450 pm3393_rmon_update((mac)->adapter, OFFSET(name), \
451 &(mac)->stats.stat_name, \
452 (ro &((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)))
453
454
455static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, 449static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
456 int flag) 450 int flag)
457{ 451{
458 u64 ro; 452 static struct {
459 u32 val0, val1, val2, val3; 453 unsigned int reg;
454 unsigned int offset;
455 } hw_stats [] = {
456
457#define HW_STAT(name, stat_name) \
458 { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
459
460 /* Rx stats */
461 HW_STAT(RxOctetsReceivedOK, RxOctetsOK),
462 HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK),
463 HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK),
464 HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK),
465 HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames),
466 HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors),
467 HW_STAT(RxFramesLostDueToInternalMACErrors,
468 RxInternalMACRcvError),
469 HW_STAT(RxSymbolErrors, RxSymbolErrors),
470 HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors),
471 HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors),
472 HW_STAT(RxJabbers, RxJabberErrors),
473 HW_STAT(RxFragments, RxRuntErrors),
474 HW_STAT(RxUndersizedFrames, RxRuntErrors),
475 HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK),
476 HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK),
477
478 /* Tx stats */
479 HW_STAT(TxOctetsTransmittedOK, TxOctetsOK),
480 HW_STAT(TxFramesLostDueToInternalMACTransmissionError,
481 TxInternalMACXmitError),
482 HW_STAT(TxTransmitSystemError, TxFCSErrors),
483 HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK),
484 HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK),
485 HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK),
486 HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames),
487 HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK),
488 HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK)
489 }, *p = hw_stats;
490 u64 ro;
491 u32 val0, val1, val2, val3;
492 u64 *stats = (u64 *) &mac->stats;
493 unsigned int i;
460 494
461 /* Snap the counters */ 495 /* Snap the counters */
462 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, 496 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
@@ -470,35 +504,14 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
470 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | 504 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
471 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); 505 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
472 506
473 /* Rx stats */ 507 for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
474 RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); 508 unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW;
475 RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); 509
476 RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); 510 pm3393_rmon_update((mac)->adapter, OFFSET(p->reg),
477 RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); 511 stats + p->offset, ro & (reg >> 2));
478 RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); 512 }
479 RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); 513
480 RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, 514
481 RxInternalMACRcvError);
482 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
483 RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
484 RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
485 RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
486 RMON_UPDATE(mac, RxFragments, RxRuntErrors);
487 RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
488 RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
489 RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
490
491 /* Tx stats */
492 RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
493 RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
494 TxInternalMACXmitError);
495 RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
496 RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
497 RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
498 RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
499 RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
500 RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
501 RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
502 515
503 return &mac->stats; 516 return &mac->stats;
504} 517}
@@ -534,9 +547,9 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
534 /* Store local copy */ 547 /* Store local copy */
535 memcpy(cmac->instance->mac_addr, ma, 6); 548 memcpy(cmac->instance->mac_addr, ma, 6);
536 549
537 lo = ((u32) ma[1] << 8) | (u32) ma[0]; 550 lo = ((u32) ma[1] << 8) | (u32) ma[0];
538 mid = ((u32) ma[3] << 8) | (u32) ma[2]; 551 mid = ((u32) ma[3] << 8) | (u32) ma[2];
539 hi = ((u32) ma[5] << 8) | (u32) ma[4]; 552 hi = ((u32) ma[5] << 8) | (u32) ma[4];
540 553
541 /* Disable Rx/Tx MAC before configuring it. */ 554 /* Disable Rx/Tx MAC before configuring it. */
542 if (enabled) 555 if (enabled)
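pm3393_update_statistics() above snaps the MSTAT block and then folds each 40-bit hardware counter into a 64-bit software counter, using the per-counter rollover bits collected in 'ro' to account for wraps. A generic sketch of the same widening idea follows; it detects a wrap by unsigned subtraction against the previous raw sample instead of a hardware rollover flag, and assumes the counter is polled often enough to wrap at most once between polls. Names are illustrative, not the driver's API.

#include <stdint.h>

/* Software view of one narrow, wrapping hardware counter, widened to
 * 64 bits across successive polls. */
struct wide_counter {
	uint64_t total;		/* accumulated 64-bit value            */
	uint32_t last_raw;	/* raw hardware value at the last poll */
};

/* Hypothetical raw counter read; not this driver's API. */
extern uint32_t hw_read_raw(unsigned int reg);

static void wide_counter_poll(struct wide_counter *c, unsigned int reg)
{
	uint32_t raw = hw_read_raw(reg);

	/* Unsigned subtraction yields the delta even across one wrap. */
	c->total += (uint32_t)(raw - c->last_raw);
	c->last_raw = raw;
}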
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 659cb2252e44..89a682702fa9 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -71,12 +71,9 @@
71#define SGE_FREEL_REFILL_THRESH 16 71#define SGE_FREEL_REFILL_THRESH 16
72#define SGE_RESPQ_E_N 1024 72#define SGE_RESPQ_E_N 1024
73#define SGE_INTRTIMER_NRES 1000 73#define SGE_INTRTIMER_NRES 1000
74#define SGE_RX_COPY_THRES 256
75#define SGE_RX_SM_BUF_SIZE 1536 74#define SGE_RX_SM_BUF_SIZE 1536
76#define SGE_TX_DESC_MAX_PLEN 16384 75#define SGE_TX_DESC_MAX_PLEN 16384
77 76
78# define SGE_RX_DROP_THRES 2
79
80#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) 77#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
81 78
82/* 79/*
@@ -85,10 +82,6 @@
85 */ 82 */
86#define TX_RECLAIM_PERIOD (HZ / 4) 83#define TX_RECLAIM_PERIOD (HZ / 4)
87 84
88#ifndef NET_IP_ALIGN
89# define NET_IP_ALIGN 2
90#endif
91
92#define M_CMD_LEN 0x7fffffff 85#define M_CMD_LEN 0x7fffffff
93#define V_CMD_LEN(v) (v) 86#define V_CMD_LEN(v) (v)
94#define G_CMD_LEN(v) ((v) & M_CMD_LEN) 87#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
@@ -195,7 +188,7 @@ struct cmdQ {
195 struct cmdQ_e *entries; /* HW command descriptor Q */ 188 struct cmdQ_e *entries; /* HW command descriptor Q */
196 struct cmdQ_ce *centries; /* SW command context descriptor Q */ 189 struct cmdQ_ce *centries; /* SW command context descriptor Q */
197 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ 190 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
198 spinlock_t lock; /* Lock to protect cmdQ enqueuing */ 191 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
199}; 192};
200 193
201struct freelQ { 194struct freelQ {
@@ -241,9 +234,9 @@ struct sched_port {
241/* Per T204 device */ 234/* Per T204 device */
242struct sched { 235struct sched {
243 ktime_t last_updated; /* last time quotas were computed */ 236 ktime_t last_updated; /* last time quotas were computed */
244 unsigned int max_avail; /* max bits to be sent to any port */ 237 unsigned int max_avail; /* max bits to be sent to any port */
245 unsigned int port; /* port index (round robin ports) */ 238 unsigned int port; /* port index (round robin ports) */
246 unsigned int num; /* num skbs in per port queues */ 239 unsigned int num; /* num skbs in per port queues */
247 struct sched_port p[MAX_NPORTS]; 240 struct sched_port p[MAX_NPORTS];
248 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ 241 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
249}; 242};
@@ -259,10 +252,10 @@ static void restart_sched(unsigned long);
259 * contention. 252 * contention.
260 */ 253 */
261struct sge { 254struct sge {
262 struct adapter *adapter; /* adapter backpointer */ 255 struct adapter *adapter; /* adapter backpointer */
263 struct net_device *netdev; /* netdevice backpointer */ 256 struct net_device *netdev; /* netdevice backpointer */
264 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ 257 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
265 struct respQ respQ; /* response Q */ 258 struct respQ respQ; /* response Q */
266 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ 259 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
267 unsigned int rx_pkt_pad; /* RX padding for L2 packets */ 260 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
268 unsigned int jumbo_fl; /* jumbo freelist Q index */ 261 unsigned int jumbo_fl; /* jumbo freelist Q index */
@@ -460,7 +453,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
460 if (credits < MAX_SKB_FRAGS + 1) 453 if (credits < MAX_SKB_FRAGS + 1)
461 goto out; 454 goto out;
462 455
463 again: 456again:
464 for (i = 0; i < MAX_NPORTS; i++) { 457 for (i = 0; i < MAX_NPORTS; i++) {
465 s->port = ++s->port & (MAX_NPORTS - 1); 458 s->port = ++s->port & (MAX_NPORTS - 1);
466 skbq = &s->p[s->port].skbq; 459 skbq = &s->p[s->port].skbq;
@@ -483,8 +476,8 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
483 if (update-- && sched_update_avail(sge)) 476 if (update-- && sched_update_avail(sge))
484 goto again; 477 goto again;
485 478
486 out: 479out:
487 /* If there are more pending skbs, we use the hardware to schedule us 480 /* If there are more pending skbs, we use the hardware to schedule us
488 * again. 481 * again.
489 */ 482 */
490 if (s->num && !skb) { 483 if (s->num && !skb) {
@@ -575,11 +568,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
575 q->size = p->freelQ_size[i]; 568 q->size = p->freelQ_size[i];
576 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; 569 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
577 size = sizeof(struct freelQ_e) * q->size; 570 size = sizeof(struct freelQ_e) * q->size;
578 q->entries = (struct freelQ_e *) 571 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
579 pci_alloc_consistent(pdev, size, &q->dma_addr);
580 if (!q->entries) 572 if (!q->entries)
581 goto err_no_mem; 573 goto err_no_mem;
582 memset(q->entries, 0, size); 574
583 size = sizeof(struct freelQ_ce) * q->size; 575 size = sizeof(struct freelQ_ce) * q->size;
584 q->centries = kzalloc(size, GFP_KERNEL); 576 q->centries = kzalloc(size, GFP_KERNEL);
585 if (!q->centries) 577 if (!q->centries)
@@ -613,11 +605,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
613 sge->respQ.size = SGE_RESPQ_E_N; 605 sge->respQ.size = SGE_RESPQ_E_N;
614 sge->respQ.credits = 0; 606 sge->respQ.credits = 0;
615 size = sizeof(struct respQ_e) * sge->respQ.size; 607 size = sizeof(struct respQ_e) * sge->respQ.size;
616 sge->respQ.entries = (struct respQ_e *) 608 sge->respQ.entries =
617 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); 609 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
618 if (!sge->respQ.entries) 610 if (!sge->respQ.entries)
619 goto err_no_mem; 611 goto err_no_mem;
620 memset(sge->respQ.entries, 0, size);
621 return 0; 612 return 0;
622 613
623err_no_mem: 614err_no_mem:
@@ -637,20 +628,12 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
637 q->in_use -= n; 628 q->in_use -= n;
638 ce = &q->centries[cidx]; 629 ce = &q->centries[cidx];
639 while (n--) { 630 while (n--) {
640 if (q->sop) { 631 if (likely(pci_unmap_len(ce, dma_len))) {
641 if (likely(pci_unmap_len(ce, dma_len))) { 632 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
642 pci_unmap_single(pdev, 633 pci_unmap_len(ce, dma_len),
643 pci_unmap_addr(ce, dma_addr), 634 PCI_DMA_TODEVICE);
644 pci_unmap_len(ce, dma_len), 635 if (q->sop)
645 PCI_DMA_TODEVICE);
646 q->sop = 0; 636 q->sop = 0;
647 }
648 } else {
649 if (likely(pci_unmap_len(ce, dma_len))) {
650 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
651 pci_unmap_len(ce, dma_len),
652 PCI_DMA_TODEVICE);
653 }
654 } 637 }
655 if (ce->skb) { 638 if (ce->skb) {
656 dev_kfree_skb_any(ce->skb); 639 dev_kfree_skb_any(ce->skb);
@@ -711,11 +694,10 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
711 q->stop_thres = 0; 694 q->stop_thres = 0;
712 spin_lock_init(&q->lock); 695 spin_lock_init(&q->lock);
713 size = sizeof(struct cmdQ_e) * q->size; 696 size = sizeof(struct cmdQ_e) * q->size;
714 q->entries = (struct cmdQ_e *) 697 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
715 pci_alloc_consistent(pdev, size, &q->dma_addr);
716 if (!q->entries) 698 if (!q->entries)
717 goto err_no_mem; 699 goto err_no_mem;
718 memset(q->entries, 0, size); 700
719 size = sizeof(struct cmdQ_ce) * q->size; 701 size = sizeof(struct cmdQ_ce) * q->size;
720 q->centries = kzalloc(size, GFP_KERNEL); 702 q->centries = kzalloc(size, GFP_KERNEL);
721 if (!q->centries) 703 if (!q->centries)
@@ -770,7 +752,7 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
770static void configure_sge(struct sge *sge, struct sge_params *p) 752static void configure_sge(struct sge *sge, struct sge_params *p)
771{ 753{
772 struct adapter *ap = sge->adapter; 754 struct adapter *ap = sge->adapter;
773 755
774 writel(0, ap->regs + A_SG_CONTROL); 756 writel(0, ap->regs + A_SG_CONTROL);
775 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, 757 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
776 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); 758 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
@@ -850,7 +832,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
850 struct freelQ_e *e = &q->entries[q->pidx]; 832 struct freelQ_e *e = &q->entries[q->pidx];
851 unsigned int dma_len = q->rx_buffer_size - q->dma_offset; 833 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
852 834
853
854 while (q->credits < q->size) { 835 while (q->credits < q->size) {
855 struct sk_buff *skb; 836 struct sk_buff *skb;
856 dma_addr_t mapping; 837 dma_addr_t mapping;
@@ -862,6 +843,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
862 skb_reserve(skb, q->dma_offset); 843 skb_reserve(skb, q->dma_offset);
863 mapping = pci_map_single(pdev, skb->data, dma_len, 844 mapping = pci_map_single(pdev, skb->data, dma_len,
864 PCI_DMA_FROMDEVICE); 845 PCI_DMA_FROMDEVICE);
846 skb_reserve(skb, sge->rx_pkt_pad);
847
865 ce->skb = skb; 848 ce->skb = skb;
866 pci_unmap_addr_set(ce, dma_addr, mapping); 849 pci_unmap_addr_set(ce, dma_addr, mapping);
867 pci_unmap_len_set(ce, dma_len, dma_len); 850 pci_unmap_len_set(ce, dma_len, dma_len);
@@ -881,7 +864,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
881 } 864 }
882 q->credits++; 865 q->credits++;
883 } 866 }
884
885} 867}
886 868
887/* 869/*
@@ -1041,6 +1023,10 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
1041 } 1023 }
1042} 1024}
1043 1025
1026static int copybreak __read_mostly = 256;
1027module_param(copybreak, int, 0);
1028MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1029
1044/** 1030/**
1045 * get_packet - return the next ingress packet buffer 1031 * get_packet - return the next ingress packet buffer
1046 * @pdev: the PCI device that received the packet 1032 * @pdev: the PCI device that received the packet
@@ -1060,45 +1046,42 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
1060 * be copied but there is no memory for the copy. 1046 * be copied but there is no memory for the copy.
1061 */ 1047 */
1062static inline struct sk_buff *get_packet(struct pci_dev *pdev, 1048static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1063 struct freelQ *fl, unsigned int len, 1049 struct freelQ *fl, unsigned int len)
1064 int dma_pad, int skb_pad,
1065 unsigned int copy_thres,
1066 unsigned int drop_thres)
1067{ 1050{
1068 struct sk_buff *skb; 1051 struct sk_buff *skb;
1069 struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1052 const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1070 1053
1071 if (len < copy_thres) { 1054 if (len < copybreak) {
1072 skb = alloc_skb(len + skb_pad, GFP_ATOMIC); 1055 skb = alloc_skb(len + 2, GFP_ATOMIC);
1073 if (likely(skb != NULL)) { 1056 if (!skb)
1074 skb_reserve(skb, skb_pad);
1075 skb_put(skb, len);
1076 pci_dma_sync_single_for_cpu(pdev,
1077 pci_unmap_addr(ce, dma_addr),
1078 pci_unmap_len(ce, dma_len),
1079 PCI_DMA_FROMDEVICE);
1080 memcpy(skb->data, ce->skb->data + dma_pad, len);
1081 pci_dma_sync_single_for_device(pdev,
1082 pci_unmap_addr(ce, dma_addr),
1083 pci_unmap_len(ce, dma_len),
1084 PCI_DMA_FROMDEVICE);
1085 } else if (!drop_thres)
1086 goto use_orig_buf; 1057 goto use_orig_buf;
1087 1058
1059 skb_reserve(skb, 2); /* align IP header */
1060 skb_put(skb, len);
1061 pci_dma_sync_single_for_cpu(pdev,
1062 pci_unmap_addr(ce, dma_addr),
1063 pci_unmap_len(ce, dma_len),
1064 PCI_DMA_FROMDEVICE);
1065 memcpy(skb->data, ce->skb->data, len);
1066 pci_dma_sync_single_for_device(pdev,
1067 pci_unmap_addr(ce, dma_addr),
1068 pci_unmap_len(ce, dma_len),
1069 PCI_DMA_FROMDEVICE);
1088 recycle_fl_buf(fl, fl->cidx); 1070 recycle_fl_buf(fl, fl->cidx);
1089 return skb; 1071 return skb;
1090 } 1072 }
1091 1073
1092 if (fl->credits < drop_thres) { 1074use_orig_buf:
1075 if (fl->credits < 2) {
1093 recycle_fl_buf(fl, fl->cidx); 1076 recycle_fl_buf(fl, fl->cidx);
1094 return NULL; 1077 return NULL;
1095 } 1078 }
1096 1079
1097use_orig_buf:
1098 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 1080 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
1099 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1081 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1100 skb = ce->skb; 1082 skb = ce->skb;
1101 skb_reserve(skb, dma_pad); 1083 prefetch(skb->data);
1084
1102 skb_put(skb, len); 1085 skb_put(skb, len);
1103 return skb; 1086 return skb;
1104} 1087}
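get_packet() above implements a receive "copybreak": frames shorter than the (now module-tunable) threshold are copied into a small, freshly allocated skb so the large DMA buffer can go straight back onto the free list, while larger frames hand the DMA buffer itself up the stack. A minimal sketch of that decision, with hypothetical helpers standing in for the free-list and delivery code:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int copybreak = 256;	/* copy threshold in bytes, tunable */

/* Hypothetical helpers standing in for free-list management and
 * delivery to the network stack; not this driver's API. */
extern void deliver_to_stack(uint8_t *data, size_t len, int is_dma_buf);
extern void recycle_dma_buf(uint8_t *buf);

/* 'buf' is the large buffer the NIC DMAed the frame into, 'len' the
 * frame length. */
static void rx_one_frame(uint8_t *buf, size_t len)
{
	if (len < (size_t)copybreak) {
		uint8_t *copy = malloc(len);

		if (copy) {
			memcpy(copy, buf, len);
			recycle_dma_buf(buf);	/* big buffer is reused at once */
			deliver_to_stack(copy, len, 0);
			return;
		}
		/* allocation failed: fall back to consuming the DMA buffer */
	}
	deliver_to_stack(buf, len, 1);	/* hand the DMA buffer itself up */
}

The threshold trades a small memcpy on short frames against tying up large, DMA-mapped buffers in the stack.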
@@ -1137,6 +1120,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1137static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) 1120static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1138{ 1121{
1139 unsigned int count = 0; 1122 unsigned int count = 0;
1123
1140 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1124 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1141 unsigned int nfrags = skb_shinfo(skb)->nr_frags; 1125 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1142 unsigned int i, len = skb->len - skb->data_len; 1126 unsigned int i, len = skb->len - skb->data_len;
@@ -1343,7 +1327,7 @@ static void restart_sched(unsigned long arg)
1343 while ((skb = sched_skb(sge, NULL, credits)) != NULL) { 1327 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1344 unsigned int genbit, pidx, count; 1328 unsigned int genbit, pidx, count;
1345 count = 1 + skb_shinfo(skb)->nr_frags; 1329 count = 1 + skb_shinfo(skb)->nr_frags;
1346 count += compute_large_page_tx_descs(skb); 1330 count += compute_large_page_tx_descs(skb);
1347 q->in_use += count; 1331 q->in_use += count;
1348 genbit = q->genbit; 1332 genbit = q->genbit;
1349 pidx = q->pidx; 1333 pidx = q->pidx;
@@ -1375,27 +1359,25 @@ static void restart_sched(unsigned long arg)
1375 * 1359 *
1376 * Process an ingress ethernet packet and deliver it to the stack. 1360 * Process an ingress ethernet packet and deliver it to the stack.
1377 */ 1361 */
1378static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) 1362static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1379{ 1363{
1380 struct sk_buff *skb; 1364 struct sk_buff *skb;
1381 struct cpl_rx_pkt *p; 1365 const struct cpl_rx_pkt *p;
1382 struct adapter *adapter = sge->adapter; 1366 struct adapter *adapter = sge->adapter;
1383 struct sge_port_stats *st; 1367 struct sge_port_stats *st;
1384 1368
1385 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad, 1369 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
1386 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
1387 SGE_RX_DROP_THRES);
1388 if (unlikely(!skb)) { 1370 if (unlikely(!skb)) {
1389 sge->stats.rx_drops++; 1371 sge->stats.rx_drops++;
1390 return 0; 1372 return;
1391 } 1373 }
1392 1374
1393 p = (struct cpl_rx_pkt *)skb->data; 1375 p = (const struct cpl_rx_pkt *) skb->data;
1394 skb_pull(skb, sizeof(*p));
1395 if (p->iff >= adapter->params.nports) { 1376 if (p->iff >= adapter->params.nports) {
1396 kfree_skb(skb); 1377 kfree_skb(skb);
1397 return 0; 1378 return;
1398 } 1379 }
1380 __skb_pull(skb, sizeof(*p));
1399 1381
1400 skb->dev = adapter->port[p->iff].dev; 1382 skb->dev = adapter->port[p->iff].dev;
1401 skb->dev->last_rx = jiffies; 1383 skb->dev->last_rx = jiffies;
@@ -1427,7 +1409,6 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1427 netif_rx(skb); 1409 netif_rx(skb);
1428#endif 1410#endif
1429 } 1411 }
1430 return 0;
1431} 1412}
1432 1413
1433/* 1414/*
@@ -1448,29 +1429,28 @@ static inline int enough_free_Tx_descs(const struct cmdQ *q)
1448static void restart_tx_queues(struct sge *sge) 1429static void restart_tx_queues(struct sge *sge)
1449{ 1430{
1450 struct adapter *adap = sge->adapter; 1431 struct adapter *adap = sge->adapter;
1432 int i;
1451 1433
1452 if (enough_free_Tx_descs(&sge->cmdQ[0])) { 1434 if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1453 int i; 1435 return;
1454 1436
1455 for_each_port(adap, i) { 1437 for_each_port(adap, i) {
1456 struct net_device *nd = adap->port[i].dev; 1438 struct net_device *nd = adap->port[i].dev;
1457 1439
1458 if (test_and_clear_bit(nd->if_port, 1440 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1459 &sge->stopped_tx_queues) && 1441 netif_running(nd)) {
1460 netif_running(nd)) { 1442 sge->stats.cmdQ_restarted[2]++;
1461 sge->stats.cmdQ_restarted[2]++; 1443 netif_wake_queue(nd);
1462 netif_wake_queue(nd);
1463 }
1464 } 1444 }
1465 } 1445 }
1466} 1446}
1467 1447
1468/* 1448/*
1469 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 1449 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1470 * information. 1450 * information.
1471 */ 1451 */
1472static unsigned int update_tx_info(struct adapter *adapter, 1452static unsigned int update_tx_info(struct adapter *adapter,
1473 unsigned int flags, 1453 unsigned int flags,
1474 unsigned int pr0) 1454 unsigned int pr0)
1475{ 1455{
1476 struct sge *sge = adapter->sge; 1456 struct sge *sge = adapter->sge;
@@ -1510,29 +1490,30 @@ static int process_responses(struct adapter *adapter, int budget)
1510 struct sge *sge = adapter->sge; 1490 struct sge *sge = adapter->sge;
1511 struct respQ *q = &sge->respQ; 1491 struct respQ *q = &sge->respQ;
1512 struct respQ_e *e = &q->entries[q->cidx]; 1492 struct respQ_e *e = &q->entries[q->cidx];
1513 int budget_left = budget; 1493 int done = 0;
1514 unsigned int flags = 0; 1494 unsigned int flags = 0;
1515 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1495 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1516
1517 1496
1518 while (likely(budget_left && e->GenerationBit == q->genbit)) { 1497 while (done < budget && e->GenerationBit == q->genbit) {
1519 flags |= e->Qsleeping; 1498 flags |= e->Qsleeping;
1520 1499
1521 cmdq_processed[0] += e->Cmdq0CreditReturn; 1500 cmdq_processed[0] += e->Cmdq0CreditReturn;
1522 cmdq_processed[1] += e->Cmdq1CreditReturn; 1501 cmdq_processed[1] += e->Cmdq1CreditReturn;
1523 1502
1524 /* We batch updates to the TX side to avoid cacheline 1503 /* We batch updates to the TX side to avoid cacheline
1525 * ping-pong of TX state information on MP where the sender 1504 * ping-pong of TX state information on MP where the sender
1526 * might run on a different CPU than this function... 1505 * might run on a different CPU than this function...
1527 */ 1506 */
1528 if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) { 1507 if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
1529 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1508 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1530 cmdq_processed[0] = 0; 1509 cmdq_processed[0] = 0;
1531 } 1510 }
1511
1532 if (unlikely(cmdq_processed[1] > 16)) { 1512 if (unlikely(cmdq_processed[1] > 16)) {
1533 sge->cmdQ[1].processed += cmdq_processed[1]; 1513 sge->cmdQ[1].processed += cmdq_processed[1];
1534 cmdq_processed[1] = 0; 1514 cmdq_processed[1] = 0;
1535 } 1515 }
1516
1536 if (likely(e->DataValid)) { 1517 if (likely(e->DataValid)) {
1537 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1518 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1538 1519
@@ -1542,12 +1523,16 @@ static int process_responses(struct adapter *adapter, int budget)
1542 else 1523 else
1543 sge_rx(sge, fl, e->BufferLength); 1524 sge_rx(sge, fl, e->BufferLength);
1544 1525
1526 ++done;
1527
1545 /* 1528 /*
1546 * Note: this depends on each packet consuming a 1529 * Note: this depends on each packet consuming a
1547 * single free-list buffer; cf. the BUG above. 1530 * single free-list buffer; cf. the BUG above.
1548 */ 1531 */
1549 if (++fl->cidx == fl->size) 1532 if (++fl->cidx == fl->size)
1550 fl->cidx = 0; 1533 fl->cidx = 0;
1534 prefetch(fl->centries[fl->cidx].skb);
1535
1551 if (unlikely(--fl->credits < 1536 if (unlikely(--fl->credits <
1552 fl->size - SGE_FREEL_REFILL_THRESH)) 1537 fl->size - SGE_FREEL_REFILL_THRESH))
1553 refill_free_list(sge, fl); 1538 refill_free_list(sge, fl);
@@ -1566,14 +1551,20 @@ static int process_responses(struct adapter *adapter, int budget)
1566 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); 1551 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1567 q->credits = 0; 1552 q->credits = 0;
1568 } 1553 }
1569 --budget_left;
1570 } 1554 }
1571 1555
1572 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1556 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1573 sge->cmdQ[1].processed += cmdq_processed[1]; 1557 sge->cmdQ[1].processed += cmdq_processed[1];
1574 1558
1575 budget -= budget_left; 1559 return done;
1576 return budget; 1560}
1561
1562static inline int responses_pending(const struct adapter *adapter)
1563{
1564 const struct respQ *Q = &adapter->sge->respQ;
1565 const struct respQ_e *e = &Q->entries[Q->cidx];
1566
1567 return (e->GenerationBit == Q->genbit);
1577} 1568}
1578 1569
1579#ifdef CONFIG_CHELSIO_T1_NAPI 1570#ifdef CONFIG_CHELSIO_T1_NAPI
@@ -1585,19 +1576,25 @@ static int process_responses(struct adapter *adapter, int budget)
1585 * which the caller must ensure is a valid pure response. Returns 1 if it 1576 * which the caller must ensure is a valid pure response. Returns 1 if it
1586 * encounters a valid data-carrying response, 0 otherwise. 1577 * encounters a valid data-carrying response, 0 otherwise.
1587 */ 1578 */
1588static int process_pure_responses(struct adapter *adapter, struct respQ_e *e) 1579static int process_pure_responses(struct adapter *adapter)
1589{ 1580{
1590 struct sge *sge = adapter->sge; 1581 struct sge *sge = adapter->sge;
1591 struct respQ *q = &sge->respQ; 1582 struct respQ *q = &sge->respQ;
1583 struct respQ_e *e = &q->entries[q->cidx];
1584 const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1592 unsigned int flags = 0; 1585 unsigned int flags = 0;
1593 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1586 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1594 1587
1588 prefetch(fl->centries[fl->cidx].skb);
1589 if (e->DataValid)
1590 return 1;
1591
1595 do { 1592 do {
1596 flags |= e->Qsleeping; 1593 flags |= e->Qsleeping;
1597 1594
1598 cmdq_processed[0] += e->Cmdq0CreditReturn; 1595 cmdq_processed[0] += e->Cmdq0CreditReturn;
1599 cmdq_processed[1] += e->Cmdq1CreditReturn; 1596 cmdq_processed[1] += e->Cmdq1CreditReturn;
1600 1597
1601 e++; 1598 e++;
1602 if (unlikely(++q->cidx == q->size)) { 1599 if (unlikely(++q->cidx == q->size)) {
1603 q->cidx = 0; 1600 q->cidx = 0;
@@ -1613,7 +1610,7 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1613 sge->stats.pure_rsps++; 1610 sge->stats.pure_rsps++;
1614 } while (e->GenerationBit == q->genbit && !e->DataValid); 1611 } while (e->GenerationBit == q->genbit && !e->DataValid);
1615 1612
1616 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1613 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1617 sge->cmdQ[1].processed += cmdq_processed[1]; 1614 sge->cmdQ[1].processed += cmdq_processed[1];
1618 1615
1619 return e->GenerationBit == q->genbit; 1616 return e->GenerationBit == q->genbit;
@@ -1627,23 +1624,20 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1627int t1_poll(struct net_device *dev, int *budget) 1624int t1_poll(struct net_device *dev, int *budget)
1628{ 1625{
1629 struct adapter *adapter = dev->priv; 1626 struct adapter *adapter = dev->priv;
1630 int effective_budget = min(*budget, dev->quota); 1627 int work_done;
1631 int work_done = process_responses(adapter, effective_budget);
1632 1628
1629 work_done = process_responses(adapter, min(*budget, dev->quota));
1633 *budget -= work_done; 1630 *budget -= work_done;
1634 dev->quota -= work_done; 1631 dev->quota -= work_done;
1635 1632
1636 if (work_done >= effective_budget) 1633 if (unlikely(responses_pending(adapter)))
1637 return 1; 1634 return 1;
1638 1635
1639 spin_lock_irq(&adapter->async_lock); 1636 netif_rx_complete(dev);
1640 __netif_rx_complete(dev);
1641 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1637 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1642 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
1643 adapter->regs + A_PL_ENABLE);
1644 spin_unlock_irq(&adapter->async_lock);
1645 1638
1646 return 0; 1639 return 0;
1640
1647} 1641}
1648 1642
1649/* 1643/*
@@ -1652,44 +1646,33 @@ int t1_poll(struct net_device *dev, int *budget)
1652irqreturn_t t1_interrupt(int irq, void *data) 1646irqreturn_t t1_interrupt(int irq, void *data)
1653{ 1647{
1654 struct adapter *adapter = data; 1648 struct adapter *adapter = data;
1655 struct net_device *dev = adapter->sge->netdev;
1656 struct sge *sge = adapter->sge; 1649 struct sge *sge = adapter->sge;
1657 u32 cause; 1650 int handled;
1658 int handled = 0;
1659 1651
1660 cause = readl(adapter->regs + A_PL_CAUSE); 1652 if (likely(responses_pending(adapter))) {
1661 if (cause == 0 || cause == ~0) 1653 struct net_device *dev = sge->netdev;
1662 return IRQ_NONE;
1663 1654
1664 spin_lock(&adapter->async_lock); 1655 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1665 if (cause & F_PL_INTR_SGE_DATA) {
1666 struct respQ *q = &adapter->sge->respQ;
1667 struct respQ_e *e = &q->entries[q->cidx];
1668
1669 handled = 1;
1670 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1671
1672 if (e->GenerationBit == q->genbit &&
1673 __netif_rx_schedule_prep(dev)) {
1674 if (e->DataValid || process_pure_responses(adapter, e)) {
1675 /* mask off data IRQ */
1676 writel(adapter->slow_intr_mask,
1677 adapter->regs + A_PL_ENABLE);
1678 __netif_rx_schedule(sge->netdev);
1679 goto unlock;
1680 }
1681 /* no data, no NAPI needed */
1682 netif_poll_enable(dev);
1683 1656
1657 if (__netif_rx_schedule_prep(dev)) {
1658 if (process_pure_responses(adapter))
1659 __netif_rx_schedule(dev);
1660 else {
1661 /* no data, no NAPI needed */
1662 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1663 netif_poll_enable(dev); /* undo schedule_prep */
1664 }
1684 } 1665 }
1685 writel(q->cidx, adapter->regs + A_SG_SLEEPING); 1666 return IRQ_HANDLED;
1686 } else 1667 }
1687 handled = t1_slow_intr_handler(adapter); 1668
1669 spin_lock(&adapter->async_lock);
1670 handled = t1_slow_intr_handler(adapter);
1671 spin_unlock(&adapter->async_lock);
1688 1672
1689 if (!handled) 1673 if (!handled)
1690 sge->stats.unhandled_irqs++; 1674 sge->stats.unhandled_irqs++;
1691unlock: 1675
1692 spin_unlock(&adapter->async_lock);
1693 return IRQ_RETVAL(handled != 0); 1676 return IRQ_RETVAL(handled != 0);
1694} 1677}
1695 1678
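The t1_poll()/t1_interrupt() hunks above follow the usual NAPI shape: the interrupt handler acknowledges the data interrupt and schedules polling, and the poll routine consumes at most a budget of responses before either re-arming the interrupt (queue drained) or asking to be polled again. A generic sketch of that handoff, with hypothetical primitives rather than the kernel's NAPI API:

#include <stdbool.h>

/* Hypothetical queue and interrupt primitives; not the kernel's NAPI API. */
extern bool rx_work_pending(void);
extern void process_one_response(void);
extern void disable_data_irq(void);
extern void enable_data_irq(void);
extern void schedule_poll(void);

/* Interrupt handler: acknowledge and hand all the work to the poller. */
static void data_irq(void)
{
	disable_data_irq();
	schedule_poll();
}

/* Poller: consume at most 'budget' responses per call so one busy queue
 * cannot monopolise the CPU; re-arm the interrupt once drained. */
static int poll_responses(int budget)
{
	int done = 0;

	while (done < budget && rx_work_pending()) {
		process_one_response();
		done++;
	}

	if (!rx_work_pending())
		enable_data_irq();	/* drained: back to interrupt mode */

	return done;
}

A real implementation must also cope with responses that race in around the re-enable; that is the role of the responses_pending() check t1_poll() performs above before completing NAPI.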
@@ -1712,17 +1695,13 @@ unlock:
1712irqreturn_t t1_interrupt(int irq, void *cookie) 1695irqreturn_t t1_interrupt(int irq, void *cookie)
1713{ 1696{
1714 int work_done; 1697 int work_done;
1715 struct respQ_e *e;
1716 struct adapter *adapter = cookie; 1698 struct adapter *adapter = cookie;
1717 struct respQ *Q = &adapter->sge->respQ;
1718 1699
1719 spin_lock(&adapter->async_lock); 1700 spin_lock(&adapter->async_lock);
1720 e = &Q->entries[Q->cidx];
1721 prefetch(e);
1722 1701
1723 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); 1702 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1724 1703
1725 if (likely(e->GenerationBit == Q->genbit)) 1704 if (likely(responses_pending(adapter)))
1726 work_done = process_responses(adapter, -1); 1705 work_done = process_responses(adapter, -1);
1727 else 1706 else
1728 work_done = t1_slow_intr_handler(adapter); 1707 work_done = t1_slow_intr_handler(adapter);
@@ -1796,7 +1775,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1796 * through the scheduler. 1775 * through the scheduler.
1797 */ 1776 */
1798 if (sge->tx_sched && !qid && skb->dev) { 1777 if (sge->tx_sched && !qid && skb->dev) {
1799 use_sched: 1778use_sched:
1800 use_sched_skb = 1; 1779 use_sched_skb = 1;
1801 /* Note that the scheduler might return a different skb than 1780 /* Note that the scheduler might return a different skb than
1802 * the one passed in. 1781 * the one passed in.
@@ -1900,7 +1879,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1900 cpl = (struct cpl_tx_pkt *)hdr; 1879 cpl = (struct cpl_tx_pkt *)hdr;
1901 } else { 1880 } else {
1902 /* 1881 /*
1903 * Packets shorter than ETH_HLEN can break the MAC, drop them 1882 * Packets shorter than ETH_HLEN can break the MAC, drop them
1904 * early. Also, we may get oversized packets because some 1883 * early. Also, we may get oversized packets because some
1905 * parts of the kernel don't handle our unusual hard_header_len 1884 * parts of the kernel don't handle our unusual hard_header_len
1906 * right, drop those too. 1885 * right, drop those too.
@@ -1984,9 +1963,9 @@ send:
1984 * then silently discard to avoid leak. 1963 * then silently discard to avoid leak.
1985 */ 1964 */
1986 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { 1965 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
1987 dev_kfree_skb_any(skb); 1966 dev_kfree_skb_any(skb);
1988 ret = NETDEV_TX_OK; 1967 ret = NETDEV_TX_OK;
1989 } 1968 }
1990 return ret; 1969 return ret;
1991} 1970}
1992 1971
@@ -2099,31 +2078,35 @@ static void espibug_workaround_t204(unsigned long data)
2099 2078
2100 if (adapter->open_device_map & PORT_MASK) { 2079 if (adapter->open_device_map & PORT_MASK) {
2101 int i; 2080 int i;
2102 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) { 2081
2082 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
2103 return; 2083 return;
2104 } 2084
2105 for (i = 0; i < nports; i++) { 2085 for (i = 0; i < nports; i++) {
2106 struct sk_buff *skb = sge->espibug_skb[i]; 2086 struct sk_buff *skb = sge->espibug_skb[i];
2107 if ( (netif_running(adapter->port[i].dev)) && 2087
2108 !(netif_queue_stopped(adapter->port[i].dev)) && 2088 if (!netif_running(adapter->port[i].dev) ||
2109 (seop[i] && ((seop[i] & 0xfff) == 0)) && 2089 netif_queue_stopped(adapter->port[i].dev) ||
2110 skb ) { 2090 !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
2111 if (!skb->cb[0]) { 2091 continue;
2112 u8 ch_mac_addr[ETH_ALEN] = 2092
2113 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; 2093 if (!skb->cb[0]) {
2114 memcpy(skb->data + sizeof(struct cpl_tx_pkt), 2094 u8 ch_mac_addr[ETH_ALEN] = {
2115 ch_mac_addr, ETH_ALEN); 2095 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
2116 memcpy(skb->data + skb->len - 10, 2096 };
2117 ch_mac_addr, ETH_ALEN); 2097
2118 skb->cb[0] = 0xff; 2098 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
2119 } 2099 ch_mac_addr, ETH_ALEN);
2120 2100 memcpy(skb->data + skb->len - 10,
2121 /* bump the reference count to avoid freeing of 2101 ch_mac_addr, ETH_ALEN);
2122 * the skb once the DMA has completed. 2102 skb->cb[0] = 0xff;
2123 */
2124 skb = skb_get(skb);
2125 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2126 } 2103 }
2104
2105 /* bump the reference count to avoid freeing of
2106 * the skb once the DMA has completed.
2107 */
2108 skb = skb_get(skb);
2109 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2127 } 2110 }
2128 } 2111 }
2129 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2112 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
@@ -2192,9 +2175,8 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
2192 if (adapter->params.nports > 1) { 2175 if (adapter->params.nports > 1) {
2193 tx_sched_init(sge); 2176 tx_sched_init(sge);
2194 sge->espibug_timer.function = espibug_workaround_t204; 2177 sge->espibug_timer.function = espibug_workaround_t204;
2195 } else { 2178 } else
2196 sge->espibug_timer.function = espibug_workaround; 2179 sge->espibug_timer.function = espibug_workaround;
2197 }
2198 sge->espibug_timer.data = (unsigned long)sge->adapter; 2180 sge->espibug_timer.data = (unsigned long)sge->adapter;
2199 2181
2200 sge->espibug_timeout = 1; 2182 sge->espibug_timeout = 1;
@@ -2202,7 +2184,7 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
2202 if (adapter->params.nports > 1) 2184 if (adapter->params.nports > 1)
2203 sge->espibug_timeout = HZ/100; 2185 sge->espibug_timeout = HZ/100;
2204 } 2186 }
2205 2187
2206 2188
2207 p->cmdQ_size[0] = SGE_CMDQ0_E_N; 2189 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
2208 p->cmdQ_size[1] = SGE_CMDQ1_E_N; 2190 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 22ed9a383c08..c2522cdfab37 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -223,13 +223,13 @@ static int fpga_slow_intr(adapter_t *adapter)
223 t1_sge_intr_error_handler(adapter->sge); 223 t1_sge_intr_error_handler(adapter->sge);
224 224
225 if (cause & FPGA_PCIX_INTERRUPT_GMAC) 225 if (cause & FPGA_PCIX_INTERRUPT_GMAC)
226 fpga_phy_intr_handler(adapter); 226 fpga_phy_intr_handler(adapter);
227 227
228 if (cause & FPGA_PCIX_INTERRUPT_TP) { 228 if (cause & FPGA_PCIX_INTERRUPT_TP) {
229 /* 229 /*
230 * FPGA doesn't support MC4 interrupts and it requires 230 * FPGA doesn't support MC4 interrupts and it requires
231 * this odd layer of indirection for MC5. 231 * this odd layer of indirection for MC5.
232 */ 232 */
233 u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); 233 u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
234 234
235 /* Clear TP interrupt */ 235 /* Clear TP interrupt */
@@ -262,8 +262,7 @@ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
262 udelay(10); 262 udelay(10);
263 } while (busy && --attempts); 263 } while (busy && --attempts);
264 if (busy) 264 if (busy)
265 CH_ALERT("%s: MDIO operation timed out\n", 265 CH_ALERT("%s: MDIO operation timed out\n", adapter->name);
266 adapter->name);
267 return busy; 266 return busy;
268} 267}
269 268
@@ -605,22 +604,23 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
605 604
606 switch (board_info(adapter)->board) { 605 switch (board_info(adapter)->board) {
607#ifdef CONFIG_CHELSIO_T1_1G 606#ifdef CONFIG_CHELSIO_T1_1G
608 case CHBT_BOARD_CHT204: 607 case CHBT_BOARD_CHT204:
609 case CHBT_BOARD_CHT204E: 608 case CHBT_BOARD_CHT204E:
610 case CHBT_BOARD_CHN204: 609 case CHBT_BOARD_CHN204:
611 case CHBT_BOARD_CHT204V: { 610 case CHBT_BOARD_CHT204V: {
612 int i, port_bit; 611 int i, port_bit;
613 for_each_port(adapter, i) { 612 for_each_port(adapter, i) {
614 port_bit = i + 1; 613 port_bit = i + 1;
615 if (!(cause & (1 << port_bit))) continue; 614 if (!(cause & (1 << port_bit)))
615 continue;
616 616
617 phy = adapter->port[i].phy; 617 phy = adapter->port[i].phy;
618 phy_cause = phy->ops->interrupt_handler(phy); 618 phy_cause = phy->ops->interrupt_handler(phy);
619 if (phy_cause & cphy_cause_link_change) 619 if (phy_cause & cphy_cause_link_change)
620 t1_link_changed(adapter, i); 620 t1_link_changed(adapter, i);
621 } 621 }
622 break; 622 break;
623 } 623 }
624 case CHBT_BOARD_CHT101: 624 case CHBT_BOARD_CHT101:
625 if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ 625 if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */
626 phy = adapter->port[0].phy; 626 phy = adapter->port[0].phy;
@@ -631,13 +631,13 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
631 break; 631 break;
632 case CHBT_BOARD_7500: { 632 case CHBT_BOARD_7500: {
633 int p; 633 int p;
634 /* 634 /*
635 * Elmer0's interrupt cause isn't useful here because there is 635 * Elmer0's interrupt cause isn't useful here because there is
636 * only one bit that can be set for all 4 ports. This means 636 * only one bit that can be set for all 4 ports. This means
637 * we are forced to check every PHY's interrupt status 637 * we are forced to check every PHY's interrupt status
638 * register to see who initiated the interrupt. 638 * register to see who initiated the interrupt.
639 */ 639 */
640 for_each_port(adapter, p) { 640 for_each_port(adapter, p) {
641 phy = adapter->port[p].phy; 641 phy = adapter->port[p].phy;
642 phy_cause = phy->ops->interrupt_handler(phy); 642 phy_cause = phy->ops->interrupt_handler(phy);
643 if (phy_cause & cphy_cause_link_change) 643 if (phy_cause & cphy_cause_link_change)
@@ -658,7 +658,7 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
658 break; 658 break;
659 case CHBT_BOARD_8000: 659 case CHBT_BOARD_8000:
660 case CHBT_BOARD_CHT110: 660 case CHBT_BOARD_CHT110:
661 CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n", 661 CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
662 cause); 662 cause);
663 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ 663 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
664 struct cmac *mac = adapter->port[0].mac; 664 struct cmac *mac = adapter->port[0].mac;
@@ -670,9 +670,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
670 670
671 t1_tpi_read(adapter, 671 t1_tpi_read(adapter,
672 A_ELMER0_GPI_STAT, &mod_detect); 672 A_ELMER0_GPI_STAT, &mod_detect);
673 CH_MSG(adapter, INFO, LINK, "XPAK %s\n", 673 CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
674 mod_detect ? "removed" : "inserted"); 674 mod_detect ? "removed" : "inserted");
675 } 675 }
676 break; 676 break;
677#ifdef CONFIG_CHELSIO_T1_COUGAR 677#ifdef CONFIG_CHELSIO_T1_COUGAR
678 case CHBT_BOARD_COUGAR: 678 case CHBT_BOARD_COUGAR:
@@ -688,7 +688,8 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
688 688
689 for_each_port(adapter, i) { 689 for_each_port(adapter, i) {
690 port_bit = i ? i + 1 : 0; 690 port_bit = i ? i + 1 : 0;
691 if (!(cause & (1 << port_bit))) continue; 691 if (!(cause & (1 << port_bit)))
692 continue;
692 693
693 phy = adapter->port[i].phy; 694 phy = adapter->port[i].phy;
694 phy_cause = phy->ops->interrupt_handler(phy); 695 phy_cause = phy->ops->interrupt_handler(phy);
@@ -755,7 +756,7 @@ void t1_interrupts_disable(adapter_t* adapter)
755 756
756 /* Disable PCIX & external chip interrupts. */ 757 /* Disable PCIX & external chip interrupts. */
757 if (t1_is_asic(adapter)) 758 if (t1_is_asic(adapter))
758 writel(0, adapter->regs + A_PL_ENABLE); 759 writel(0, adapter->regs + A_PL_ENABLE);
759 760
760 /* PCI-X interrupts */ 761 /* PCI-X interrupts */
761 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); 762 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
@@ -830,11 +831,11 @@ int t1_slow_intr_handler(adapter_t *adapter)
830/* Power sequencing is a work-around for Intel's XPAKs. */ 831/* Power sequencing is a work-around for Intel's XPAKs. */
831static void power_sequence_xpak(adapter_t* adapter) 832static void power_sequence_xpak(adapter_t* adapter)
832{ 833{
833 u32 mod_detect; 834 u32 mod_detect;
834 u32 gpo; 835 u32 gpo;
835 836
836 /* Check for XPAK */ 837 /* Check for XPAK */
837 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); 838 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
838 if (!(ELMER0_GP_BIT5 & mod_detect)) { 839 if (!(ELMER0_GP_BIT5 & mod_detect)) {
839 /* XPAK is present */ 840 /* XPAK is present */
840 t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); 841 t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
@@ -877,31 +878,31 @@ static int board_init(adapter_t *adapter, const struct board_info *bi)
877 case CHBT_BOARD_N210: 878 case CHBT_BOARD_N210:
878 case CHBT_BOARD_CHT210: 879 case CHBT_BOARD_CHT210:
879 case CHBT_BOARD_COUGAR: 880 case CHBT_BOARD_COUGAR:
880 t1_tpi_par(adapter, 0xf); 881 t1_tpi_par(adapter, 0xf);
881 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); 882 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
882 break; 883 break;
883 case CHBT_BOARD_CHT110: 884 case CHBT_BOARD_CHT110:
884 t1_tpi_par(adapter, 0xf); 885 t1_tpi_par(adapter, 0xf);
885 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); 886 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
886 887
887 /* TBD XXX Might not need. This fixes a problem 888 /* TBD XXX Might not need. This fixes a problem
888 * described in the Intel SR XPAK errata. 889 * described in the Intel SR XPAK errata.
889 */ 890 */
890 power_sequence_xpak(adapter); 891 power_sequence_xpak(adapter);
891 break; 892 break;
892#ifdef CONFIG_CHELSIO_T1_1G 893#ifdef CONFIG_CHELSIO_T1_1G
893 case CHBT_BOARD_CHT204E: 894 case CHBT_BOARD_CHT204E:
894 /* add config space write here */ 895 /* add config space write here */
895 case CHBT_BOARD_CHT204: 896 case CHBT_BOARD_CHT204:
896 case CHBT_BOARD_CHT204V: 897 case CHBT_BOARD_CHT204V:
897 case CHBT_BOARD_CHN204: 898 case CHBT_BOARD_CHN204:
898 t1_tpi_par(adapter, 0xf); 899 t1_tpi_par(adapter, 0xf);
899 t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); 900 t1_tpi_write(adapter, A_ELMER0_GPO, 0x804);
900 break; 901 break;
901 case CHBT_BOARD_CHT101: 902 case CHBT_BOARD_CHT101:
902 case CHBT_BOARD_7500: 903 case CHBT_BOARD_7500:
903 t1_tpi_par(adapter, 0xf); 904 t1_tpi_par(adapter, 0xf);
904 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); 905 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804);
905 break; 906 break;
906#endif 907#endif
907 } 908 }
@@ -941,7 +942,7 @@ int t1_init_hw_modules(adapter_t *adapter)
941 goto out_err; 942 goto out_err;
942 943
943 err = 0; 944 err = 0;
944 out_err: 945out_err:
945 return err; 946 return err;
946} 947}
947 948
@@ -983,7 +984,7 @@ void t1_free_sw_modules(adapter_t *adapter)
983 if (adapter->espi) 984 if (adapter->espi)
984 t1_espi_destroy(adapter->espi); 985 t1_espi_destroy(adapter->espi);
985#ifdef CONFIG_CHELSIO_T1_COUGAR 986#ifdef CONFIG_CHELSIO_T1_COUGAR
986 if (adapter->cspi) 987 if (adapter->cspi)
987 t1_cspi_destroy(adapter->cspi); 988 t1_cspi_destroy(adapter->cspi);
988#endif 989#endif
989} 990}
@@ -1010,7 +1011,7 @@ static void __devinit init_link_config(struct link_config *lc,
1010 CH_ERR("%s: CSPI initialization failed\n", 1011 CH_ERR("%s: CSPI initialization failed\n",
1011 adapter->name); 1012 adapter->name);
1012 goto error; 1013 goto error;
1013 } 1014 }
1014#endif 1015#endif
1015 1016
1016/* 1017/*
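
Reading aid for the COUGAR hunk above (not part of the patch): the handler maps a port index to its ELMER0 cause bit with "port_bit = i ? i + 1 : 0", i.e. bit 1 is skipped. A minimal sketch of that mapping, with a made-up helper name:

	/* Sketch only; not part of the patch. */
	static inline int cougar_port_cause_bit(int port)
	{
		/* port 0 -> bit 0; ports 1, 2, 3 -> bits 2, 3, 4 (bit 1 unused) */
		return port ? port + 1 : 0;
	}

So on a 4-port board the handler tests cause bits 0, 2, 3 and 4, and only for a set bit goes on to call that PHY's interrupt_handler().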
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c
index 0ca0b6e19e43..6222d585e447 100644
--- a/drivers/net/chelsio/tp.c
+++ b/drivers/net/chelsio/tp.c
@@ -17,39 +17,36 @@ struct petp {
17static void tp_init(adapter_t * ap, const struct tp_params *p, 17static void tp_init(adapter_t * ap, const struct tp_params *p,
18 unsigned int tp_clk) 18 unsigned int tp_clk)
19{ 19{
20 if (t1_is_asic(ap)) { 20 u32 val;
21 u32 val;
22
23 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
24 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
25 if (!p->pm_size)
26 val |= F_OFFLOAD_DISABLE;
27 else
28 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
29 F_TP_IN_ESPI_CHECK_TCP_CSUM;
30 writel(val, ap->regs + A_TP_IN_CONFIG);
31 writel(F_TP_OUT_CSPI_CPL |
32 F_TP_OUT_ESPI_ETHERNET |
33 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
34 F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
35 ap->regs + A_TP_OUT_CONFIG);
36 writel(V_IP_TTL(64) |
37 F_PATH_MTU /* IP DF bit */ |
38 V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
39 V_SYN_COOKIE_PARAMETER(29),
40 ap->regs + A_TP_GLOBAL_CONFIG);
41 /*
42 * Enable pause frame deadlock prevention.
43 */
44 if (is_T2(ap) && ap->params.nports > 1) {
45 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
46
47 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
48 V_DROP_TICKS_CNT(drop_ticks) |
49 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
50 ap->regs + A_TP_TX_DROP_CONFIG);
51 }
52 21
22 if (!t1_is_asic(ap))
23 return;
24
25 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
26 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
27 if (!p->pm_size)
28 val |= F_OFFLOAD_DISABLE;
29 else
30 val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM;
31 writel(val, ap->regs + A_TP_IN_CONFIG);
32 writel(F_TP_OUT_CSPI_CPL |
33 F_TP_OUT_ESPI_ETHERNET |
34 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
35 F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG);
36 writel(V_IP_TTL(64) |
37 F_PATH_MTU /* IP DF bit */ |
38 V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
39 V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG);
40 /*
41 * Enable pause frame deadlock prevention.
42 */
43 if (is_T2(ap) && ap->params.nports > 1) {
44 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
45
46 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
47 V_DROP_TICKS_CNT(drop_ticks) |
48 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
49 ap->regs + A_TP_TX_DROP_CONFIG);
53 } 50 }
54} 51}
55 52
@@ -61,6 +58,7 @@ void t1_tp_destroy(struct petp *tp)
61struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) 58struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p)
62{ 59{
63 struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); 60 struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
61
64 if (!tp) 62 if (!tp)
65 return NULL; 63 return NULL;
66 64
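
In the reworked tp_init() above, the pause-frame deadlock-prevention timeout is programmed in core-clock ticks: drop_ticks = DROP_MSEC * (tp_clk / 1000). A worked example with assumed numbers (the tp_clk and DROP_MSEC values below are illustrative, not taken from the driver):

	u32 tp_clk = 125000000;	/* assume a 125 MHz TP core clock */
	u32 drop_msec = 16;	/* stand-in for DROP_MSEC */
	u32 drop_ticks = drop_msec * (tp_clk / 1000);	/* 16 * 125000 = 2000000 ticks */

The division by 1000 converts the clock to ticks-per-millisecond first, so the subsequent multiplication stays comfortably within a u32 for realistic clock rates.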
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index 85dc3b1dc309..534ffa0f616e 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -226,22 +226,21 @@ static void run_table(adapter_t *adapter, struct init_table *ib, int len)
226 if (ib[i].addr == INITBLOCK_SLEEP) { 226 if (ib[i].addr == INITBLOCK_SLEEP) {
227 udelay( ib[i].data ); 227 udelay( ib[i].data );
228 CH_ERR("sleep %d us\n",ib[i].data); 228 CH_ERR("sleep %d us\n",ib[i].data);
229 } else { 229 } else
230 vsc_write( adapter, ib[i].addr, ib[i].data ); 230 vsc_write( adapter, ib[i].addr, ib[i].data );
231 }
232 } 231 }
233} 232}
234 233
235static int bist_rd(adapter_t *adapter, int moduleid, int address) 234static int bist_rd(adapter_t *adapter, int moduleid, int address)
236{ 235{
237 int data=0; 236 int data = 0;
238 u32 result=0; 237 u32 result = 0;
239 238
240 if( (address != 0x0) && 239 if ((address != 0x0) &&
241 (address != 0x1) && 240 (address != 0x1) &&
242 (address != 0x2) && 241 (address != 0x2) &&
243 (address != 0xd) && 242 (address != 0xd) &&
244 (address != 0xe)) 243 (address != 0xe))
245 CH_ERR("No bist address: 0x%x\n", address); 244 CH_ERR("No bist address: 0x%x\n", address);
246 245
247 data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | 246 data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
@@ -251,27 +250,27 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
251 udelay(10); 250 udelay(10);
252 251
253 vsc_read(adapter, REG_RAM_BIST_RESULT, &result); 252 vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
254 if((result & (1<<9)) != 0x0) 253 if ((result & (1 << 9)) != 0x0)
255 CH_ERR("Still in bist read: 0x%x\n", result); 254 CH_ERR("Still in bist read: 0x%x\n", result);
256 else if((result & (1<<8)) != 0x0) 255 else if ((result & (1 << 8)) != 0x0)
257 CH_ERR("bist read error: 0x%x\n", result); 256 CH_ERR("bist read error: 0x%x\n", result);
258 257
259 return(result & 0xff); 258 return (result & 0xff);
260} 259}
261 260
262static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) 261static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
263{ 262{
264 int data=0; 263 int data = 0;
265 u32 result=0; 264 u32 result = 0;
266 265
267 if( (address != 0x0) && 266 if ((address != 0x0) &&
268 (address != 0x1) && 267 (address != 0x1) &&
269 (address != 0x2) && 268 (address != 0x2) &&
270 (address != 0xd) && 269 (address != 0xd) &&
271 (address != 0xe)) 270 (address != 0xe))
272 CH_ERR("No bist address: 0x%x\n", address); 271 CH_ERR("No bist address: 0x%x\n", address);
273 272
274 if( value>255 ) 273 if (value > 255)
275 CH_ERR("Suspicious write out of range value: 0x%x\n", value); 274 CH_ERR("Suspicious write out of range value: 0x%x\n", value);
276 275
277 data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | 276 data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
@@ -281,12 +280,12 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
281 udelay(5); 280 udelay(5);
282 281
283 vsc_read(adapter, REG_RAM_BIST_CMD, &result); 282 vsc_read(adapter, REG_RAM_BIST_CMD, &result);
284 if((result & (1<<27)) != 0x0) 283 if ((result & (1 << 27)) != 0x0)
285 CH_ERR("Still in bist write: 0x%x\n", result); 284 CH_ERR("Still in bist write: 0x%x\n", result);
286 else if((result & (1<<26)) != 0x0) 285 else if ((result & (1 << 26)) != 0x0)
287 CH_ERR("bist write error: 0x%x\n", result); 286 CH_ERR("bist write error: 0x%x\n", result);
288 287
289 return(0); 288 return 0;
290} 289}
291 290
292static int run_bist(adapter_t *adapter, int moduleid) 291static int run_bist(adapter_t *adapter, int moduleid)
@@ -295,7 +294,7 @@ static int run_bist(adapter_t *adapter, int moduleid)
295 (void) bist_wr(adapter,moduleid, 0x00, 0x02); 294 (void) bist_wr(adapter,moduleid, 0x00, 0x02);
296 (void) bist_wr(adapter,moduleid, 0x01, 0x01); 295 (void) bist_wr(adapter,moduleid, 0x01, 0x01);
297 296
298 return(0); 297 return 0;
299} 298}
300 299
301static int check_bist(adapter_t *adapter, int moduleid) 300static int check_bist(adapter_t *adapter, int moduleid)
@@ -309,27 +308,26 @@ static int check_bist(adapter_t *adapter, int moduleid)
309 if ((result & 3) != 0x3) 308 if ((result & 3) != 0x3)
310 CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", 309 CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
311 result, moduleid, column); 310 result, moduleid, column);
312 return(0); 311 return 0;
313} 312}
314 313
315static int enable_mem(adapter_t *adapter, int moduleid) 314static int enable_mem(adapter_t *adapter, int moduleid)
316{ 315{
317 /*enable mem*/ 316 /*enable mem*/
318 (void) bist_wr(adapter,moduleid, 0x00, 0x00); 317 (void) bist_wr(adapter,moduleid, 0x00, 0x00);
319 return(0); 318 return 0;
320} 319}
321 320
322static int run_bist_all(adapter_t *adapter) 321static int run_bist_all(adapter_t *adapter)
323{ 322{
324 int port=0; 323 int port = 0;
325 u32 val=0; 324 u32 val = 0;
326 325
327 vsc_write(adapter, REG_MEM_BIST, 0x5); 326 vsc_write(adapter, REG_MEM_BIST, 0x5);
328 vsc_read(adapter, REG_MEM_BIST, &val); 327 vsc_read(adapter, REG_MEM_BIST, &val);
329 328
330 for(port=0; port<12; port++){ 329 for (port = 0; port < 12; port++)
331 vsc_write(adapter, REG_DEV_SETUP(port), 0x0); 330 vsc_write(adapter, REG_DEV_SETUP(port), 0x0);
332 }
333 331
334 udelay(300); 332 udelay(300);
335 vsc_write(adapter, REG_SPI4_MISC, 0x00040409); 333 vsc_write(adapter, REG_SPI4_MISC, 0x00040409);
@@ -352,13 +350,13 @@ static int run_bist_all(adapter_t *adapter)
352 udelay(300); 350 udelay(300);
353 vsc_write(adapter, REG_SPI4_MISC, 0x60040400); 351 vsc_write(adapter, REG_SPI4_MISC, 0x60040400);
354 udelay(300); 352 udelay(300);
355 for(port=0; port<12; port++){ 353 for (port = 0; port < 12; port++)
356 vsc_write(adapter, REG_DEV_SETUP(port), 0x1); 354 vsc_write(adapter, REG_DEV_SETUP(port), 0x1);
357 } 355
358 udelay(300); 356 udelay(300);
359 vsc_write(adapter, REG_MEM_BIST, 0x0); 357 vsc_write(adapter, REG_MEM_BIST, 0x0);
360 mdelay(10); 358 mdelay(10);
361 return(0); 359 return 0;
362} 360}
363 361
364static int mac_intr_handler(struct cmac *mac) 362static int mac_intr_handler(struct cmac *mac)
@@ -591,40 +589,46 @@ static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)
591 589
592static void port_stats_update(struct cmac *mac) 590static void port_stats_update(struct cmac *mac)
593{ 591{
594 int port = mac->instance->index; 592 struct {
593 unsigned int reg;
594 unsigned int offset;
595 } hw_stats[] = {
596
597#define HW_STAT(reg, stat_name) \
598 { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
599
600 /* Rx stats */
601 HW_STAT(RxUnicast, RxUnicastFramesOK),
602 HW_STAT(RxMulticast, RxMulticastFramesOK),
603 HW_STAT(RxBroadcast, RxBroadcastFramesOK),
604 HW_STAT(Crc, RxFCSErrors),
605 HW_STAT(RxAlignment, RxAlignErrors),
606 HW_STAT(RxOversize, RxFrameTooLongErrors),
607 HW_STAT(RxPause, RxPauseFrames),
608 HW_STAT(RxJabbers, RxJabberErrors),
609 HW_STAT(RxFragments, RxRuntErrors),
610 HW_STAT(RxUndersize, RxRuntErrors),
611 HW_STAT(RxSymbolCarrier, RxSymbolErrors),
612 HW_STAT(RxSize1519ToMax, RxJumboFramesOK),
613
614 /* Tx stats (skip collision stats as we are full-duplex only) */
615 HW_STAT(TxUnicast, TxUnicastFramesOK),
616 HW_STAT(TxMulticast, TxMulticastFramesOK),
617 HW_STAT(TxBroadcast, TxBroadcastFramesOK),
618 HW_STAT(TxPause, TxPauseFrames),
619 HW_STAT(TxUnderrun, TxUnderrun),
620 HW_STAT(TxSize1519ToMax, TxJumboFramesOK),
621 }, *p = hw_stats;
622 unsigned int port = mac->instance->index;
623 u64 *stats = (u64 *)&mac->stats;
624 unsigned int i;
625
626 for (i = 0; i < ARRAY_SIZE(hw_stats); i++)
627 rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset);
595 628
596 /* Rx stats */ 629 rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
597 rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK); 630 rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
598 rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad); 631 rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
599 rmon_update(mac, REG_RX_UNICAST(port), &mac->stats.RxUnicastFramesOK);
600 rmon_update(mac, REG_RX_MULTICAST(port),
601 &mac->stats.RxMulticastFramesOK);
602 rmon_update(mac, REG_RX_BROADCAST(port),
603 &mac->stats.RxBroadcastFramesOK);
604 rmon_update(mac, REG_CRC(port), &mac->stats.RxFCSErrors);
605 rmon_update(mac, REG_RX_ALIGNMENT(port), &mac->stats.RxAlignErrors);
606 rmon_update(mac, REG_RX_OVERSIZE(port),
607 &mac->stats.RxFrameTooLongErrors);
608 rmon_update(mac, REG_RX_PAUSE(port), &mac->stats.RxPauseFrames);
609 rmon_update(mac, REG_RX_JABBERS(port), &mac->stats.RxJabberErrors);
610 rmon_update(mac, REG_RX_FRAGMENTS(port), &mac->stats.RxRuntErrors);
611 rmon_update(mac, REG_RX_UNDERSIZE(port), &mac->stats.RxRuntErrors);
612 rmon_update(mac, REG_RX_SYMBOL_CARRIER(port),
613 &mac->stats.RxSymbolErrors);
614 rmon_update(mac, REG_RX_SIZE_1519_TO_MAX(port),
615 &mac->stats.RxJumboFramesOK);
616
617 /* Tx stats (skip collision stats as we are full-duplex only) */
618 rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
619 rmon_update(mac, REG_TX_UNICAST(port), &mac->stats.TxUnicastFramesOK);
620 rmon_update(mac, REG_TX_MULTICAST(port),
621 &mac->stats.TxMulticastFramesOK);
622 rmon_update(mac, REG_TX_BROADCAST(port),
623 &mac->stats.TxBroadcastFramesOK);
624 rmon_update(mac, REG_TX_PAUSE(port), &mac->stats.TxPauseFrames);
625 rmon_update(mac, REG_TX_UNDERRUN(port), &mac->stats.TxUnderrun);
626 rmon_update(mac, REG_TX_SIZE_1519_TO_MAX(port),
627 &mac->stats.TxJumboFramesOK);
628} 632}
629 633
630/* 634/*
@@ -686,7 +690,8 @@ static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index)
686 int i; 690 int i;
687 691
688 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); 692 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
689 if (!mac) return NULL; 693 if (!mac)
694 return NULL;
690 695
691 mac->ops = &vsc7326_ops; 696 mac->ops = &vsc7326_ops;
692 mac->instance = (cmac_instance *)(mac + 1); 697 mac->instance = (cmac_instance *)(mac + 1);
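
The HW_STAT() initializer in the new port_stats_update() records, for each hardware counter, the register offset and the counter's position inside struct cmac_statistics measured in u64 slots; the pointer difference against a NULL base is an open-coded offsetof. An equivalent way to write the same table entry (a sketch, not the committed macro):

	#include <stddef.h>

	#define HW_STAT(reg, stat_name) \
		{ reg, offsetof(struct cmac_statistics, stat_name) / sizeof(u64) }

Each entry is consumed as rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset). Note that, as committed here, the loop body never advances p, so a full table walk would normally also increment the pointer (or index hw_stats[i] directly).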
diff --git a/drivers/net/chelsio/vsc7326_reg.h b/drivers/net/chelsio/vsc7326_reg.h
index 491bcf75c4fb..479edbcabe68 100644
--- a/drivers/net/chelsio/vsc7326_reg.h
+++ b/drivers/net/chelsio/vsc7326_reg.h
@@ -192,73 +192,84 @@
192#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */ 192#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */
193 193
194/* Statistics */ 194/* Statistics */
195/* CRA(0x4,pn,reg) */
196/* reg below */
195/* pn = port number, 0-a, a = 10GbE */ 197/* pn = port number, 0-a, a = 10GbE */
196#define REG_RX_IN_BYTES(pn) CRA(0x4,pn,0x00) /* # Rx in octets */
197#define REG_RX_SYMBOL_CARRIER(pn) CRA(0x4,pn,0x01) /* Frames w/ symbol errors */
198#define REG_RX_PAUSE(pn) CRA(0x4,pn,0x02) /* # pause frames received */
199#define REG_RX_UNSUP_OPCODE(pn) CRA(0x4,pn,0x03) /* # control frames with unsupported opcode */
200#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,0x04) /* # octets in good frames */
201#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,0x05) /* # octets in bad frames */
202#define REG_RX_UNICAST(pn) CRA(0x4,pn,0x06) /* # good unicast frames */
203#define REG_RX_MULTICAST(pn) CRA(0x4,pn,0x07) /* # good multicast frames */
204#define REG_RX_BROADCAST(pn) CRA(0x4,pn,0x08) /* # good broadcast frames */
205#define REG_CRC(pn) CRA(0x4,pn,0x09) /* # frames w/ bad CRC only */
206#define REG_RX_ALIGNMENT(pn) CRA(0x4,pn,0x0a) /* # frames w/ alignment err */
207#define REG_RX_UNDERSIZE(pn) CRA(0x4,pn,0x0b) /* # frames undersize */
208#define REG_RX_FRAGMENTS(pn) CRA(0x4,pn,0x0c) /* # frames undersize w/ crc err */
209#define REG_RX_IN_RANGE_LENGTH_ERROR(pn) CRA(0x4,pn,0x0d) /* # frames with length error */
210#define REG_RX_OUT_OF_RANGE_ERROR(pn) CRA(0x4,pn,0x0e) /* # frames with illegal length field */
211#define REG_RX_OVERSIZE(pn) CRA(0x4,pn,0x0f) /* # frames oversize */
212#define REG_RX_JABBERS(pn) CRA(0x4,pn,0x10) /* # frames oversize w/ crc err */
213#define REG_RX_SIZE_64(pn) CRA(0x4,pn,0x11) /* # frames 64 octets long */
214#define REG_RX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x12) /* # frames 65-127 octets */
215#define REG_RX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x13) /* # frames 128-255 */
216#define REG_RX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x14) /* # frames 256-511 */
217#define REG_RX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x15) /* # frames 512-1023 */
218#define REG_RX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x16) /* # frames 1024-1518 */
219#define REG_RX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x17) /* # frames 1519-max */
220 198
221#define REG_TX_OUT_BYTES(pn) CRA(0x4,pn,0x18) /* # octets tx */ 199enum {
222#define REG_TX_PAUSE(pn) CRA(0x4,pn,0x19) /* # pause frames sent */ 200 RxInBytes = 0x00, // # Rx in octets
223#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,0x1a) /* # octets tx OK */ 201 RxSymbolCarrier = 0x01, // Frames w/ symbol errors
224#define REG_TX_UNICAST(pn) CRA(0x4,pn,0x1b) /* # frames unicast */ 202 RxPause = 0x02, // # pause frames received
225#define REG_TX_MULTICAST(pn) CRA(0x4,pn,0x1c) /* # frames multicast */ 203 RxUnsupOpcode = 0x03, // # control frames with unsupported opcode
226#define REG_TX_BROADCAST(pn) CRA(0x4,pn,0x1d) /* # frames broadcast */ 204 RxOkBytes = 0x04, // # octets in good frames
227#define REG_TX_MULTIPLE_COLL(pn) CRA(0x4,pn,0x1e) /* # frames tx after multiple collisions */ 205 RxBadBytes = 0x05, // # octets in bad frames
228#define REG_TX_LATE_COLL(pn) CRA(0x4,pn,0x1f) /* # late collisions detected */ 206 RxUnicast = 0x06, // # good unicast frames
229#define REG_TX_XCOLL(pn) CRA(0x4,pn,0x20) /* # frames lost, excessive collisions */ 207 RxMulticast = 0x07, // # good multicast frames
230#define REG_TX_DEFER(pn) CRA(0x4,pn,0x21) /* # frames deferred on first tx attempt */ 208 RxBroadcast = 0x08, // # good broadcast frames
231#define REG_TX_XDEFER(pn) CRA(0x4,pn,0x22) /* # frames excessively deferred */ 209 Crc = 0x09, // # frames w/ bad CRC only
232#define REG_TX_CSENSE(pn) CRA(0x4,pn,0x23) /* carrier sense errors at frame end */ 210 RxAlignment = 0x0a, // # frames w/ alignment err
233#define REG_TX_SIZE_64(pn) CRA(0x4,pn,0x24) /* # frames 64 octets long */ 211 RxUndersize = 0x0b, // # frames undersize
234#define REG_TX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x25) /* # frames 65-127 octets */ 212 RxFragments = 0x0c, // # frames undersize w/ crc err
235#define REG_TX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x26) /* # frames 128-255 */ 213 RxInRangeLengthError = 0x0d, // # frames with length error
236#define REG_TX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x27) /* # frames 256-511 */ 214 RxOutOfRangeError = 0x0e, // # frames with illegal length field
237#define REG_TX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x28) /* # frames 512-1023 */ 215 RxOversize = 0x0f, // # frames oversize
238#define REG_TX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x29) /* # frames 1024-1518 */ 216 RxJabbers = 0x10, // # frames oversize w/ crc err
239#define REG_TX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x2a) /* # frames 1519-max */ 217 RxSize64 = 0x11, // # frames 64 octets long
240#define REG_TX_SINGLE_COLL(pn) CRA(0x4,pn,0x2b) /* # frames tx after single collision */ 218 RxSize65To127 = 0x12, // # frames 65-127 octets
241#define REG_TX_BACKOFF2(pn) CRA(0x4,pn,0x2c) /* # frames tx ok after 2 backoffs/collisions */ 219 RxSize128To255 = 0x13, // # frames 128-255
242#define REG_TX_BACKOFF3(pn) CRA(0x4,pn,0x2d) /* after 3 backoffs/collisions */ 220 RxSize256To511 = 0x14, // # frames 256-511
243#define REG_TX_BACKOFF4(pn) CRA(0x4,pn,0x2e) /* after 4 */ 221 RxSize512To1023 = 0x15, // # frames 512-1023
244#define REG_TX_BACKOFF5(pn) CRA(0x4,pn,0x2f) /* after 5 */ 222 RxSize1024To1518 = 0x16, // # frames 1024-1518
245#define REG_TX_BACKOFF6(pn) CRA(0x4,pn,0x30) /* after 6 */ 223 RxSize1519ToMax = 0x17, // # frames 1519-max
246#define REG_TX_BACKOFF7(pn) CRA(0x4,pn,0x31) /* after 7 */
247#define REG_TX_BACKOFF8(pn) CRA(0x4,pn,0x32) /* after 8 */
248#define REG_TX_BACKOFF9(pn) CRA(0x4,pn,0x33) /* after 9 */
249#define REG_TX_BACKOFF10(pn) CRA(0x4,pn,0x34) /* after 10 */
250#define REG_TX_BACKOFF11(pn) CRA(0x4,pn,0x35) /* after 11 */
251#define REG_TX_BACKOFF12(pn) CRA(0x4,pn,0x36) /* after 12 */
252#define REG_TX_BACKOFF13(pn) CRA(0x4,pn,0x37) /* after 13 */
253#define REG_TX_BACKOFF14(pn) CRA(0x4,pn,0x38) /* after 14 */
254#define REG_TX_BACKOFF15(pn) CRA(0x4,pn,0x39) /* after 15 */
255#define REG_TX_UNDERRUN(pn) CRA(0x4,pn,0x3a) /* # frames dropped from underrun */
256#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
257#define REG_RX_IPG_SHRINK(pn) CRA(0x4,pn,0x3c) /* # of IPG shrinks detected */
258 224
259#define REG_STAT_STICKY1G(pn) CRA(0x4,pn,0x3e) /* tri-speed sticky bits */ 225 TxOutBytes = 0x18, // # octets tx
260#define REG_STAT_STICKY10G CRA(0x4,0xa,0x3e) /* 10GbE sticky bits */ 226 TxPause = 0x19, // # pause frames sent
261#define REG_STAT_INIT(pn) CRA(0x4,pn,0x3f) /* Clear all statistics */ 227 TxOkBytes = 0x1a, // # octets tx OK
228 TxUnicast = 0x1b, // # frames unicast
229 TxMulticast = 0x1c, // # frames multicast
230 TxBroadcast = 0x1d, // # frames broadcast
231 TxMultipleColl = 0x1e, // # frames tx after multiple collisions
232 TxLateColl = 0x1f, // # late collisions detected
233 TxXcoll = 0x20, // # frames lost, excessive collisions
234 TxDefer = 0x21, // # frames deferred on first tx attempt
235 TxXdefer = 0x22, // # frames excessively deferred
236 TxCsense = 0x23, // carrier sense errors at frame end
237 TxSize64 = 0x24, // # frames 64 octets long
238 TxSize65To127 = 0x25, // # frames 65-127 octets
239 TxSize128To255 = 0x26, // # frames 128-255
240 TxSize256To511 = 0x27, // # frames 256-511
241 TxSize512To1023 = 0x28, // # frames 512-1023
242 TxSize1024To1518 = 0x29, // # frames 1024-1518
243 TxSize1519ToMax = 0x2a, // # frames 1519-max
244 TxSingleColl = 0x2b, // # frames tx after single collision
245 TxBackoff2 = 0x2c, // # frames tx ok after 2 backoffs/collisions
246 TxBackoff3 = 0x2d, // after 3 backoffs/collisions
247 TxBackoff4 = 0x2e, // after 4
248 TxBackoff5 = 0x2f, // after 5
249 TxBackoff6 = 0x30, // after 6
250 TxBackoff7 = 0x31, // after 7
251 TxBackoff8 = 0x32, // after 8
252 TxBackoff9 = 0x33, // after 9
253 TxBackoff10 = 0x34, // after 10
254 TxBackoff11 = 0x35, // after 11
255 TxBackoff12 = 0x36, // after 12
256 TxBackoff13 = 0x37, // after 13
257 TxBackoff14 = 0x38, // after 14
258 TxBackoff15 = 0x39, // after 15
259 TxUnderrun = 0x3a, // # frames dropped from underrun
260 // Hole. See REG_RX_XGMII_PROT_ERR below.
261 RxIpgShrink = 0x3c, // # of IPG shrinks detected
262 // Duplicate. See REG_STAT_STICKY10G below.
263 StatSticky1G = 0x3e, // tri-speed sticky bits
264 StatInit = 0x3f // Clear all statistics
265};
266
267#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
268#define REG_STAT_STICKY10G CRA(0x4,0xa,StatSticky1G) /* 10GbE sticky bits */
269
270#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,RxOkBytes)
271#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,RxBadBytes)
272#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,TxOkBytes)
262 273
263/* MII-Management Block registers */ 274/* MII-Management Block registers */
264/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If 275/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If
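
With this header change the per-counter REG_* statistics macros are gone; a statistics register address is now composed on demand from block 0x4, the port number, and the enum offset, exactly as the three REG_* macros kept above do. A fragment for illustration (port is whichever port index is being read):

	/* old style: one macro per counter
	 *   #define REG_RX_UNICAST(pn)  CRA(0x4,pn,0x06)
	 * new style: compose the address from the enum */
	u32 rx_unicast_reg = CRA(0x4, port, RxUnicast);	/* same address, RxUnicast == 0x06 */

port_stats_update() uses this form, CRA(0x4, port, p->reg), for every entry in its hw_stats table.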
diff --git a/drivers/net/chelsio/vsc8244.c b/drivers/net/chelsio/vsc8244.c
index c493e783d459..251d4859c91d 100644
--- a/drivers/net/chelsio/vsc8244.c
+++ b/drivers/net/chelsio/vsc8244.c
@@ -54,7 +54,7 @@ enum {
54}; 54};
55 55
56#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ 56#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
57 VSC_INTR_NEG_DONE) 57 VSC_INTR_NEG_DONE)
58#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ 58#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
59 VSC_INTR_ENABLE) 59 VSC_INTR_ENABLE)
60 60
@@ -94,19 +94,18 @@ static int vsc8244_intr_enable(struct cphy *cphy)
94{ 94{
95 simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK); 95 simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK);
96 96
97 /* Enable interrupts through Elmer */ 97 /* Enable interrupts through Elmer */
98 if (t1_is_asic(cphy->adapter)) { 98 if (t1_is_asic(cphy->adapter)) {
99 u32 elmer; 99 u32 elmer;
100 100
101 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 101 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
102 elmer |= ELMER0_GP_BIT1; 102 elmer |= ELMER0_GP_BIT1;
103 if (is_T2(cphy->adapter)) { 103 if (is_T2(cphy->adapter))
104 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 104 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
105 }
106 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 105 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
107 } 106 }
108 107
109 return 0; 108 return 0;
110} 109}
111 110
112static int vsc8244_intr_disable(struct cphy *cphy) 111static int vsc8244_intr_disable(struct cphy *cphy)
@@ -118,19 +117,18 @@ static int vsc8244_intr_disable(struct cphy *cphy)
118 117
119 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 118 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
120 elmer &= ~ELMER0_GP_BIT1; 119 elmer &= ~ELMER0_GP_BIT1;
121 if (is_T2(cphy->adapter)) { 120 if (is_T2(cphy->adapter))
122 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); 121 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
123 }
124 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 122 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
125 } 123 }
126 124
127 return 0; 125 return 0;
128} 126}
129 127
130static int vsc8244_intr_clear(struct cphy *cphy) 128static int vsc8244_intr_clear(struct cphy *cphy)
131{ 129{
132 u32 val; 130 u32 val;
133 u32 elmer; 131 u32 elmer;
134 132
135 /* Clear PHY interrupts by reading the register. */ 133 /* Clear PHY interrupts by reading the register. */
136 simple_mdio_read(cphy, VSC8244_INTR_ENABLE, &val); 134 simple_mdio_read(cphy, VSC8244_INTR_ENABLE, &val);
@@ -138,13 +136,12 @@ static int vsc8244_intr_clear(struct cphy *cphy)
138 if (t1_is_asic(cphy->adapter)) { 136 if (t1_is_asic(cphy->adapter)) {
139 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); 137 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
140 elmer |= ELMER0_GP_BIT1; 138 elmer |= ELMER0_GP_BIT1;
141 if (is_T2(cphy->adapter)) { 139 if (is_T2(cphy->adapter))
142 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 140 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
143 }
144 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); 141 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
145 } 142 }
146 143
147 return 0; 144 return 0;
148} 145}
149 146
150/* 147/*
@@ -179,13 +176,13 @@ static int vsc8244_set_speed_duplex(struct cphy *phy, int speed, int duplex)
179 176
180int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits) 177int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits)
181{ 178{
182 int ret; 179 int ret;
183 unsigned int val; 180 unsigned int val;
184 181
185 ret = mdio_read(phy, mmd, reg, &val); 182 ret = mdio_read(phy, mmd, reg, &val);
186 if (!ret) 183 if (!ret)
187 ret = mdio_write(phy, mmd, reg, val | bits); 184 ret = mdio_write(phy, mmd, reg, val | bits);
188 return ret; 185 return ret;
189} 186}
190 187
191static int vsc8244_autoneg_enable(struct cphy *cphy) 188static int vsc8244_autoneg_enable(struct cphy *cphy)
@@ -235,7 +232,7 @@ static int vsc8244_advertise(struct cphy *phy, unsigned int advertise_map)
235} 232}
236 233
237static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok, 234static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok,
238 int *speed, int *duplex, int *fc) 235 int *speed, int *duplex, int *fc)
239{ 236{
240 unsigned int bmcr, status, lpa, adv; 237 unsigned int bmcr, status, lpa, adv;
241 int err, sp = -1, dplx = -1, pause = 0; 238 int err, sp = -1, dplx = -1, pause = 0;
@@ -343,11 +340,13 @@ static struct cphy_ops vsc8244_ops = {
343 .get_link_status = vsc8244_get_link_status 340 .get_link_status = vsc8244_get_link_status
344}; 341};
345 342
346static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr, struct mdio_ops *mdio_ops) 343static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr,
344 struct mdio_ops *mdio_ops)
347{ 345{
348 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); 346 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
349 347
350 if (!cphy) return NULL; 348 if (!cphy)
349 return NULL;
351 350
352 cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops); 351 cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops);
353 352
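
t1_mdio_set_bits() above is a read-modify-write helper: it reads the MDIO register, ORs in the requested bits, and writes the result back only if the read succeeded. A usage sketch (the register and bit names are placeholders, not from this patch):

	/* SOME_VENDOR_REG / SOME_LOOPBACK_BIT are placeholders for illustration */
	err = t1_mdio_set_bits(cphy, 0 /* mmd */, SOME_VENDOR_REG, SOME_LOOPBACK_BIT);
	if (err)
		return err;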
diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/cxgb3/Makefile
new file mode 100644
index 000000000000..343467985321
--- /dev/null
+++ b/drivers/net/cxgb3/Makefile
@@ -0,0 +1,8 @@
1#
2# Chelsio T3 driver
3#
4
5obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
6
7cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
8 xgmac.o sge.o l2t.o cxgb3_offload.o
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
new file mode 100644
index 000000000000..5c97a64451ce
--- /dev/null
+++ b/drivers/net/cxgb3/adapter.h
@@ -0,0 +1,279 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/* This file should not be included directly. Include common.h instead. */
34
35#ifndef __T3_ADAPTER_H__
36#define __T3_ADAPTER_H__
37
38#include <linux/pci.h>
39#include <linux/spinlock.h>
40#include <linux/interrupt.h>
41#include <linux/timer.h>
42#include <linux/cache.h>
43#include <linux/mutex.h>
44#include "t3cdev.h"
45#include <asm/semaphore.h>
46#include <asm/bitops.h>
47#include <asm/io.h>
48
49typedef irqreturn_t(*intr_handler_t) (int, void *);
50
51struct vlan_group;
52
53struct port_info {
54 struct vlan_group *vlan_grp;
55 const struct port_type_info *port_type;
56 u8 port_id;
57 u8 rx_csum_offload;
58 u8 nqsets;
59 u8 first_qset;
60 struct cphy phy;
61 struct cmac mac;
62 struct link_config link_config;
63 struct net_device_stats netstats;
64 int activity;
65};
66
67enum { /* adapter flags */
68 FULL_INIT_DONE = (1 << 0),
69 USING_MSI = (1 << 1),
70 USING_MSIX = (1 << 2),
71 QUEUES_BOUND = (1 << 3),
72};
73
74struct rx_desc;
75struct rx_sw_desc;
76
77struct sge_fl { /* SGE per free-buffer list state */
78 unsigned int buf_size; /* size of each Rx buffer */
79 unsigned int credits; /* # of available Rx buffers */
80 unsigned int size; /* capacity of free list */
81 unsigned int cidx; /* consumer index */
82 unsigned int pidx; /* producer index */
83 unsigned int gen; /* free list generation */
84 struct rx_desc *desc; /* address of HW Rx descriptor ring */
85 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
86 dma_addr_t phys_addr; /* physical address of HW ring start */
87 unsigned int cntxt_id; /* SGE context id for the free list */
88 unsigned long empty; /* # of times queue ran out of buffers */
89};
90
91/*
92 * Bundle size for grouping offload RX packets for delivery to the stack.
93 * Don't make this too big as we do prefetch on each packet in a bundle.
94 */
95# define RX_BUNDLE_SIZE 8
96
97struct rsp_desc;
98
99struct sge_rspq { /* state for an SGE response queue */
100 unsigned int credits; /* # of pending response credits */
101 unsigned int size; /* capacity of response queue */
102 unsigned int cidx; /* consumer index */
103 unsigned int gen; /* current generation bit */
104 unsigned int polling; /* is the queue serviced through NAPI? */
105 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
106 unsigned int next_holdoff; /* holdoff time for next interrupt */
107 struct rsp_desc *desc; /* address of HW response ring */
108 dma_addr_t phys_addr; /* physical address of the ring */
109 unsigned int cntxt_id; /* SGE context id for the response q */
110 spinlock_t lock; /* guards response processing */
111 struct sk_buff *rx_head; /* offload packet receive queue head */
112 struct sk_buff *rx_tail; /* offload packet receive queue tail */
113
114 unsigned long offload_pkts;
115 unsigned long offload_bundles;
116 unsigned long eth_pkts; /* # of ethernet packets */
117 unsigned long pure_rsps; /* # of pure (non-data) responses */
118 unsigned long imm_data; /* responses with immediate data */
119 unsigned long rx_drops; /* # of packets dropped due to no mem */
120 unsigned long async_notif; /* # of asynchronous notification events */
121 unsigned long empty; /* # of times queue ran out of credits */
122 unsigned long nomem; /* # of responses deferred due to no mem */
123 unsigned long unhandled_irqs; /* # of spurious intrs */
124};
125
126struct tx_desc;
127struct tx_sw_desc;
128
129struct sge_txq { /* state for an SGE Tx queue */
130 unsigned long flags; /* HW DMA fetch status */
131 unsigned int in_use; /* # of in-use Tx descriptors */
132 unsigned int size; /* # of descriptors */
133 unsigned int processed; /* total # of descs HW has processed */
134 unsigned int cleaned; /* total # of descs SW has reclaimed */
135 unsigned int stop_thres; /* SW TX queue suspend threshold */
136 unsigned int cidx; /* consumer index */
137 unsigned int pidx; /* producer index */
138 unsigned int gen; /* current value of generation bit */
139 unsigned int unacked; /* Tx descriptors used since last COMPL */
140 struct tx_desc *desc; /* address of HW Tx descriptor ring */
141 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
142 spinlock_t lock; /* guards enqueueing of new packets */
143 unsigned int token; /* WR token */
144 dma_addr_t phys_addr; /* physical address of the ring */
145 struct sk_buff_head sendq; /* List of backpressured offload packets */
146 struct tasklet_struct qresume_tsk; /* restarts the queue */
147 unsigned int cntxt_id; /* SGE context id for the Tx q */
148 unsigned long stops; /* # of times q has been stopped */
149 unsigned long restarts; /* # of queue restarts */
150};
151
152enum { /* per port SGE statistics */
153 SGE_PSTAT_TSO, /* # of TSO requests */
154 SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
155 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
156 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
157 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
158
159 SGE_PSTAT_MAX /* must be last */
160};
161
162struct sge_qset { /* an SGE queue set */
163 struct sge_rspq rspq;
164 struct sge_fl fl[SGE_RXQ_PER_SET];
165 struct sge_txq txq[SGE_TXQ_PER_SET];
166 struct net_device *netdev; /* associated net device */
167 unsigned long txq_stopped; /* which Tx queues are stopped */
168 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
169 unsigned long port_stats[SGE_PSTAT_MAX];
170} ____cacheline_aligned;
171
172struct sge {
173 struct sge_qset qs[SGE_QSETS];
174 spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
175};
176
177struct adapter {
178 struct t3cdev tdev;
179 struct list_head adapter_list;
180 void __iomem *regs;
181 struct pci_dev *pdev;
182 unsigned long registered_device_map;
183 unsigned long open_device_map;
184 unsigned long flags;
185
186 const char *name;
187 int msg_enable;
188 unsigned int mmio_len;
189
190 struct adapter_params params;
191 unsigned int slow_intr_mask;
192 unsigned long irq_stats[IRQ_NUM_STATS];
193
194 struct {
195 unsigned short vec;
196 char desc[22];
197 } msix_info[SGE_QSETS + 1];
198
199 /* T3 modules */
200 struct sge sge;
201 struct mc7 pmrx;
202 struct mc7 pmtx;
203 struct mc7 cm;
204 struct mc5 mc5;
205
206 struct net_device *port[MAX_NPORTS];
207 unsigned int check_task_cnt;
208 struct delayed_work adap_check_task;
209 struct work_struct ext_intr_handler_task;
210
211 /*
212 * Dummy netdevices are needed when using multiple receive queues with
213 * NAPI as each netdevice can service only one queue.
214 */
215 struct net_device *dummy_netdev[SGE_QSETS - 1];
216
217 struct dentry *debugfs_root;
218
219 struct mutex mdio_lock;
220 spinlock_t stats_lock;
221 spinlock_t work_lock;
222};
223
224static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
225{
226 u32 val = readl(adapter->regs + reg_addr);
227
228 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
229 return val;
230}
231
232static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
233{
234 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
235 writel(val, adapter->regs + reg_addr);
236}
237
238static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
239{
240 return netdev_priv(adap->port[idx]);
241}
242
243/*
244 * We use the spare atalk_ptr to map a net device to its SGE queue set.
245 * This is a macro so it can be used as l-value.
246 */
247#define dev2qset(netdev) ((netdev)->atalk_ptr)
248
249#define OFFLOAD_DEVMAP_BIT 15
250
251#define tdev2adap(d) container_of(d, struct adapter, tdev)
252
253static inline int offload_running(struct adapter *adapter)
254{
255 return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
256}
257
258int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
259
260void t3_os_ext_intr_handler(struct adapter *adapter);
261void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
262 int speed, int duplex, int fc);
263
264void t3_sge_start(struct adapter *adap);
265void t3_sge_stop(struct adapter *adap);
266void t3_free_sge_resources(struct adapter *adap);
267void t3_sge_err_intr_handler(struct adapter *adapter);
268intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
269int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
270int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
271void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
272int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
273 int irq_vec_idx, const struct qset_params *p,
274 int ntxq, struct net_device *netdev);
275int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
276 unsigned char *data);
277irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
278
279#endif /* __T3_ADAPTER_H__ */
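
The inline accessors in adapter.h wrap readl()/writel() on adapter->regs and, when MMIO tracing is enabled via msg_enable, log each access through CH_DBG. A read-modify-write through them looks like this sketch (A_EXAMPLE_REG and F_EXAMPLE_BIT are placeholders, not cxgb3 registers):

	/* placeholders only; real register/field names live in regs.h */
	u32 v = t3_read_reg(adapter, A_EXAMPLE_REG);

	t3_write_reg(adapter, A_EXAMPLE_REG, v | F_EXAMPLE_BIT);

adap2pinfo() similarly hides the netdev_priv() lookup that turns a port index into its struct port_info.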
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
new file mode 100644
index 000000000000..73a41e6a5bfc
--- /dev/null
+++ b/drivers/net/cxgb3/ael1002.c
@@ -0,0 +1,251 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35enum {
36 AEL100X_TX_DISABLE = 9,
37 AEL100X_TX_CONFIG1 = 0xc002,
38 AEL1002_PWR_DOWN_HI = 0xc011,
39 AEL1002_PWR_DOWN_LO = 0xc012,
40 AEL1002_XFI_EQL = 0xc015,
41 AEL1002_LB_EN = 0xc017,
42
43 LASI_CTRL = 0x9002,
44 LASI_STAT = 0x9005
45};
46
47static void ael100x_txon(struct cphy *phy)
48{
49 int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
50
51 msleep(100);
52 t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
53 msleep(30);
54}
55
56static int ael1002_power_down(struct cphy *phy, int enable)
57{
58 int err;
59
60 err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
61 if (!err)
62 err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
63 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
64 return err;
65}
66
67static int ael1002_reset(struct cphy *phy, int wait)
68{
69 int err;
70
71 if ((err = ael1002_power_down(phy, 0)) ||
72 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
73 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
74 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
75 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
76 (err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
77 0, 1 << 5)))
78 return err;
79 return 0;
80}
81
82static int ael1002_intr_noop(struct cphy *phy)
83{
84 return 0;
85}
86
87static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
88 int *speed, int *duplex, int *fc)
89{
90 if (link_ok) {
91 unsigned int status;
92 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
93
94 /*
95 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
96 * once more to get the current link state.
97 */
98 if (!err && !(status & BMSR_LSTATUS))
99 err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
100 &status);
101 if (err)
102 return err;
103 *link_ok = !!(status & BMSR_LSTATUS);
104 }
105 if (speed)
106 *speed = SPEED_10000;
107 if (duplex)
108 *duplex = DUPLEX_FULL;
109 return 0;
110}
111
112static struct cphy_ops ael1002_ops = {
113 .reset = ael1002_reset,
114 .intr_enable = ael1002_intr_noop,
115 .intr_disable = ael1002_intr_noop,
116 .intr_clear = ael1002_intr_noop,
117 .intr_handler = ael1002_intr_noop,
118 .get_link_status = ael100x_get_link_status,
119 .power_down = ael1002_power_down,
120};
121
122void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
123 int phy_addr, const struct mdio_ops *mdio_ops)
124{
125 cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
126 ael100x_txon(phy);
127}
128
129static int ael1006_reset(struct cphy *phy, int wait)
130{
131 return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
132}
133
134static int ael1006_intr_enable(struct cphy *phy)
135{
136 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
137}
138
139static int ael1006_intr_disable(struct cphy *phy)
140{
141 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
142}
143
144static int ael1006_intr_clear(struct cphy *phy)
145{
146 u32 val;
147
148 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
149}
150
151static int ael1006_intr_handler(struct cphy *phy)
152{
153 unsigned int status;
154 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
155
156 if (err)
157 return err;
158 return (status & 1) ? cphy_cause_link_change : 0;
159}
160
161static int ael1006_power_down(struct cphy *phy, int enable)
162{
163 return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
164 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
165}
166
167static struct cphy_ops ael1006_ops = {
168 .reset = ael1006_reset,
169 .intr_enable = ael1006_intr_enable,
170 .intr_disable = ael1006_intr_disable,
171 .intr_clear = ael1006_intr_clear,
172 .intr_handler = ael1006_intr_handler,
173 .get_link_status = ael100x_get_link_status,
174 .power_down = ael1006_power_down,
175};
176
177void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
178 int phy_addr, const struct mdio_ops *mdio_ops)
179{
180 cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
181 ael100x_txon(phy);
182}
183
184static struct cphy_ops qt2045_ops = {
185 .reset = ael1006_reset,
186 .intr_enable = ael1006_intr_enable,
187 .intr_disable = ael1006_intr_disable,
188 .intr_clear = ael1006_intr_clear,
189 .intr_handler = ael1006_intr_handler,
190 .get_link_status = ael100x_get_link_status,
191 .power_down = ael1006_power_down,
192};
193
194void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
195 int phy_addr, const struct mdio_ops *mdio_ops)
196{
197 unsigned int stat;
198
199 cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
200
201 /*
202 * Some cards where the PHY is supposed to be at address 0 actually
203 * have it at 1.
204 */
205 if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
206 stat == 0xffff)
207 phy->addr = 1;
208}
209
210static int xaui_direct_reset(struct cphy *phy, int wait)
211{
212 return 0;
213}
214
215static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
216 int *speed, int *duplex, int *fc)
217{
218 if (link_ok) {
219 unsigned int status;
220
221 status = t3_read_reg(phy->adapter,
222 XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
223 *link_ok = !(status & F_LOWSIG0);
224 }
225 if (speed)
226 *speed = SPEED_10000;
227 if (duplex)
228 *duplex = DUPLEX_FULL;
229 return 0;
230}
231
232static int xaui_direct_power_down(struct cphy *phy, int enable)
233{
234 return 0;
235}
236
237static struct cphy_ops xaui_direct_ops = {
238 .reset = xaui_direct_reset,
239 .intr_enable = ael1002_intr_noop,
240 .intr_disable = ael1002_intr_noop,
241 .intr_clear = ael1002_intr_noop,
242 .intr_handler = ael1002_intr_noop,
243 .get_link_status = xaui_direct_get_link_status,
244 .power_down = xaui_direct_power_down,
245};
246
247void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
248 int phy_addr, const struct mdio_ops *mdio_ops)
249{
250 cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
251}
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
new file mode 100644
index 000000000000..e23deeb7d06d
--- /dev/null
+++ b/drivers/net/cxgb3/common.h
@@ -0,0 +1,729 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHELSIO_COMMON_H
33#define __CHELSIO_COMMON_H
34
35#include <linux/kernel.h>
36#include <linux/types.h>
37#include <linux/ctype.h>
38#include <linux/delay.h>
39#include <linux/init.h>
40#include <linux/netdevice.h>
41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include "version.h"
44
45#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
46#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
47#define CH_ALERT(adap, fmt, ...) \
48 dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
49
50/*
51 * More powerful macro that selectively prints messages based on msg_enable.
52 * For info and debugging messages.
53 */
54#define CH_MSG(adapter, level, category, fmt, ...) do { \
55 if ((adapter)->msg_enable & NETIF_MSG_##category) \
56 dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
57 ## __VA_ARGS__); \
58} while (0)
59
60#ifdef DEBUG
61# define CH_DBG(adapter, category, fmt, ...) \
62 CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
63#else
64# define CH_DBG(adapter, category, fmt, ...)
65#endif
66
67/* Additional NETIF_MSG_* categories */
68#define NETIF_MSG_MMIO 0x8000000
69
70struct t3_rx_mode {
71 struct net_device *dev;
72 struct dev_mc_list *mclist;
73 unsigned int idx;
74};
75
76static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
77 struct dev_mc_list *mclist)
78{
79 p->dev = dev;
80 p->mclist = mclist;
81 p->idx = 0;
82}
83
84static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
85{
86 u8 *addr = NULL;
87
88 if (rm->mclist && rm->idx < rm->dev->mc_count) {
89 addr = rm->mclist->dmi_addr;
90 rm->mclist = rm->mclist->next;
91 rm->idx++;
92 }
93 return addr;
94}
95
96enum {
97 MAX_NPORTS = 2, /* max # of ports */
98 MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
99 EEPROMSIZE = 8192, /* Serial EEPROM size */
100 RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
101 TCB_SIZE = 128, /* TCB size */
102 NMTUS = 16, /* size of MTU table */
103 NCCTRL_WIN = 32, /* # of congestion control windows */
104};
105
106#define MAX_RX_COALESCING_LEN 16224U
107
108enum {
109 PAUSE_RX = 1 << 0,
110 PAUSE_TX = 1 << 1,
111 PAUSE_AUTONEG = 1 << 2
112};
113
114enum {
115 SUPPORTED_OFFLOAD = 1 << 24,
116 SUPPORTED_IRQ = 1 << 25
117};
118
119enum { /* adapter interrupt-maintained statistics */
120 STAT_ULP_CH0_PBL_OOB,
121 STAT_ULP_CH1_PBL_OOB,
122 STAT_PCI_CORR_ECC,
123
124 IRQ_NUM_STATS /* keep last */
125};
126
127enum {
128 SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
129 SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
130 SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
131};
132
133enum sge_context_type { /* SGE egress context types */
134 SGE_CNTXT_RDMA = 0,
135 SGE_CNTXT_ETH = 2,
136 SGE_CNTXT_OFLD = 4,
137 SGE_CNTXT_CTRL = 5
138};
139
140enum {
141 AN_PKT_SIZE = 32, /* async notification packet size */
142 IMMED_PKT_SIZE = 48 /* packet size for immediate data */
143};
144
145struct sg_ent { /* SGE scatter/gather entry */
146 u32 len[2];
147 u64 addr[2];
148};
149
150#ifndef SGE_NUM_GENBITS
151/* Must be 1 or 2 */
152# define SGE_NUM_GENBITS 2
153#endif
154
155#define TX_DESC_FLITS 16U
156#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
157
158struct cphy;
159struct adapter;
160
161struct mdio_ops {
162 int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
163 int reg_addr, unsigned int *val);
164 int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
165 int reg_addr, unsigned int val);
166};
167
168struct adapter_info {
169 unsigned char nports; /* # of ports */
170 unsigned char phy_base_addr; /* MDIO PHY base address */
171 unsigned char mdien;
172 unsigned char mdiinv;
173 unsigned int gpio_out; /* GPIO output settings */
174 unsigned int gpio_intr; /* GPIO IRQ enable mask */
175 unsigned long caps; /* adapter capabilities */
176 const struct mdio_ops *mdio_ops; /* MDIO operations */
177 const char *desc; /* product description */
178};
179
180struct port_type_info {
181 void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
182 int phy_addr, const struct mdio_ops *ops);
183 unsigned int caps;
184 const char *desc;
185};
186
187struct mc5_stats {
188 unsigned long parity_err;
189 unsigned long active_rgn_full;
190 unsigned long nfa_srch_err;
191 unsigned long unknown_cmd;
192 unsigned long reqq_parity_err;
193 unsigned long dispq_parity_err;
194 unsigned long del_act_empty;
195};
196
197struct mc7_stats {
198 unsigned long corr_err;
199 unsigned long uncorr_err;
200 unsigned long parity_err;
201 unsigned long addr_err;
202};
203
204struct mac_stats {
205 u64 tx_octets; /* total # of octets in good frames */
206 u64 tx_octets_bad; /* total # of octets in error frames */
207 u64 tx_frames; /* all good frames */
208 u64 tx_mcast_frames; /* good multicast frames */
209 u64 tx_bcast_frames; /* good broadcast frames */
210 u64 tx_pause; /* # of transmitted pause frames */
211 u64 tx_deferred; /* frames with deferred transmissions */
212 u64 tx_late_collisions; /* # of late collisions */
213 u64 tx_total_collisions; /* # of total collisions */
214	u64 tx_excess_collisions;	/* frame errors from excessive collisions */
215 u64 tx_underrun; /* # of Tx FIFO underruns */
216 u64 tx_len_errs; /* # of Tx length errors */
217 u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
218 u64 tx_excess_deferral; /* # of frames with excessive deferral */
219 u64 tx_fcs_errs; /* # of frames with bad FCS */
220
221 u64 tx_frames_64; /* # of Tx frames in a particular range */
222 u64 tx_frames_65_127;
223 u64 tx_frames_128_255;
224 u64 tx_frames_256_511;
225 u64 tx_frames_512_1023;
226 u64 tx_frames_1024_1518;
227 u64 tx_frames_1519_max;
228
229 u64 rx_octets; /* total # of octets in good frames */
230 u64 rx_octets_bad; /* total # of octets in error frames */
231 u64 rx_frames; /* all good frames */
232 u64 rx_mcast_frames; /* good multicast frames */
233 u64 rx_bcast_frames; /* good broadcast frames */
234 u64 rx_pause; /* # of received pause frames */
235 u64 rx_fcs_errs; /* # of received frames with bad FCS */
236 u64 rx_align_errs; /* alignment errors */
237 u64 rx_symbol_errs; /* symbol errors */
238 u64 rx_data_errs; /* data errors */
239 u64 rx_sequence_errs; /* sequence errors */
240 u64 rx_runt; /* # of runt frames */
241 u64 rx_jabber; /* # of jabber frames */
242 u64 rx_short; /* # of short frames */
243 u64 rx_too_long; /* # of oversized frames */
244 u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
245
246 u64 rx_frames_64; /* # of Rx frames in a particular range */
247 u64 rx_frames_65_127;
248 u64 rx_frames_128_255;
249 u64 rx_frames_256_511;
250 u64 rx_frames_512_1023;
251 u64 rx_frames_1024_1518;
252 u64 rx_frames_1519_max;
253
254 u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
255
256 unsigned long tx_fifo_parity_err;
257 unsigned long rx_fifo_parity_err;
258 unsigned long tx_fifo_urun;
259 unsigned long rx_fifo_ovfl;
260 unsigned long serdes_signal_loss;
261 unsigned long xaui_pcs_ctc_err;
262 unsigned long xaui_pcs_align_change;
263};
264
265struct tp_mib_stats {
266 u32 ipInReceive_hi;
267 u32 ipInReceive_lo;
268 u32 ipInHdrErrors_hi;
269 u32 ipInHdrErrors_lo;
270 u32 ipInAddrErrors_hi;
271 u32 ipInAddrErrors_lo;
272 u32 ipInUnknownProtos_hi;
273 u32 ipInUnknownProtos_lo;
274 u32 ipInDiscards_hi;
275 u32 ipInDiscards_lo;
276 u32 ipInDelivers_hi;
277 u32 ipInDelivers_lo;
278 u32 ipOutRequests_hi;
279 u32 ipOutRequests_lo;
280 u32 ipOutDiscards_hi;
281 u32 ipOutDiscards_lo;
282 u32 ipOutNoRoutes_hi;
283 u32 ipOutNoRoutes_lo;
284 u32 ipReasmTimeout;
285 u32 ipReasmReqds;
286 u32 ipReasmOKs;
287 u32 ipReasmFails;
288
289 u32 reserved[8];
290
291 u32 tcpActiveOpens;
292 u32 tcpPassiveOpens;
293 u32 tcpAttemptFails;
294 u32 tcpEstabResets;
295 u32 tcpOutRsts;
296 u32 tcpCurrEstab;
297 u32 tcpInSegs_hi;
298 u32 tcpInSegs_lo;
299 u32 tcpOutSegs_hi;
300 u32 tcpOutSegs_lo;
301 u32 tcpRetransSeg_hi;
302 u32 tcpRetransSeg_lo;
303 u32 tcpInErrs_hi;
304 u32 tcpInErrs_lo;
305 u32 tcpRtoMin;
306 u32 tcpRtoMax;
307};
308
309struct tp_params {
310 unsigned int nchan; /* # of channels */
311 unsigned int pmrx_size; /* total PMRX capacity */
312 unsigned int pmtx_size; /* total PMTX capacity */
313 unsigned int cm_size; /* total CM capacity */
314 unsigned int chan_rx_size; /* per channel Rx size */
315 unsigned int chan_tx_size; /* per channel Tx size */
316 unsigned int rx_pg_size; /* Rx page size */
317 unsigned int tx_pg_size; /* Tx page size */
318 unsigned int rx_num_pgs; /* # of Rx pages */
319 unsigned int tx_num_pgs; /* # of Tx pages */
320 unsigned int ntimer_qs; /* # of timer queues */
321};
322
323struct qset_params { /* SGE queue set parameters */
324 unsigned int polling; /* polling/interrupt service for rspq */
325 unsigned int coalesce_usecs; /* irq coalescing timer */
326 unsigned int rspq_size; /* # of entries in response queue */
327 unsigned int fl_size; /* # of entries in regular free list */
328 unsigned int jumbo_size; /* # of entries in jumbo free list */
329 unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
330 unsigned int cong_thres; /* FL congestion threshold */
331};
332
333struct sge_params {
334 unsigned int max_pkt_size; /* max offload pkt size */
335 struct qset_params qset[SGE_QSETS];
336};
337
338struct mc5_params {
339 unsigned int mode; /* selects MC5 width */
340 unsigned int nservers; /* size of server region */
341 unsigned int nfilters; /* size of filter region */
342 unsigned int nroutes; /* size of routing region */
343};
344
345/* Default MC5 region sizes */
346enum {
347 DEFAULT_NSERVERS = 512,
348 DEFAULT_NFILTERS = 128
349};
350
351/* MC5 modes, these must be non-0 */
352enum {
353 MC5_MODE_144_BIT = 1,
354 MC5_MODE_72_BIT = 2
355};
356
357struct vpd_params {
358 unsigned int cclk;
359 unsigned int mclk;
360 unsigned int uclk;
361 unsigned int mdc;
362 unsigned int mem_timing;
363 u8 eth_base[6];
364 u8 port_type[MAX_NPORTS];
365 unsigned short xauicfg[2];
366};
367
368struct pci_params {
369 unsigned int vpd_cap_addr;
370 unsigned int pcie_cap_addr;
371 unsigned short speed;
372 unsigned char width;
373 unsigned char variant;
374};
375
376enum {
377 PCI_VARIANT_PCI,
378 PCI_VARIANT_PCIX_MODE1_PARITY,
379 PCI_VARIANT_PCIX_MODE1_ECC,
380 PCI_VARIANT_PCIX_266_MODE2,
381 PCI_VARIANT_PCIE
382};
383
384struct adapter_params {
385 struct sge_params sge;
386 struct mc5_params mc5;
387 struct tp_params tp;
388 struct vpd_params vpd;
389 struct pci_params pci;
390
391 const struct adapter_info *info;
392
393 unsigned short mtus[NMTUS];
394 unsigned short a_wnd[NCCTRL_WIN];
395 unsigned short b_wnd[NCCTRL_WIN];
396
397 unsigned int nports; /* # of ethernet ports */
398 unsigned int stats_update_period; /* MAC stats accumulation period */
399 unsigned int linkpoll_period; /* link poll period in 0.1s */
400 unsigned int rev; /* chip revision */
401};
402
403struct trace_params {
404 u32 sip;
405 u32 sip_mask;
406 u32 dip;
407 u32 dip_mask;
408 u16 sport;
409 u16 sport_mask;
410 u16 dport;
411 u16 dport_mask;
412 u32 vlan:12;
413 u32 vlan_mask:12;
414 u32 intf:4;
415 u32 intf_mask:4;
416 u8 proto;
417 u8 proto_mask;
418};
419
420struct link_config {
421 unsigned int supported; /* link capabilities */
422 unsigned int advertising; /* advertised capabilities */
423 unsigned short requested_speed; /* speed user has requested */
424 unsigned short speed; /* actual link speed */
425 unsigned char requested_duplex; /* duplex user has requested */
426 unsigned char duplex; /* actual link duplex */
427 unsigned char requested_fc; /* flow control user has requested */
428 unsigned char fc; /* actual link flow control */
429 unsigned char autoneg; /* autonegotiating? */
430 unsigned int link_ok; /* link up? */
431};
432
433#define SPEED_INVALID 0xffff
434#define DUPLEX_INVALID 0xff
435
436struct mc5 {
437 struct adapter *adapter;
438 unsigned int tcam_size;
439 unsigned char part_type;
440 unsigned char parity_enabled;
441 unsigned char mode;
442 struct mc5_stats stats;
443};
444
445static inline unsigned int t3_mc5_size(const struct mc5 *p)
446{
447 return p->tcam_size;
448}
449
450struct mc7 {
451 struct adapter *adapter; /* backpointer to adapter */
452 unsigned int size; /* memory size in bytes */
453 unsigned int width; /* MC7 interface width */
454 unsigned int offset; /* register address offset for MC7 instance */
455 const char *name; /* name of MC7 instance */
456 struct mc7_stats stats; /* MC7 statistics */
457};
458
459static inline unsigned int t3_mc7_size(const struct mc7 *p)
460{
461 return p->size;
462}
463
464struct cmac {
465 struct adapter *adapter;
466 unsigned int offset;
467 unsigned int nucast; /* # of address filters for unicast MACs */
468 struct mac_stats stats;
469};
470
471enum {
472 MAC_DIRECTION_RX = 1,
473 MAC_DIRECTION_TX = 2,
474 MAC_RXFIFO_SIZE = 32768
475};
476
477/* IEEE 802.3ae specified MDIO devices */
478enum {
479 MDIO_DEV_PMA_PMD = 1,
480 MDIO_DEV_WIS = 2,
481 MDIO_DEV_PCS = 3,
482 MDIO_DEV_XGXS = 4
483};
484
485/* PHY loopback direction */
486enum {
487 PHY_LOOPBACK_TX = 1,
488 PHY_LOOPBACK_RX = 2
489};
490
491/* PHY interrupt types */
492enum {
493 cphy_cause_link_change = 1,
494 cphy_cause_fifo_error = 2
495};
496
497/* PHY operations */
498struct cphy_ops {
499 void (*destroy)(struct cphy *phy);
500 int (*reset)(struct cphy *phy, int wait);
501
502 int (*intr_enable)(struct cphy *phy);
503 int (*intr_disable)(struct cphy *phy);
504 int (*intr_clear)(struct cphy *phy);
505 int (*intr_handler)(struct cphy *phy);
506
507 int (*autoneg_enable)(struct cphy *phy);
508 int (*autoneg_restart)(struct cphy *phy);
509
510 int (*advertise)(struct cphy *phy, unsigned int advertise_map);
511 int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
512 int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
513 int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
514 int *duplex, int *fc);
515 int (*power_down)(struct cphy *phy, int enable);
516};
517
518/* A PHY instance */
519struct cphy {
520 int addr; /* PHY address */
521 struct adapter *adapter; /* associated adapter */
522 unsigned long fifo_errors; /* FIFO over/under-flows */
523 const struct cphy_ops *ops; /* PHY operations */
524 int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
525 int reg_addr, unsigned int *val);
526 int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
527 int reg_addr, unsigned int val);
528};
529
530/* Convenience MDIO read/write wrappers */
531static inline int mdio_read(struct cphy *phy, int mmd, int reg,
532 unsigned int *valp)
533{
534 return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
535}
536
537static inline int mdio_write(struct cphy *phy, int mmd, int reg,
538 unsigned int val)
539{
540 return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
541}
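
/*
 * Editorial sketch (not part of the original patch): reading a register
 * through the wrappers above.  The function name and the register number
 * are purely illustrative.
 */
static inline int example_read_pma_reg(struct cphy *phy, unsigned int *valp)
{
	/* MDIO_DEV_PMA_PMD (defined earlier) selects the PMA/PMD MMD;
	 * register 2 is shown only as an example address. */
	return mdio_read(phy, MDIO_DEV_PMA_PMD, 2, valp);
}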
542
543/* Convenience initializer */
544static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
545 int phy_addr, struct cphy_ops *phy_ops,
546 const struct mdio_ops *mdio_ops)
547{
548 phy->adapter = adapter;
549 phy->addr = phy_addr;
550 phy->ops = phy_ops;
551 if (mdio_ops) {
552 phy->mdio_read = mdio_ops->read;
553 phy->mdio_write = mdio_ops->write;
554 }
555}
556
557/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
558#define MAC_STATS_ACCUM_SECS 180
559
560#define XGM_REG(reg_addr, idx) \
561 ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
562
563struct addr_val_pair {
564 unsigned int reg_addr;
565 unsigned int val;
566};
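
/*
 * Editorial sketch (not part of the original patch): a table of this form
 * is what t3_write_regs(), declared later in this header, consumes.  The
 * addresses and values below are placeholders, not real T3 registers.
 */
static const struct addr_val_pair example_init_regs[] = {
	{ 0x100, 0x00000001 },	/* hypothetical register/value pair */
	{ 0x104, 0x000000ff },	/* hypothetical register/value pair */
};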
567
568#include "adapter.h"
569
570#ifndef PCI_VENDOR_ID_CHELSIO
571# define PCI_VENDOR_ID_CHELSIO 0x1425
572#endif
573
574#define for_each_port(adapter, iter) \
575 for (iter = 0; iter < (adapter)->params.nports; ++iter)
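
/*
 * Editorial sketch (not part of the original patch): typical use of the
 * iterator above.  The function name is hypothetical.
 */
static inline unsigned int example_count_ports(const struct adapter *adap)
{
	unsigned int n = 0;
	int i;

	for_each_port(adap, i)
		n++;
	return n;
}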
576
577#define adapter_info(adap) ((adap)->params.info)
578
579static inline int uses_xaui(const struct adapter *adap)
580{
581 return adapter_info(adap)->caps & SUPPORTED_AUI;
582}
583
584static inline int is_10G(const struct adapter *adap)
585{
586 return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
587}
588
589static inline int is_offload(const struct adapter *adap)
590{
591 return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
592}
593
594static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
595{
596 return adap->params.vpd.cclk / 1000;
597}
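
/*
 * Editorial sketch (not part of the original patch): converting a delay in
 * microseconds into core clock ticks with the helper above.  The function
 * name is hypothetical.
 */
static inline unsigned int example_usecs_to_core_ticks(const struct adapter *adap,
						       unsigned int us)
{
	return us * core_ticks_per_usec(adap);
}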
598
599static inline unsigned int is_pcie(const struct adapter *adap)
600{
601 return adap->params.pci.variant == PCI_VARIANT_PCIE;
602}
603
604void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
605 u32 val);
606void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
607 int n, unsigned int offset);
608int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
609 int polarity, int attempts, int delay, u32 *valp);
610static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
611 int polarity, int attempts, int delay)
612{
613 return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
614 delay, NULL);
615}
616int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
617 unsigned int set);
618int t3_phy_reset(struct cphy *phy, int mmd, int wait);
619int t3_phy_advertise(struct cphy *phy, unsigned int advert);
620int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
621
622void t3_intr_enable(struct adapter *adapter);
623void t3_intr_disable(struct adapter *adapter);
624void t3_intr_clear(struct adapter *adapter);
625void t3_port_intr_enable(struct adapter *adapter, int idx);
626void t3_port_intr_disable(struct adapter *adapter, int idx);
627void t3_port_intr_clear(struct adapter *adapter, int idx);
628int t3_slow_intr_handler(struct adapter *adapter);
629int t3_phy_intr_handler(struct adapter *adapter);
630
631void t3_link_changed(struct adapter *adapter, int port_id);
632int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
633const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
634int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
635int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
636int t3_seeprom_wp(struct adapter *adapter, int enable);
637int t3_read_flash(struct adapter *adapter, unsigned int addr,
638 unsigned int nwords, u32 *data, int byte_oriented);
639int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
640int t3_get_fw_version(struct adapter *adapter, u32 *vers);
641int t3_check_fw_version(struct adapter *adapter);
642int t3_init_hw(struct adapter *adapter, u32 fw_params);
643void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
644void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
645int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
646 int reset);
647void t3_led_ready(struct adapter *adapter);
648void t3_fatal_err(struct adapter *adapter);
649void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
650void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
651 const u8 * cpus, const u16 *rspq);
652int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
653int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
654int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
655 unsigned int n, unsigned int *valp);
656int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
657 u64 *buf);
658
659int t3_mac_reset(struct cmac *mac);
660void t3b_pcs_reset(struct cmac *mac);
661int t3_mac_enable(struct cmac *mac, int which);
662int t3_mac_disable(struct cmac *mac, int which);
663int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
664int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
665int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
666int t3_mac_set_num_ucast(struct cmac *mac, int n);
667const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
668int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
669
670void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
671int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
672 unsigned int nroutes);
673void t3_mc5_intr_handler(struct mc5 *mc5);
674int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
675 u32 *buf);
676
677int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
678void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
679void t3_tp_set_offload_mode(struct adapter *adap, int enable);
680void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
681void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
682 unsigned short alpha[NCCTRL_WIN],
683 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
684void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
685void t3_get_cong_cntl_tab(struct adapter *adap,
686 unsigned short incr[NMTUS][NCCTRL_WIN]);
687void t3_config_trace_filter(struct adapter *adapter,
688 const struct trace_params *tp, int filter_index,
689 int invert, int enable);
690int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
691
692void t3_sge_prep(struct adapter *adap, struct sge_params *p);
693void t3_sge_init(struct adapter *adap, struct sge_params *p);
694int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
695 enum sge_context_type type, int respq, u64 base_addr,
696 unsigned int size, unsigned int token, int gen,
697 unsigned int cidx);
698int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
699 int gts_enable, u64 base_addr, unsigned int size,
700 unsigned int esize, unsigned int cong_thres, int gen,
701 unsigned int cidx);
702int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
703 int irq_vec_idx, u64 base_addr, unsigned int size,
704 unsigned int fl_thres, int gen, unsigned int cidx);
705int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
706 unsigned int size, int rspq, int ovfl_mode,
707 unsigned int credits, unsigned int credit_thres);
708int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
709int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
710int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
711int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
712int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
713int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
714int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
715int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
716int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
717 unsigned int credits);
718
719void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
720 int phy_addr, const struct mdio_ops *mdio_ops);
721void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
722 int phy_addr, const struct mdio_ops *mdio_ops);
723void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
724 int phy_addr, const struct mdio_ops *mdio_ops);
725void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
726 const struct mdio_ops *mdio_ops);
727void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
728 int phy_addr, const struct mdio_ops *mdio_ops);
729#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
new file mode 100644
index 000000000000..2095ddacff78
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -0,0 +1,164 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
33#define _CXGB3_OFFLOAD_CTL_DEFS_H
34
35enum {
36 GET_MAX_OUTSTANDING_WR,
37 GET_TX_MAX_CHUNK,
38 GET_TID_RANGE,
39 GET_STID_RANGE,
40 GET_RTBL_RANGE,
41 GET_L2T_CAPACITY,
42 GET_MTUS,
43 GET_WR_LEN,
44 GET_IFF_FROM_MAC,
45 GET_DDP_PARAMS,
46 GET_PORTS,
47
48 ULP_ISCSI_GET_PARAMS,
49 ULP_ISCSI_SET_PARAMS,
50
51 RDMA_GET_PARAMS,
52 RDMA_CQ_OP,
53 RDMA_CQ_SETUP,
54 RDMA_CQ_DISABLE,
55 RDMA_CTRL_QP_SETUP,
56 RDMA_GET_MEM,
57};
58
59/*
60 * Structure used to describe a TID range. Valid TIDs are [base, base+num).
61 */
62struct tid_range {
63 unsigned int base; /* first TID */
64 unsigned int num; /* number of TIDs in range */
65};
66
67/*
68 * Structure used to request the size and contents of the MTU table.
69 */
70struct mtutab {
71 unsigned int size; /* # of entries in the MTU table */
72 const unsigned short *mtus; /* the MTU table values */
73};
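
/*
 * Editorial usage sketch (not part of the original patch): an offload
 * client would normally obtain this table via the driver's control hook,
 * along the lines of
 *
 *	struct mtutab mtutab;
 *
 *	if (tdev->ctl(tdev, GET_MTUS, &mtutab) == 0)
 *		... use mtutab.size entries of mtutab.mtus[] ...
 *
 * where "tdev" is a struct t3cdev whose ->ctl() method is assumed to be
 * the one declared in t3cdev.h.
 */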
74
75struct net_device;
76
77/*
78 * Structure used to request the adapter net_device owning a given MAC address.
79 */
80struct iff_mac {
81 struct net_device *dev; /* the net_device */
82 const unsigned char *mac_addr; /* MAC address to lookup */
83 u16 vlan_tag;
84};
85
86struct pci_dev;
87
88/*
89 * Structure used to request the TCP DDP parameters.
90 */
91struct ddp_params {
92 unsigned int llimit; /* TDDP region start address */
93 unsigned int ulimit; /* TDDP region end address */
94 unsigned int tag_mask; /* TDDP tag mask */
95 struct pci_dev *pdev;
96};
97
98struct adap_ports {
99 unsigned int nports; /* number of ports on this adapter */
100 struct net_device *lldevs[2];
101};
102
103/*
104 * Structure used to return information to the iSCSI layer.
105 */
106struct ulp_iscsi_info {
107 unsigned int offset;
108 unsigned int llimit;
109 unsigned int ulimit;
110 unsigned int tagmask;
111 unsigned int pgsz3;
112 unsigned int pgsz2;
113 unsigned int pgsz1;
114 unsigned int pgsz0;
115 unsigned int max_rxsz;
116 unsigned int max_txsz;
117 struct pci_dev *pdev;
118};
119
120/*
121 * Structure used to return information to the RDMA layer.
122 */
123struct rdma_info {
124 unsigned int tpt_base; /* TPT base address */
125 unsigned int tpt_top; /* TPT last entry address */
126 unsigned int pbl_base; /* PBL base address */
127 unsigned int pbl_top; /* PBL last entry address */
128 unsigned int rqt_base; /* RQT base address */
129 unsigned int rqt_top; /* RQT last entry address */
130 unsigned int udbell_len; /* user doorbell region length */
131 unsigned long udbell_physbase; /* user doorbell physical start addr */
132 void __iomem *kdb_addr; /* kernel doorbell register address */
133 struct pci_dev *pdev; /* associated PCI device */
134};
135
136/*
137 * Structure used to request an operation on an RDMA completion queue.
138 */
139struct rdma_cq_op {
140 unsigned int id;
141 unsigned int op;
142 unsigned int credits;
143};
144
145/*
146 * Structure used to setup RDMA completion queues.
147 */
148struct rdma_cq_setup {
149 unsigned int id;
150 unsigned long long base_addr;
151 unsigned int size;
152 unsigned int credits;
153 unsigned int credit_thres;
154 unsigned int ovfl_mode;
155};
156
157/*
158 * Structure used to setup the RDMA control egress context.
159 */
160struct rdma_ctrlqp_setup {
161 unsigned long long base_addr;
162 unsigned int size;
163};
164#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
new file mode 100644
index 000000000000..16e004990c59
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_DEFS_H
34#define _CHELSIO_DEFS_H
35
36#include <linux/skbuff.h>
37#include <net/tcp.h>
38
39#include "t3cdev.h"
40
41#include "cxgb3_offload.h"
42
43#define VALIDATE_TID 1
44
45void *cxgb_alloc_mem(unsigned long size);
46void cxgb_free_mem(void *addr);
47void cxgb_neigh_update(struct neighbour *neigh);
48void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
49
50/*
51 * Map an ATID or STID to their entries in the corresponding TID tables.
52 */
53static inline union active_open_entry *atid2entry(const struct tid_info *t,
54 unsigned int atid)
55{
56 return &t->atid_tab[atid - t->atid_base];
57}
58
59static inline union listen_entry *stid2entry(const struct tid_info *t,
60 unsigned int stid)
61{
62 return &t->stid_tab[stid - t->stid_base];
63}
64
65/*
66 * Find the connection corresponding to a TID.
67 */
68static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
69 unsigned int tid)
70{
71 return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
72}
73
74/*
75 * Find the connection corresponding to a server TID.
76 */
77static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
78 unsigned int tid)
79{
80 if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
81 return NULL;
82 return &(stid2entry(t, tid)->t3c_tid);
83}
84
85/*
86 * Find the connection corresponding to an active-open TID.
87 */
88static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
89 unsigned int tid)
90{
91 if (tid < t->atid_base || tid >= t->atid_base + t->natids)
92 return NULL;
93 return &(atid2entry(t, tid)->t3c_tid);
94}
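
/*
 * Editorial usage sketch (not part of the original patch): a CPL message
 * handler typically resolves the TID it carries to per-connection state
 * with one of the helpers above, e.g.
 *
 *	struct t3c_tid_entry *e = lookup_tid(t, tid);
 *
 *	if (!e)
 *		... unknown connection, drop the message ...
 *
 * lookup_stid() and lookup_atid() are used the same way for listening and
 * active-open TIDs respectively.
 */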
95
96int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
97int attach_t3cdev(struct t3cdev *dev);
98void detach_t3cdev(struct t3cdev *dev);
99#endif
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
new file mode 100644
index 000000000000..a94281861a66
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -0,0 +1,185 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHIOCTL_H__
33#define __CHIOCTL_H__
34
35/*
36 * Ioctl commands specific to this driver.
37 */
38enum {
39 CHELSIO_SETREG = 1024,
40 CHELSIO_GETREG,
41 CHELSIO_SETTPI,
42 CHELSIO_GETTPI,
43 CHELSIO_GETMTUTAB,
44 CHELSIO_SETMTUTAB,
45 CHELSIO_GETMTU,
46 CHELSIO_SET_PM,
47 CHELSIO_GET_PM,
48 CHELSIO_GET_TCAM,
49 CHELSIO_SET_TCAM,
50 CHELSIO_GET_TCB,
51 CHELSIO_GET_MEM,
52 CHELSIO_LOAD_FW,
53 CHELSIO_GET_PROTO,
54 CHELSIO_SET_PROTO,
55 CHELSIO_SET_TRACE_FILTER,
56 CHELSIO_SET_QSET_PARAMS,
57 CHELSIO_GET_QSET_PARAMS,
58 CHELSIO_SET_QSET_NUM,
59 CHELSIO_GET_QSET_NUM,
60 CHELSIO_SET_PKTSCHED,
61};
62
63struct ch_reg {
64 uint32_t cmd;
65 uint32_t addr;
66 uint32_t val;
67};
68
69struct ch_cntxt {
70 uint32_t cmd;
71 uint32_t cntxt_type;
72 uint32_t cntxt_id;
73 uint32_t data[4];
74};
75
76/* context types */
77enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
78
79struct ch_desc {
80 uint32_t cmd;
81 uint32_t queue_num;
82 uint32_t idx;
83 uint32_t size;
84 uint8_t data[128];
85};
86
87struct ch_mem_range {
88 uint32_t cmd;
89 uint32_t mem_id;
90 uint32_t addr;
91 uint32_t len;
92 uint32_t version;
93 uint8_t buf[0];
94};
95
96struct ch_qset_params {
97 uint32_t cmd;
98 uint32_t qset_idx;
99 int32_t txq_size[3];
100 int32_t rspq_size;
101 int32_t fl_size[2];
102 int32_t intr_lat;
103 int32_t polling;
104 int32_t cong_thres;
105};
106
107struct ch_pktsched_params {
108 uint32_t cmd;
109 uint8_t sched;
110 uint8_t idx;
111 uint8_t min;
112 uint8_t max;
113 uint8_t binding;
114};
115
116#ifndef TCB_SIZE
117# define TCB_SIZE 128
118#endif
119
120/* TCB size in 32-bit words */
121#define TCB_WORDS (TCB_SIZE / 4)
122
123enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
124
125struct ch_mtus {
126 uint32_t cmd;
127 uint32_t nmtus;
128 uint16_t mtus[NMTUS];
129};
130
131struct ch_pm {
132 uint32_t cmd;
133 uint32_t tx_pg_sz;
134 uint32_t tx_num_pg;
135 uint32_t rx_pg_sz;
136 uint32_t rx_num_pg;
137 uint32_t pm_total;
138};
139
140struct ch_tcam {
141 uint32_t cmd;
142 uint32_t tcam_size;
143 uint32_t nservers;
144 uint32_t nroutes;
145 uint32_t nfilters;
146};
147
148struct ch_tcb {
149 uint32_t cmd;
150 uint32_t tcb_index;
151 uint32_t tcb_data[TCB_WORDS];
152};
153
154struct ch_tcam_word {
155 uint32_t cmd;
156 uint32_t addr;
157 uint32_t buf[3];
158};
159
160struct ch_trace {
161 uint32_t cmd;
162 uint32_t sip;
163 uint32_t sip_mask;
164 uint32_t dip;
165 uint32_t dip_mask;
166 uint16_t sport;
167 uint16_t sport_mask;
168 uint16_t dport;
169 uint16_t dport_mask;
170 uint32_t vlan:12;
171 uint32_t vlan_mask:12;
172 uint32_t intf:4;
173 uint32_t intf_mask:4;
174 uint8_t proto;
175 uint8_t proto_mask;
176 uint8_t invert_match:1;
177 uint8_t config_tx:1;
178 uint8_t config_rx:1;
179 uint8_t trace_tx:1;
180 uint8_t trace_rx:1;
181};
182
183#define SIOCCHIOCTL SIOCDEVPRIVATE
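
/*
 * Editorial usage sketch (not part of the original patch): a user-space
 * tool would issue these commands through the private ioctl, roughly
 *
 *	struct ch_reg op = { .cmd = CHELSIO_GETREG, .addr = 0xA0 };
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&op;
 *	ioctl(fd, SIOCCHIOCTL, &ifr);
 *
 * The register address, interface name, and file descriptor are
 * placeholders.
 */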
184
185#endif
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
new file mode 100644
index 000000000000..dfa035a1ad45
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -0,0 +1,2515 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
45#include <asm/uaccess.h>
46
47#include "common.h"
48#include "cxgb3_ioctl.h"
49#include "regs.h"
50#include "cxgb3_offload.h"
51#include "version.h"
52
53#include "cxgb3_ctl_defs.h"
54#include "t3_cpl.h"
55#include "firmware_exports.h"
56
57enum {
58 MAX_TXQ_ENTRIES = 16384,
59 MAX_CTRL_TXQ_ENTRIES = 1024,
60 MAX_RSPQ_ENTRIES = 16384,
61 MAX_RX_BUFFERS = 16384,
62 MAX_RX_JUMBO_BUFFERS = 16384,
63 MIN_TXQ_ENTRIES = 4,
64 MIN_CTRL_TXQ_ENTRIES = 4,
65 MIN_RSPQ_ENTRIES = 32,
66 MIN_FL_ENTRIES = 32
67};
68
69#define PORT_MASK ((1 << MAX_NPORTS) - 1)
70
71#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
72 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
73 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
74
75#define EEPROM_MAGIC 0x38E2F10C
76
77#define to_net_dev(class) container_of(class, struct net_device, class_dev)
78
79#define CH_DEVICE(devid, ssid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
81
82static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 1, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1, 1), /* T302E */
85 CH_DEVICE(0x22, 1, 2), /* T310E */
86 CH_DEVICE(0x23, 1, 3), /* T320X */
87 CH_DEVICE(0x24, 1, 1), /* T302X */
88 CH_DEVICE(0x25, 1, 3), /* T320E */
89 CH_DEVICE(0x26, 1, 2), /* T310X */
90 CH_DEVICE(0x30, 1, 2), /* T3B10 */
91 CH_DEVICE(0x31, 1, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1, 1), /* T3B02 */
93 {0,}
94};
95
96MODULE_DESCRIPTION(DRV_DESC);
97MODULE_AUTHOR("Chelsio Communications");
98MODULE_LICENSE("Dual BSD/GPL");
99MODULE_VERSION(DRV_VERSION);
100MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
101
102static int dflt_msg_enable = DFLT_MSG_ENABLE;
103
104module_param(dflt_msg_enable, int, 0644);
105MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106
107/*
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
111 *
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
115 */
116static int msi = 2;
117
118module_param(msi, int, 0644);
119MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
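
/*
 * Editorial note (not part of the original patch): the parameter is set at
 * module load time, e.g. "modprobe cxgb3 msi=0" forces legacy pin
 * interrupts.
 */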
120
121/*
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
124 */
125
126static int ofld_disable = 0;
127
128module_param(ofld_disable, int, 0644);
129MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time or not");
130
131/*
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
138 */
139static struct workqueue_struct *cxgb3_wq;
140
141/**
142 * link_report - show link status and link speed/duplex
143 * @dev: the port whose settings are to be reported
144 *
145 * Shows the link status, speed, and duplex of a port.
146 */
147static void link_report(struct net_device *dev)
148{
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
151 else {
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
154
155 switch (p->link_config.speed) {
156 case SPEED_10000:
157 s = "10Gbps";
158 break;
159 case SPEED_1000:
160 s = "1000Mbps";
161 break;
162 case SPEED_100:
163 s = "100Mbps";
164 break;
165 }
166
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
169 }
170}
171
172/**
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose link status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
180 *
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
184 */
185void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
187{
188 struct net_device *dev = adapter->port[port_id];
189
190 /* Skip changes from disabled ports. */
191 if (!netif_running(dev))
192 return;
193
194 if (link_stat != netif_carrier_ok(dev)) {
195 if (link_stat)
196 netif_carrier_on(dev);
197 else
198 netif_carrier_off(dev);
199 link_report(dev);
200 }
201}
202
203static void cxgb_set_rxmode(struct net_device *dev)
204{
205 struct t3_rx_mode rm;
206 struct port_info *pi = netdev_priv(dev);
207
208 init_rx_mode(&rm, dev, dev->mc_list);
209 t3_mac_set_rx_mode(&pi->mac, &rm);
210}
211
212/**
213 * link_start - enable a port
214 * @dev: the device to enable
215 *
216 * Performs the MAC and PHY actions needed to enable a port.
217 */
218static void link_start(struct net_device *dev)
219{
220 struct t3_rx_mode rm;
221 struct port_info *pi = netdev_priv(dev);
222 struct cmac *mac = &pi->mac;
223
224 init_rx_mode(&rm, dev, dev->mc_list);
225 t3_mac_reset(mac);
226 t3_mac_set_mtu(mac, dev->mtu);
227 t3_mac_set_address(mac, 0, dev->dev_addr);
228 t3_mac_set_rx_mode(mac, &rm);
229 t3_link_start(&pi->phy, mac, &pi->link_config);
230 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
231}
232
233static inline void cxgb_disable_msi(struct adapter *adapter)
234{
235 if (adapter->flags & USING_MSIX) {
236 pci_disable_msix(adapter->pdev);
237 adapter->flags &= ~USING_MSIX;
238 } else if (adapter->flags & USING_MSI) {
239 pci_disable_msi(adapter->pdev);
240 adapter->flags &= ~USING_MSI;
241 }
242}
243
244/*
245 * Interrupt handler for asynchronous events used with MSI-X.
246 */
247static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
248{
249 t3_slow_intr_handler(cookie);
250 return IRQ_HANDLED;
251}
252
253/*
254 * Name the MSI-X interrupts.
255 */
256static void name_msix_vecs(struct adapter *adap)
257{
258 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
259
260 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
261 adap->msix_info[0].desc[n] = 0;
262
263 for_each_port(adap, j) {
264 struct net_device *d = adap->port[j];
265 const struct port_info *pi = netdev_priv(d);
266
267 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
268 snprintf(adap->msix_info[msi_idx].desc, n,
269 "%s (queue %d)", d->name, i);
270 adap->msix_info[msi_idx].desc[n] = 0;
271 }
272 }
273}
274
275static int request_msix_data_irqs(struct adapter *adap)
276{
277 int i, j, err, qidx = 0;
278
279 for_each_port(adap, i) {
280 int nqsets = adap2pinfo(adap, i)->nqsets;
281
282 for (j = 0; j < nqsets; ++j) {
283 err = request_irq(adap->msix_info[qidx + 1].vec,
284 t3_intr_handler(adap,
285 adap->sge.qs[qidx].
286 rspq.polling), 0,
287 adap->msix_info[qidx + 1].desc,
288 &adap->sge.qs[qidx]);
289 if (err) {
290 while (--qidx >= 0)
291 free_irq(adap->msix_info[qidx + 1].vec,
292 &adap->sge.qs[qidx]);
293 return err;
294 }
295 qidx++;
296 }
297 }
298 return 0;
299}
300
301/**
302 * setup_rss - configure RSS
303 * @adap: the adapter
304 *
305 * Sets up RSS to distribute packets to multiple receive queues. We
306 * configure the RSS CPU lookup table to distribute to the number of HW
307 * receive queues, and the response queue lookup table to narrow that
308 * down to the response queues actually configured for each port.
309 * We always configure the RSS mapping for two ports since the mapping
310 * table has plenty of entries.
311 */
312static void setup_rss(struct adapter *adap)
313{
314 int i;
315 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
316 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
317 u8 cpus[SGE_QSETS + 1];
318 u16 rspq_map[RSS_TABLE_SIZE];
319
320 for (i = 0; i < SGE_QSETS; ++i)
321 cpus[i] = i;
322 cpus[SGE_QSETS] = 0xff; /* terminator */
323
324 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
325 rspq_map[i] = i % nq0;
326 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
327 }
328
329 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
330 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
331 V_RRCPLCPUSIZE(6), cpus, rspq_map);
332}
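
/*
 * Editorial worked example (not part of the original patch): with two
 * queue sets per port (nq0 = nq1 = 2) the loop above produces
 *
 *	rspq_map[0 .. RSS_TABLE_SIZE/2 - 1]              = 0, 1, 0, 1, ...
 *	rspq_map[RSS_TABLE_SIZE/2 .. RSS_TABLE_SIZE - 1] = 2, 3, 2, 3, ...
 *
 * i.e. the first half of the hash buckets alternates over port 0's queue
 * sets and the second half over port 1's.
 */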
333
334/*
335 * If we have multiple receive queues per port serviced by NAPI we need one
336 * netdevice per queue as NAPI operates on netdevices. We already have one
337 * netdevice, namely the one associated with the interface, so we use dummy
338 * ones for any additional queues. Note that these netdevices exist purely
339 * so that NAPI has something to work with, they do not represent network
340 * ports and are not registered.
341 */
342static int init_dummy_netdevs(struct adapter *adap)
343{
344 int i, j, dummy_idx = 0;
345 struct net_device *nd;
346
347 for_each_port(adap, i) {
348 struct net_device *dev = adap->port[i];
349 const struct port_info *pi = netdev_priv(dev);
350
351 for (j = 0; j < pi->nqsets - 1; j++) {
352 if (!adap->dummy_netdev[dummy_idx]) {
353 nd = alloc_netdev(0, "", ether_setup);
354 if (!nd)
355 goto free_all;
356
357 nd->priv = adap;
358 nd->weight = 64;
359 set_bit(__LINK_STATE_START, &nd->state);
360 adap->dummy_netdev[dummy_idx] = nd;
361 }
362 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
363 dummy_idx++;
364 }
365 }
366 return 0;
367
368free_all:
369 while (--dummy_idx >= 0) {
370 free_netdev(adap->dummy_netdev[dummy_idx]);
371 adap->dummy_netdev[dummy_idx] = NULL;
372 }
373 return -ENOMEM;
374}
375
376/*
377 * Wait until all NAPI handlers are descheduled. This includes the handlers of
378 * both netdevices representing interfaces and the dummy ones for the extra
379 * queues.
380 */
381static void quiesce_rx(struct adapter *adap)
382{
383 int i;
384 struct net_device *dev;
385
386 for_each_port(adap, i) {
387 dev = adap->port[i];
388 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
389 msleep(1);
390 }
391
392 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
393 dev = adap->dummy_netdev[i];
394 if (dev)
395 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
396 msleep(1);
397 }
398}
399
400/**
401 * setup_sge_qsets - configure SGE Tx/Rx/response queues
402 * @adap: the adapter
403 *
404 * Determines how many sets of SGE queues to use and initializes them.
405 * We support multiple queue sets per port if we have MSI-X, otherwise
406 * just one queue set per port.
407 */
408static int setup_sge_qsets(struct adapter *adap)
409{
410 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
411 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
412
413 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
414 irq_idx = -1;
415
416 for_each_port(adap, i) {
417 struct net_device *dev = adap->port[i];
418 const struct port_info *pi = netdev_priv(dev);
419
420 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
421 err = t3_sge_alloc_qset(adap, qset_idx, 1,
422 (adap->flags & USING_MSIX) ? qset_idx + 1 :
423 irq_idx,
424 &adap->params.sge.qset[qset_idx], ntxq,
425 j == 0 ? dev :
426 adap->dummy_netdev[dummy_dev_idx++]);
427 if (err) {
428 t3_free_sge_resources(adap);
429 return err;
430 }
431 }
432 }
433
434 return 0;
435}
436
437static ssize_t attr_show(struct class_device *cd, char *buf,
438 ssize_t(*format) (struct adapter *, char *))
439{
440 ssize_t len;
441 struct adapter *adap = to_net_dev(cd)->priv;
442
443 /* Synchronize with ioctls that may shut down the device */
444 rtnl_lock();
445 len = (*format) (adap, buf);
446 rtnl_unlock();
447 return len;
448}
449
450static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
451 ssize_t(*set) (struct adapter *, unsigned int),
452 unsigned int min_val, unsigned int max_val)
453{
454 char *endp;
455 ssize_t ret;
456 unsigned int val;
457 struct adapter *adap = to_net_dev(cd)->priv;
458
459 if (!capable(CAP_NET_ADMIN))
460 return -EPERM;
461
462 val = simple_strtoul(buf, &endp, 0);
463 if (endp == buf || val < min_val || val > max_val)
464 return -EINVAL;
465
466 rtnl_lock();
467 ret = (*set) (adap, val);
468 if (!ret)
469 ret = len;
470 rtnl_unlock();
471 return ret;
472}
473
474#define CXGB3_SHOW(name, val_expr) \
475static ssize_t format_##name(struct adapter *adap, char *buf) \
476{ \
477 return sprintf(buf, "%u\n", val_expr); \
478} \
479static ssize_t show_##name(struct class_device *cd, char *buf) \
480{ \
481 return attr_show(cd, buf, format_##name); \
482}
483
484static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
485{
486 if (adap->flags & FULL_INIT_DONE)
487 return -EBUSY;
488 if (val && adap->params.rev == 0)
489 return -EINVAL;
490 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
491 return -EINVAL;
492 adap->params.mc5.nfilters = val;
493 return 0;
494}
495
496static ssize_t store_nfilters(struct class_device *cd, const char *buf,
497 size_t len)
498{
499 return attr_store(cd, buf, len, set_nfilters, 0, ~0);
500}
501
502static ssize_t set_nservers(struct adapter *adap, unsigned int val)
503{
504 if (adap->flags & FULL_INIT_DONE)
505 return -EBUSY;
506 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
507 return -EINVAL;
508 adap->params.mc5.nservers = val;
509 return 0;
510}
511
512static ssize_t store_nservers(struct class_device *cd, const char *buf,
513 size_t len)
514{
515 return attr_store(cd, buf, len, set_nservers, 0, ~0);
516}
517
518#define CXGB3_ATTR_R(name, val_expr) \
519CXGB3_SHOW(name, val_expr) \
520static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
521
522#define CXGB3_ATTR_RW(name, val_expr, store_method) \
523CXGB3_SHOW(name, val_expr) \
524static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
525
526CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
527CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
528CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
529
530static struct attribute *cxgb3_attrs[] = {
531 &class_device_attr_cam_size.attr,
532 &class_device_attr_nfilters.attr,
533 &class_device_attr_nservers.attr,
534 NULL
535};
536
537static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
538
539static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
540{
541 ssize_t len;
542 unsigned int v, addr, bpt, cpt;
543 struct adapter *adap = to_net_dev(cd)->priv;
544
545 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
546 rtnl_lock();
547 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
548 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
549 if (sched & 1)
550 v >>= 16;
551 bpt = (v >> 8) & 0xff;
552 cpt = v & 0xff;
553 if (!cpt)
554 len = sprintf(buf, "disabled\n");
555 else {
556 v = (adap->params.vpd.cclk * 1000) / cpt;
557 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
558 }
559 rtnl_unlock();
560 return len;
561}
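
/*
 * Editorial worked example (not part of the original patch), assuming
 * vpd.cclk is in kHz as core_ticks_per_usec() in common.h suggests: with a
 * 200 MHz core clock (cclk = 200000), cpt = 200 clocks per tick and
 * bpt = 64 bytes per tick, the computation above gives
 *
 *	v    = 200000 * 1000 / 200 = 1,000,000 ticks/s
 *	rate = 1,000,000 * 64 / 125 = 512,000 Kbps
 */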
562
563static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
564 size_t len, int sched)
565{
566 char *endp;
567 ssize_t ret;
568 unsigned int val;
569 struct adapter *adap = to_net_dev(cd)->priv;
570
571 if (!capable(CAP_NET_ADMIN))
572 return -EPERM;
573
574 val = simple_strtoul(buf, &endp, 0);
575 if (endp == buf || val > 10000000)
576 return -EINVAL;
577
578 rtnl_lock();
579 ret = t3_config_sched(adap, val, sched);
580 if (!ret)
581 ret = len;
582 rtnl_unlock();
583 return ret;
584}
585
586#define TM_ATTR(name, sched) \
587static ssize_t show_##name(struct class_device *cd, char *buf) \
588{ \
589 return tm_attr_show(cd, buf, sched); \
590} \
591static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
592{ \
593 return tm_attr_store(cd, buf, len, sched); \
594} \
595static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
596
597TM_ATTR(sched0, 0);
598TM_ATTR(sched1, 1);
599TM_ATTR(sched2, 2);
600TM_ATTR(sched3, 3);
601TM_ATTR(sched4, 4);
602TM_ATTR(sched5, 5);
603TM_ATTR(sched6, 6);
604TM_ATTR(sched7, 7);
605
606static struct attribute *offload_attrs[] = {
607 &class_device_attr_sched0.attr,
608 &class_device_attr_sched1.attr,
609 &class_device_attr_sched2.attr,
610 &class_device_attr_sched3.attr,
611 &class_device_attr_sched4.attr,
612 &class_device_attr_sched5.attr,
613 &class_device_attr_sched6.attr,
614 &class_device_attr_sched7.attr,
615 NULL
616};
617
618static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
619
620/*
621 * Sends an sk_buff to an offload queue driver
622 * after dealing with any active network taps.
623 */
624static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
625{
626 int ret;
627
628 local_bh_disable();
629 ret = t3_offload_tx(tdev, skb);
630 local_bh_enable();
631 return ret;
632}
633
634static int write_smt_entry(struct adapter *adapter, int idx)
635{
636 struct cpl_smt_write_req *req;
637 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
638
639 if (!skb)
640 return -ENOMEM;
641
642 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
643 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
644 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
645 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
646 req->iff = idx;
647 memset(req->src_mac1, 0, sizeof(req->src_mac1));
648 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
649 skb->priority = 1;
650 offload_tx(&adapter->tdev, skb);
651 return 0;
652}
653
654static int init_smt(struct adapter *adapter)
655{
656 int i;
657
658 for_each_port(adapter, i)
659 write_smt_entry(adapter, i);
660 return 0;
661}
662
663static void init_port_mtus(struct adapter *adapter)
664{
665 unsigned int mtus = adapter->port[0]->mtu;
666
667 if (adapter->port[1])
668 mtus |= adapter->port[1]->mtu << 16;
669 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
670}
671
672static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
673 int hi, int port)
674{
675 struct sk_buff *skb;
676 struct mngt_pktsched_wr *req;
677
678 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
679 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
680 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
681 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
682 req->sched = sched;
683 req->idx = qidx;
684 req->min = lo;
685 req->max = hi;
686 req->binding = port;
687 t3_mgmt_tx(adap, skb);
688}
689
690static void bind_qsets(struct adapter *adap)
691{
692 int i, j;
693
694 for_each_port(adap, i) {
695 const struct port_info *pi = adap2pinfo(adap, i);
696
697 for (j = 0; j < pi->nqsets; ++j)
698 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
699 -1, i);
700 }
701}
702
703/**
704 * cxgb_up - enable the adapter
705 * @adapter: adapter being enabled
706 *
707 * Called when the first port is enabled, this function performs the
708 * actions necessary to make an adapter operational, such as completing
709 * the initialization of HW modules, and enabling interrupts.
710 *
711 * Must be called with the rtnl lock held.
712 */
713static int cxgb_up(struct adapter *adap)
714{
715 int err = 0;
716
717 if (!(adap->flags & FULL_INIT_DONE)) {
718 err = t3_check_fw_version(adap);
719 if (err)
720 goto out;
721
722 err = init_dummy_netdevs(adap);
723 if (err)
724 goto out;
725
726 err = t3_init_hw(adap, 0);
727 if (err)
728 goto out;
729
730 err = setup_sge_qsets(adap);
731 if (err)
732 goto out;
733
734 setup_rss(adap);
735 adap->flags |= FULL_INIT_DONE;
736 }
737
738 t3_intr_clear(adap);
739
740 if (adap->flags & USING_MSIX) {
741 name_msix_vecs(adap);
742 err = request_irq(adap->msix_info[0].vec,
743 t3_async_intr_handler, 0,
744 adap->msix_info[0].desc, adap);
745 if (err)
746 goto irq_err;
747
748 if (request_msix_data_irqs(adap)) {
749 free_irq(adap->msix_info[0].vec, adap);
750 goto irq_err;
751 }
752 } else if ((err = request_irq(adap->pdev->irq,
753 t3_intr_handler(adap,
754 adap->sge.qs[0].rspq.
755 polling),
756 (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
757 adap->name, adap)))
758 goto irq_err;
759
760 t3_sge_start(adap);
761 t3_intr_enable(adap);
762
763 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
764 bind_qsets(adap);
765 adap->flags |= QUEUES_BOUND;
766
767out:
768 return err;
769irq_err:
770 CH_ERR(adap, "request_irq failed, err %d\n", err);
771 goto out;
772}
773
774/*
775 * Release resources when all the ports and offloading have been stopped.
776 */
777static void cxgb_down(struct adapter *adapter)
778{
779 t3_sge_stop(adapter);
780 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
781 t3_intr_disable(adapter);
782 spin_unlock_irq(&adapter->work_lock);
783
784 if (adapter->flags & USING_MSIX) {
785 int i, n = 0;
786
787 free_irq(adapter->msix_info[0].vec, adapter);
788 for_each_port(adapter, i)
789 n += adap2pinfo(adapter, i)->nqsets;
790
791 for (i = 0; i < n; ++i)
792 free_irq(adapter->msix_info[i + 1].vec,
793 &adapter->sge.qs[i]);
794 } else
795 free_irq(adapter->pdev->irq, adapter);
796
797 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
798 quiesce_rx(adapter);
799}
800
801static void schedule_chk_task(struct adapter *adap)
802{
803 unsigned int timeo;
804
805 timeo = adap->params.linkpoll_period ?
806 (HZ * adap->params.linkpoll_period) / 10 :
807 adap->params.stats_update_period * HZ;
808 if (timeo)
809 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
810}
811
812static int offload_open(struct net_device *dev)
813{
814 struct adapter *adapter = dev->priv;
815 struct t3cdev *tdev = T3CDEV(dev);
816 int adap_up = adapter->open_device_map & PORT_MASK;
817 int err = 0;
818
819 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
820 return 0;
821
822 if (!adap_up && (err = cxgb_up(adapter)) < 0)
823 return err;
824
825 t3_tp_set_offload_mode(adapter, 1);
826 tdev->lldev = adapter->port[0];
827 err = cxgb3_offload_activate(adapter);
828 if (err)
829 goto out;
830
831 init_port_mtus(adapter);
832 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
833 adapter->params.b_wnd,
834 adapter->params.rev == 0 ?
835 adapter->port[0]->mtu : 0xffff);
836 init_smt(adapter);
837
838 /* Never mind if the next step fails */
839 sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
840
841 /* Call back all registered clients */
842 cxgb3_add_clients(tdev);
843
844out:
845 /* restore them in case the offload module has changed them */
846 if (err) {
847 t3_tp_set_offload_mode(adapter, 0);
848 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
849 cxgb3_set_dummy_ops(tdev);
850 }
851 return err;
852}
853
854static int offload_close(struct t3cdev *tdev)
855{
856 struct adapter *adapter = tdev2adap(tdev);
857
858 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
859 return 0;
860
861 /* Call back all registered clients */
862 cxgb3_remove_clients(tdev);
863
864 sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
865
866 tdev->lldev = NULL;
867 cxgb3_set_dummy_ops(tdev);
868 t3_tp_set_offload_mode(adapter, 0);
869 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
870
871 if (!adapter->open_device_map)
872 cxgb_down(adapter);
873
874 cxgb3_offload_deactivate(adapter);
875 return 0;
876}
877
878static int cxgb_open(struct net_device *dev)
879{
880 int err;
881 struct adapter *adapter = dev->priv;
882 struct port_info *pi = netdev_priv(dev);
883 int other_ports = adapter->open_device_map & PORT_MASK;
884
885 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
886 return err;
887
888 set_bit(pi->port_id, &adapter->open_device_map);
889 if (!ofld_disable) {
890 err = offload_open(dev);
891 if (err)
892 printk(KERN_WARNING
893 "Could not initialize offload capabilities\n");
894 }
895
896 link_start(dev);
897 t3_port_intr_enable(adapter, pi->port_id);
898 netif_start_queue(dev);
899 if (!other_ports)
900 schedule_chk_task(adapter);
901
902 return 0;
903}
904
905static int cxgb_close(struct net_device *dev)
906{
907 struct adapter *adapter = dev->priv;
908 struct port_info *p = netdev_priv(dev);
909
910 t3_port_intr_disable(adapter, p->port_id);
911 netif_stop_queue(dev);
912 p->phy.ops->power_down(&p->phy, 1);
913 netif_carrier_off(dev);
914 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
915
916 spin_lock(&adapter->work_lock); /* sync with update task */
917 clear_bit(p->port_id, &adapter->open_device_map);
918 spin_unlock(&adapter->work_lock);
919
920 if (!(adapter->open_device_map & PORT_MASK))
921 cancel_rearming_delayed_workqueue(cxgb3_wq,
922 &adapter->adap_check_task);
923
924 if (!adapter->open_device_map)
925 cxgb_down(adapter);
926
927 return 0;
928}
929
930static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
931{
932 struct adapter *adapter = dev->priv;
933 struct port_info *p = netdev_priv(dev);
934 struct net_device_stats *ns = &p->netstats;
935 const struct mac_stats *pstats;
936
937 spin_lock(&adapter->stats_lock);
938 pstats = t3_mac_update_stats(&p->mac);
939 spin_unlock(&adapter->stats_lock);
940
941 ns->tx_bytes = pstats->tx_octets;
942 ns->tx_packets = pstats->tx_frames;
943 ns->rx_bytes = pstats->rx_octets;
944 ns->rx_packets = pstats->rx_frames;
945 ns->multicast = pstats->rx_mcast_frames;
946
947 ns->tx_errors = pstats->tx_underrun;
948 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
949 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
950 pstats->rx_fifo_ovfl;
951
952 /* detailed rx_errors */
953 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
954 ns->rx_over_errors = 0;
955 ns->rx_crc_errors = pstats->rx_fcs_errs;
956 ns->rx_frame_errors = pstats->rx_symbol_errs;
957 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
958 ns->rx_missed_errors = pstats->rx_cong_drops;
959
960 /* detailed tx_errors */
961 ns->tx_aborted_errors = 0;
962 ns->tx_carrier_errors = 0;
963 ns->tx_fifo_errors = pstats->tx_underrun;
964 ns->tx_heartbeat_errors = 0;
965 ns->tx_window_errors = 0;
966 return ns;
967}
968
969static u32 get_msglevel(struct net_device *dev)
970{
971 struct adapter *adapter = dev->priv;
972
973 return adapter->msg_enable;
974}
975
976static void set_msglevel(struct net_device *dev, u32 val)
977{
978 struct adapter *adapter = dev->priv;
979
980 adapter->msg_enable = val;
981}
982
983static char stats_strings[][ETH_GSTRING_LEN] = {
984 "TxOctetsOK ",
985 "TxFramesOK ",
986 "TxMulticastFramesOK",
987 "TxBroadcastFramesOK",
988 "TxPauseFrames ",
989 "TxUnderrun ",
990 "TxExtUnderrun ",
991
992 "TxFrames64 ",
993 "TxFrames65To127 ",
994 "TxFrames128To255 ",
995 "TxFrames256To511 ",
996 "TxFrames512To1023 ",
997 "TxFrames1024To1518 ",
998 "TxFrames1519ToMax ",
999
1000 "RxOctetsOK ",
1001 "RxFramesOK ",
1002 "RxMulticastFramesOK",
1003 "RxBroadcastFramesOK",
1004 "RxPauseFrames ",
1005 "RxFCSErrors ",
1006 "RxSymbolErrors ",
1007 "RxShortErrors ",
1008 "RxJabberErrors ",
1009 "RxLengthErrors ",
1010 "RxFIFOoverflow ",
1011
1012 "RxFrames64 ",
1013 "RxFrames65To127 ",
1014 "RxFrames128To255 ",
1015 "RxFrames256To511 ",
1016 "RxFrames512To1023 ",
1017 "RxFrames1024To1518 ",
1018 "RxFrames1519ToMax ",
1019
1020 "PhyFIFOErrors ",
1021 "TSO ",
1022 "VLANextractions ",
1023 "VLANinsertions ",
1024 "TxCsumOffload ",
1025 "RxCsumGood ",
1026 "RxDrops "
1027};
1028
1029static int get_stats_count(struct net_device *dev)
1030{
1031 return ARRAY_SIZE(stats_strings);
1032}
1033
1034#define T3_REGMAP_SIZE (3 * 1024)
1035
1036static int get_regs_len(struct net_device *dev)
1037{
1038 return T3_REGMAP_SIZE;
1039}
1040
1041static int get_eeprom_len(struct net_device *dev)
1042{
1043 return EEPROMSIZE;
1044}
1045
1046static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1047{
1048 u32 fw_vers = 0;
1049 struct adapter *adapter = dev->priv;
1050
1051 t3_get_fw_version(adapter, &fw_vers);
1052
1053 strcpy(info->driver, DRV_NAME);
1054 strcpy(info->version, DRV_VERSION);
1055 strcpy(info->bus_info, pci_name(adapter->pdev));
1056 if (!fw_vers)
1057 strcpy(info->fw_version, "N/A");
1058 else {
1059 snprintf(info->fw_version, sizeof(info->fw_version),
1060 "%s %u.%u.%u",
1061 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1062 G_FW_VERSION_MAJOR(fw_vers),
1063 G_FW_VERSION_MINOR(fw_vers),
1064 G_FW_VERSION_MICRO(fw_vers));
1065 }
1066}
1067
1068static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1069{
1070 if (stringset == ETH_SS_STATS)
1071 memcpy(data, stats_strings, sizeof(stats_strings));
1072}
1073
1074static unsigned long collect_sge_port_stats(struct adapter *adapter,
1075 struct port_info *p, int idx)
1076{
1077 int i;
1078 unsigned long tot = 0;
1079
1080 for (i = 0; i < p->nqsets; ++i)
1081 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1082 return tot;
1083}
1084
1085static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1086 u64 *data)
1087{
1088 struct adapter *adapter = dev->priv;
1089 struct port_info *pi = netdev_priv(dev);
1090 const struct mac_stats *s;
1091
1092 spin_lock(&adapter->stats_lock);
1093 s = t3_mac_update_stats(&pi->mac);
1094 spin_unlock(&adapter->stats_lock);
1095
1096 *data++ = s->tx_octets;
1097 *data++ = s->tx_frames;
1098 *data++ = s->tx_mcast_frames;
1099 *data++ = s->tx_bcast_frames;
1100 *data++ = s->tx_pause;
1101 *data++ = s->tx_underrun;
1102 *data++ = s->tx_fifo_urun;
1103
1104 *data++ = s->tx_frames_64;
1105 *data++ = s->tx_frames_65_127;
1106 *data++ = s->tx_frames_128_255;
1107 *data++ = s->tx_frames_256_511;
1108 *data++ = s->tx_frames_512_1023;
1109 *data++ = s->tx_frames_1024_1518;
1110 *data++ = s->tx_frames_1519_max;
1111
1112 *data++ = s->rx_octets;
1113 *data++ = s->rx_frames;
1114 *data++ = s->rx_mcast_frames;
1115 *data++ = s->rx_bcast_frames;
1116 *data++ = s->rx_pause;
1117 *data++ = s->rx_fcs_errs;
1118 *data++ = s->rx_symbol_errs;
1119 *data++ = s->rx_short;
1120 *data++ = s->rx_jabber;
1121 *data++ = s->rx_too_long;
1122 *data++ = s->rx_fifo_ovfl;
1123
1124 *data++ = s->rx_frames_64;
1125 *data++ = s->rx_frames_65_127;
1126 *data++ = s->rx_frames_128_255;
1127 *data++ = s->rx_frames_256_511;
1128 *data++ = s->rx_frames_512_1023;
1129 *data++ = s->rx_frames_1024_1518;
1130 *data++ = s->rx_frames_1519_max;
1131
1132 *data++ = pi->phy.fifo_errors;
1133
1134 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1135 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1136 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1137 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1138 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1139 *data++ = s->rx_cong_drops;
1140}
1141
1142static inline void reg_block_dump(struct adapter *ap, void *buf,
1143 unsigned int start, unsigned int end)
1144{
1145 u32 *p = buf + start;
1146
1147 for (; start <= end; start += sizeof(u32))
1148 *p++ = t3_read_reg(ap, start);
1149}
1150
1151static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1152 void *buf)
1153{
1154 struct adapter *ap = dev->priv;
1155
1156 /*
1157 * Version scheme:
1158 * bits 0..9: chip version
1159 * bits 10..15: chip revision
1160 * bit 31: set for PCIe cards
1161 */
1162 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1163
1164 /*
1165 * We skip the MAC statistics registers because they are clear-on-read.
1166 * Also reading multi-register stats would need to synchronize with the
1167 * periodic mac stats accumulation. Hard to justify the complexity.
1168 */
1169 memset(buf, 0, T3_REGMAP_SIZE);
1170 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1171 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1172 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1173 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1174 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1175 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1176 XGM_REG(A_XGM_SERDES_STAT3, 1));
1177 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1178 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1179}
1180
1181static int restart_autoneg(struct net_device *dev)
1182{
1183 struct port_info *p = netdev_priv(dev);
1184
1185 if (!netif_running(dev))
1186 return -EAGAIN;
1187 if (p->link_config.autoneg != AUTONEG_ENABLE)
1188 return -EINVAL;
1189 p->phy.ops->autoneg_restart(&p->phy);
1190 return 0;
1191}
1192
1193static int cxgb3_phys_id(struct net_device *dev, u32 data)
1194{
1195 int i;
1196 struct adapter *adapter = dev->priv;
1197
1198 if (data == 0)
1199 data = 2;
1200
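 /* Toggle GPIO0, presumably wired to the port identification LED, every
  * 500ms so it blinks for roughly 'data' seconds, then drive it high again. */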
1201 for (i = 0; i < data * 2; i++) {
1202 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1203 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1204 if (msleep_interruptible(500))
1205 break;
1206 }
1207 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1208 F_GPIO0_OUT_VAL);
1209 return 0;
1210}
1211
1212static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1213{
1214 struct port_info *p = netdev_priv(dev);
1215
1216 cmd->supported = p->link_config.supported;
1217 cmd->advertising = p->link_config.advertising;
1218
1219 if (netif_carrier_ok(dev)) {
1220 cmd->speed = p->link_config.speed;
1221 cmd->duplex = p->link_config.duplex;
1222 } else {
1223 cmd->speed = -1;
1224 cmd->duplex = -1;
1225 }
1226
1227 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1228 cmd->phy_address = p->phy.addr;
1229 cmd->transceiver = XCVR_EXTERNAL;
1230 cmd->autoneg = p->link_config.autoneg;
1231 cmd->maxtxpkt = 0;
1232 cmd->maxrxpkt = 0;
1233 return 0;
1234}
1235
1236static int speed_duplex_to_caps(int speed, int duplex)
1237{
1238 int cap = 0;
1239
1240 switch (speed) {
1241 case SPEED_10:
1242 if (duplex == DUPLEX_FULL)
1243 cap = SUPPORTED_10baseT_Full;
1244 else
1245 cap = SUPPORTED_10baseT_Half;
1246 break;
1247 case SPEED_100:
1248 if (duplex == DUPLEX_FULL)
1249 cap = SUPPORTED_100baseT_Full;
1250 else
1251 cap = SUPPORTED_100baseT_Half;
1252 break;
1253 case SPEED_1000:
1254 if (duplex == DUPLEX_FULL)
1255 cap = SUPPORTED_1000baseT_Full;
1256 else
1257 cap = SUPPORTED_1000baseT_Half;
1258 break;
1259 case SPEED_10000:
1260 if (duplex == DUPLEX_FULL)
1261 cap = SUPPORTED_10000baseT_Full;
1262 }
1263 return cap;
1264}
1265
1266#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1267 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1268 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1269 ADVERTISED_10000baseT_Full)
1270
1271static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1272{
1273 struct port_info *p = netdev_priv(dev);
1274 struct link_config *lc = &p->link_config;
1275
1276 if (!(lc->supported & SUPPORTED_Autoneg))
1277 return -EOPNOTSUPP; /* can't change speed/duplex */
1278
1279 if (cmd->autoneg == AUTONEG_DISABLE) {
1280 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1281
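 /* Forcing 1Gb/s is rejected because 1000BASE-T requires auto-negotiation
  * (master/slave resolution), as is any speed/duplex combination the port
  * does not support. */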
1282 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1283 return -EINVAL;
1284 lc->requested_speed = cmd->speed;
1285 lc->requested_duplex = cmd->duplex;
1286 lc->advertising = 0;
1287 } else {
1288 cmd->advertising &= ADVERTISED_MASK;
1289 cmd->advertising &= lc->supported;
1290 if (!cmd->advertising)
1291 return -EINVAL;
1292 lc->requested_speed = SPEED_INVALID;
1293 lc->requested_duplex = DUPLEX_INVALID;
1294 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1295 }
1296 lc->autoneg = cmd->autoneg;
1297 if (netif_running(dev))
1298 t3_link_start(&p->phy, &p->mac, lc);
1299 return 0;
1300}
1301
1302static void get_pauseparam(struct net_device *dev,
1303 struct ethtool_pauseparam *epause)
1304{
1305 struct port_info *p = netdev_priv(dev);
1306
1307 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1308 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1309 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1310}
1311
1312static int set_pauseparam(struct net_device *dev,
1313 struct ethtool_pauseparam *epause)
1314{
1315 struct port_info *p = netdev_priv(dev);
1316 struct link_config *lc = &p->link_config;
1317
1318 if (epause->autoneg == AUTONEG_DISABLE)
1319 lc->requested_fc = 0;
1320 else if (lc->supported & SUPPORTED_Autoneg)
1321 lc->requested_fc = PAUSE_AUTONEG;
1322 else
1323 return -EINVAL;
1324
1325 if (epause->rx_pause)
1326 lc->requested_fc |= PAUSE_RX;
1327 if (epause->tx_pause)
1328 lc->requested_fc |= PAUSE_TX;
1329 if (lc->autoneg == AUTONEG_ENABLE) {
1330 if (netif_running(dev))
1331 t3_link_start(&p->phy, &p->mac, lc);
1332 } else {
1333 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1334 if (netif_running(dev))
1335 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1336 }
1337 return 0;
1338}
1339
1340static u32 get_rx_csum(struct net_device *dev)
1341{
1342 struct port_info *p = netdev_priv(dev);
1343
1344 return p->rx_csum_offload;
1345}
1346
1347static int set_rx_csum(struct net_device *dev, u32 data)
1348{
1349 struct port_info *p = netdev_priv(dev);
1350
1351 p->rx_csum_offload = data;
1352 return 0;
1353}
1354
1355static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1356{
1357 struct adapter *adapter = dev->priv;
1358
1359 e->rx_max_pending = MAX_RX_BUFFERS;
1360 e->rx_mini_max_pending = 0;
1361 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1362 e->tx_max_pending = MAX_TXQ_ENTRIES;
1363
1364 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1365 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1366 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1367 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1368}
1369
1370static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1371{
1372 int i;
1373 struct adapter *adapter = dev->priv;
1374
1375 if (e->rx_pending > MAX_RX_BUFFERS ||
1376 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1377 e->tx_pending > MAX_TXQ_ENTRIES ||
1378 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1379 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1380 e->rx_pending < MIN_FL_ENTRIES ||
1381 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1382 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1383 return -EINVAL;
1384
1385 if (adapter->flags & FULL_INIT_DONE)
1386 return -EBUSY;
1387
1388 for (i = 0; i < SGE_QSETS; ++i) {
1389 struct qset_params *q = &adapter->params.sge.qset[i];
1390
1391 q->rspq_size = e->rx_mini_pending;
1392 q->fl_size = e->rx_pending;
1393 q->jumbo_size = e->rx_jumbo_pending;
1394 q->txq_size[0] = e->tx_pending;
1395 q->txq_size[1] = e->tx_pending;
1396 q->txq_size[2] = e->tx_pending;
1397 }
1398 return 0;
1399}
1400
1401static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1402{
1403 struct adapter *adapter = dev->priv;
1404 struct qset_params *qsp = &adapter->params.sge.qset[0];
1405 struct sge_qset *qs = &adapter->sge.qs[0];
1406
1407 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1408 return -EINVAL;
1409
1410 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1411 t3_update_qset_coalesce(qs, qsp);
1412 return 0;
1413}
1414
1415static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1416{
1417 struct adapter *adapter = dev->priv;
1418 struct qset_params *q = adapter->params.sge.qset;
1419
1420 c->rx_coalesce_usecs = q->coalesce_usecs;
1421 return 0;
1422}
1423
1424static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1425 u8 * data)
1426{
1427 int i, err = 0;
1428 struct adapter *adapter = dev->priv;
1429
1430 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1431 if (!buf)
1432 return -ENOMEM;
1433
1434 e->magic = EEPROM_MAGIC;
1435 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1436 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1437
1438 if (!err)
1439 memcpy(data, buf + e->offset, e->len);
1440 kfree(buf);
1441 return err;
1442}
1443
1444static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1445 u8 * data)
1446{
1447 u8 *buf;
1448 int err = 0;
1449 u32 aligned_offset, aligned_len, *p;
1450 struct adapter *adapter = dev->priv;
1451
1452 if (eeprom->magic != EEPROM_MAGIC)
1453 return -EINVAL;
1454
1455 aligned_offset = eeprom->offset & ~3;
1456 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1457
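 /* The EEPROM is written a 32-bit word at a time.  If the caller's range is
  * not word aligned, read back the partial first and last words so the bytes
  * outside the requested range are preserved. */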
1458 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1459 buf = kmalloc(aligned_len, GFP_KERNEL);
1460 if (!buf)
1461 return -ENOMEM;
1462 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1463 if (!err && aligned_len > 4)
1464 err = t3_seeprom_read(adapter,
1465 aligned_offset + aligned_len - 4,
1466 (u32 *) & buf[aligned_len - 4]);
1467 if (err)
1468 goto out;
1469 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1470 } else
1471 buf = data;
1472
1473 err = t3_seeprom_wp(adapter, 0);
1474 if (err)
1475 goto out;
1476
1477 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1478 err = t3_seeprom_write(adapter, aligned_offset, *p);
1479 aligned_offset += 4;
1480 }
1481
1482 if (!err)
1483 err = t3_seeprom_wp(adapter, 1);
1484out:
1485 if (buf != data)
1486 kfree(buf);
1487 return err;
1488}
1489
1490static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1491{
1492 wol->supported = 0;
1493 wol->wolopts = 0;
1494 memset(&wol->sopass, 0, sizeof(wol->sopass));
1495}
1496
1497static const struct ethtool_ops cxgb_ethtool_ops = {
1498 .get_settings = get_settings,
1499 .set_settings = set_settings,
1500 .get_drvinfo = get_drvinfo,
1501 .get_msglevel = get_msglevel,
1502 .set_msglevel = set_msglevel,
1503 .get_ringparam = get_sge_param,
1504 .set_ringparam = set_sge_param,
1505 .get_coalesce = get_coalesce,
1506 .set_coalesce = set_coalesce,
1507 .get_eeprom_len = get_eeprom_len,
1508 .get_eeprom = get_eeprom,
1509 .set_eeprom = set_eeprom,
1510 .get_pauseparam = get_pauseparam,
1511 .set_pauseparam = set_pauseparam,
1512 .get_rx_csum = get_rx_csum,
1513 .set_rx_csum = set_rx_csum,
1514 .get_tx_csum = ethtool_op_get_tx_csum,
1515 .set_tx_csum = ethtool_op_set_tx_csum,
1516 .get_sg = ethtool_op_get_sg,
1517 .set_sg = ethtool_op_set_sg,
1518 .get_link = ethtool_op_get_link,
1519 .get_strings = get_strings,
1520 .phys_id = cxgb3_phys_id,
1521 .nway_reset = restart_autoneg,
1522 .get_stats_count = get_stats_count,
1523 .get_ethtool_stats = get_stats,
1524 .get_regs_len = get_regs_len,
1525 .get_regs = get_regs,
1526 .get_wol = get_wol,
1527 .get_tso = ethtool_op_get_tso,
1528 .set_tso = ethtool_op_set_tso,
1529 .get_perm_addr = ethtool_op_get_perm_addr
1530};
1531
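/*
 * The extension ioctls below use negative field values to mean "not
 * specified / leave unchanged", so a negative value always passes the
 * range check.
 */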
1532static int in_range(int val, int lo, int hi)
1533{
1534 return val < 0 || (val <= hi && val >= lo);
1535}
1536
1537static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1538{
1539 int ret;
1540 u32 cmd;
1541 struct adapter *adapter = dev->priv;
1542
1543 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1544 return -EFAULT;
1545
1546 switch (cmd) {
1547 case CHELSIO_SETREG:{
1548 struct ch_reg edata;
1549
1550 if (!capable(CAP_NET_ADMIN))
1551 return -EPERM;
1552 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1553 return -EFAULT;
1554 if ((edata.addr & 3) != 0
1555 || edata.addr >= adapter->mmio_len)
1556 return -EINVAL;
1557 writel(edata.val, adapter->regs + edata.addr);
1558 break;
1559 }
1560 case CHELSIO_GETREG:{
1561 struct ch_reg edata;
1562
1563 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1564 return -EFAULT;
1565 if ((edata.addr & 3) != 0
1566 || edata.addr >= adapter->mmio_len)
1567 return -EINVAL;
1568 edata.val = readl(adapter->regs + edata.addr);
1569 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1570 return -EFAULT;
1571 break;
1572 }
1573 case CHELSIO_SET_QSET_PARAMS:{
1574 int i;
1575 struct qset_params *q;
1576 struct ch_qset_params t;
1577
1578 if (!capable(CAP_NET_ADMIN))
1579 return -EPERM;
1580 if (copy_from_user(&t, useraddr, sizeof(t)))
1581 return -EFAULT;
1582 if (t.qset_idx >= SGE_QSETS)
1583 return -EINVAL;
1584 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1585 !in_range(t.cong_thres, 0, 255) ||
1586 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1587 MAX_TXQ_ENTRIES) ||
1588 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1589 MAX_TXQ_ENTRIES) ||
1590 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1591 MAX_CTRL_TXQ_ENTRIES) ||
1592 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1593 MAX_RX_BUFFERS)
1594 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1595 MAX_RX_JUMBO_BUFFERS)
1596 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1597 MAX_RSPQ_ENTRIES))
1598 return -EINVAL;
1599 if ((adapter->flags & FULL_INIT_DONE) &&
1600 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1601 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1602 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1603 t.polling >= 0 || t.cong_thres >= 0))
1604 return -EBUSY;
1605
1606 q = &adapter->params.sge.qset[t.qset_idx];
1607
1608 if (t.rspq_size >= 0)
1609 q->rspq_size = t.rspq_size;
1610 if (t.fl_size[0] >= 0)
1611 q->fl_size = t.fl_size[0];
1612 if (t.fl_size[1] >= 0)
1613 q->jumbo_size = t.fl_size[1];
1614 if (t.txq_size[0] >= 0)
1615 q->txq_size[0] = t.txq_size[0];
1616 if (t.txq_size[1] >= 0)
1617 q->txq_size[1] = t.txq_size[1];
1618 if (t.txq_size[2] >= 0)
1619 q->txq_size[2] = t.txq_size[2];
1620 if (t.cong_thres >= 0)
1621 q->cong_thres = t.cong_thres;
1622 if (t.intr_lat >= 0) {
1623 struct sge_qset *qs =
1624 &adapter->sge.qs[t.qset_idx];
1625
1626 q->coalesce_usecs = t.intr_lat;
1627 t3_update_qset_coalesce(qs, q);
1628 }
1629 if (t.polling >= 0) {
1630 if (adapter->flags & USING_MSIX)
1631 q->polling = t.polling;
1632 else {
1633 /* No polling with INTx for T3A */
1634 if (adapter->params.rev == 0 &&
1635 !(adapter->flags & USING_MSI))
1636 t.polling = 0;
1637
1638 for (i = 0; i < SGE_QSETS; i++) {
1639 q = &adapter->params.sge.
1640 qset[i];
1641 q->polling = t.polling;
1642 }
1643 }
1644 }
1645 break;
1646 }
1647 case CHELSIO_GET_QSET_PARAMS:{
1648 struct qset_params *q;
1649 struct ch_qset_params t;
1650
1651 if (copy_from_user(&t, useraddr, sizeof(t)))
1652 return -EFAULT;
1653 if (t.qset_idx >= SGE_QSETS)
1654 return -EINVAL;
1655
1656 q = &adapter->params.sge.qset[t.qset_idx];
1657 t.rspq_size = q->rspq_size;
1658 t.txq_size[0] = q->txq_size[0];
1659 t.txq_size[1] = q->txq_size[1];
1660 t.txq_size[2] = q->txq_size[2];
1661 t.fl_size[0] = q->fl_size;
1662 t.fl_size[1] = q->jumbo_size;
1663 t.polling = q->polling;
1664 t.intr_lat = q->coalesce_usecs;
1665 t.cong_thres = q->cong_thres;
1666
1667 if (copy_to_user(useraddr, &t, sizeof(t)))
1668 return -EFAULT;
1669 break;
1670 }
1671 case CHELSIO_SET_QSET_NUM:{
1672 struct ch_reg edata;
1673 struct port_info *pi = netdev_priv(dev);
1674 unsigned int i, first_qset = 0, other_qsets = 0;
1675
1676 if (!capable(CAP_NET_ADMIN))
1677 return -EPERM;
1678 if (adapter->flags & FULL_INIT_DONE)
1679 return -EBUSY;
1680 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1681 return -EFAULT;
1682 if (edata.val < 1 ||
1683 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1684 return -EINVAL;
1685
1686 for_each_port(adapter, i)
1687 if (adapter->port[i] && adapter->port[i] != dev)
1688 other_qsets += adap2pinfo(adapter, i)->nqsets;
1689
1690 if (edata.val + other_qsets > SGE_QSETS)
1691 return -EINVAL;
1692
1693 pi->nqsets = edata.val;
1694
1695 for_each_port(adapter, i)
1696 if (adapter->port[i]) {
1697 pi = adap2pinfo(adapter, i);
1698 pi->first_qset = first_qset;
1699 first_qset += pi->nqsets;
1700 }
1701 break;
1702 }
1703 case CHELSIO_GET_QSET_NUM:{
1704 struct ch_reg edata;
1705 struct port_info *pi = netdev_priv(dev);
1706
1707 edata.cmd = CHELSIO_GET_QSET_NUM;
1708 edata.val = pi->nqsets;
1709 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1710 return -EFAULT;
1711 break;
1712 }
1713 case CHELSIO_LOAD_FW:{
1714 u8 *fw_data;
1715 struct ch_mem_range t;
1716
1717 if (!capable(CAP_NET_ADMIN))
1718 return -EPERM;
1719 if (copy_from_user(&t, useraddr, sizeof(t)))
1720 return -EFAULT;
1721
1722 fw_data = kmalloc(t.len, GFP_KERNEL);
1723 if (!fw_data)
1724 return -ENOMEM;
1725
1726 if (copy_from_user
1727 (fw_data, useraddr + sizeof(t), t.len)) {
1728 kfree(fw_data);
1729 return -EFAULT;
1730 }
1731
1732 ret = t3_load_fw(adapter, fw_data, t.len);
1733 kfree(fw_data);
1734 if (ret)
1735 return ret;
1736 break;
1737 }
1738 case CHELSIO_SETMTUTAB:{
1739 struct ch_mtus m;
1740 int i;
1741
1742 if (!is_offload(adapter))
1743 return -EOPNOTSUPP;
1744 if (!capable(CAP_NET_ADMIN))
1745 return -EPERM;
1746 if (offload_running(adapter))
1747 return -EBUSY;
1748 if (copy_from_user(&m, useraddr, sizeof(m)))
1749 return -EFAULT;
1750 if (m.nmtus != NMTUS)
1751 return -EINVAL;
1752 if (m.mtus[0] < 81) /* accommodate SACK */
1753 return -EINVAL;
1754
1755 /* MTUs must be in ascending order */
1756 for (i = 1; i < NMTUS; ++i)
1757 if (m.mtus[i] < m.mtus[i - 1])
1758 return -EINVAL;
1759
1760 memcpy(adapter->params.mtus, m.mtus,
1761 sizeof(adapter->params.mtus));
1762 break;
1763 }
1764 case CHELSIO_GET_PM:{
1765 struct tp_params *p = &adapter->params.tp;
1766 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1767
1768 if (!is_offload(adapter))
1769 return -EOPNOTSUPP;
1770 m.tx_pg_sz = p->tx_pg_size;
1771 m.tx_num_pg = p->tx_num_pgs;
1772 m.rx_pg_sz = p->rx_pg_size;
1773 m.rx_num_pg = p->rx_num_pgs;
1774 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1775 if (copy_to_user(useraddr, &m, sizeof(m)))
1776 return -EFAULT;
1777 break;
1778 }
1779 case CHELSIO_SET_PM:{
1780 struct ch_pm m;
1781 struct tp_params *p = &adapter->params.tp;
1782
1783 if (!is_offload(adapter))
1784 return -EOPNOTSUPP;
1785 if (!capable(CAP_NET_ADMIN))
1786 return -EPERM;
1787 if (adapter->flags & FULL_INIT_DONE)
1788 return -EBUSY;
1789 if (copy_from_user(&m, useraddr, sizeof(m)))
1790 return -EFAULT;
1791 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1792 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1793 return -EINVAL; /* not power of 2 */
1794 if (!(m.rx_pg_sz & 0x14000))
1795 return -EINVAL; /* not 16KB or 64KB */
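 /* Tx page size must likewise be one of the supported sizes; the 0x1554000
  * mask admits 16KB, 64KB, 256KB, 1MB, 4MB and 16MB. */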
1796 if (!(m.tx_pg_sz & 0x1554000))
1797 return -EINVAL;
1798 if (m.tx_num_pg == -1)
1799 m.tx_num_pg = p->tx_num_pgs;
1800 if (m.rx_num_pg == -1)
1801 m.rx_num_pg = p->rx_num_pgs;
1802 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1803 return -EINVAL;
1804 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1805 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1806 return -EINVAL;
1807 p->rx_pg_size = m.rx_pg_sz;
1808 p->tx_pg_size = m.tx_pg_sz;
1809 p->rx_num_pgs = m.rx_num_pg;
1810 p->tx_num_pgs = m.tx_num_pg;
1811 break;
1812 }
1813 case CHELSIO_GET_MEM:{
1814 struct ch_mem_range t;
1815 struct mc7 *mem;
1816 u64 buf[32];
1817
1818 if (!is_offload(adapter))
1819 return -EOPNOTSUPP;
1820 if (!(adapter->flags & FULL_INIT_DONE))
1821 return -EIO; /* need the memory controllers */
1822 if (copy_from_user(&t, useraddr, sizeof(t)))
1823 return -EFAULT;
1824 if ((t.addr & 7) || (t.len & 7))
1825 return -EINVAL;
1826 if (t.mem_id == MEM_CM)
1827 mem = &adapter->cm;
1828 else if (t.mem_id == MEM_PMRX)
1829 mem = &adapter->pmrx;
1830 else if (t.mem_id == MEM_PMTX)
1831 mem = &adapter->pmtx;
1832 else
1833 return -EINVAL;
1834
1835 /*
1836 * Version scheme:
1837 * bits 0..9: chip version
1838 * bits 10..15: chip revision
1839 */
1840 t.version = 3 | (adapter->params.rev << 10);
1841 if (copy_to_user(useraddr, &t, sizeof(t)))
1842 return -EFAULT;
1843
1844 /*
1845 * Read 256 bytes at a time as len can be large and we don't
1846 * want to use huge intermediate buffers.
1847 */
1848 useraddr += sizeof(t); /* advance to start of buffer */
1849 while (t.len) {
1850 unsigned int chunk =
1851 min_t(unsigned int, t.len, sizeof(buf));
1852
1853 ret =
1854 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1855 buf);
1856 if (ret)
1857 return ret;
1858 if (copy_to_user(useraddr, buf, chunk))
1859 return -EFAULT;
1860 useraddr += chunk;
1861 t.addr += chunk;
1862 t.len -= chunk;
1863 }
1864 break;
1865 }
1866 case CHELSIO_SET_TRACE_FILTER:{
1867 struct ch_trace t;
1868 const struct trace_params *tp;
1869
1870 if (!capable(CAP_NET_ADMIN))
1871 return -EPERM;
1872 if (!offload_running(adapter))
1873 return -EAGAIN;
1874 if (copy_from_user(&t, useraddr, sizeof(t)))
1875 return -EFAULT;
1876
1877 tp = (const struct trace_params *)&t.sip;
1878 if (t.config_tx)
1879 t3_config_trace_filter(adapter, tp, 0,
1880 t.invert_match,
1881 t.trace_tx);
1882 if (t.config_rx)
1883 t3_config_trace_filter(adapter, tp, 1,
1884 t.invert_match,
1885 t.trace_rx);
1886 break;
1887 }
1888 case CHELSIO_SET_PKTSCHED:{
1889 struct ch_pktsched_params p;
1890
1891 if (!capable(CAP_NET_ADMIN))
1892 return -EPERM;
1893 if (!adapter->open_device_map)
1894 return -EAGAIN; /* uP and SGE must be running */
1895 if (copy_from_user(&p, useraddr, sizeof(p)))
1896 return -EFAULT;
1897 send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
1898 p.binding);
1899 break;
1900
1901 }
1902 default:
1903 return -EOPNOTSUPP;
1904 }
1905 return 0;
1906}
1907
1908static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1909{
1910 int ret, mmd;
1911 struct adapter *adapter = dev->priv;
1912 struct port_info *pi = netdev_priv(dev);
1913 struct mii_ioctl_data *data = if_mii(req);
1914
1915 switch (cmd) {
1916 case SIOCGMIIPHY:
1917 data->phy_id = pi->phy.addr;
1918 /* FALLTHRU */
1919 case SIOCGMIIREG:{
1920 u32 val;
1921 struct cphy *phy = &pi->phy;
1922
1923 if (!phy->mdio_read)
1924 return -EOPNOTSUPP;
1925 if (is_10G(adapter)) {
1926 mmd = data->phy_id >> 8;
1927 if (!mmd)
1928 mmd = MDIO_DEV_PCS;
1929 else if (mmd > MDIO_DEV_XGXS)
1930 return -EINVAL;
1931
1932 ret =
1933 phy->mdio_read(adapter, data->phy_id & 0x1f,
1934 mmd, data->reg_num, &val);
1935 } else
1936 ret =
1937 phy->mdio_read(adapter, data->phy_id & 0x1f,
1938 0, data->reg_num & 0x1f,
1939 &val);
1940 if (!ret)
1941 data->val_out = val;
1942 break;
1943 }
1944 case SIOCSMIIREG:{
1945 struct cphy *phy = &pi->phy;
1946
1947 if (!capable(CAP_NET_ADMIN))
1948 return -EPERM;
1949 if (!phy->mdio_write)
1950 return -EOPNOTSUPP;
1951 if (is_10G(adapter)) {
1952 mmd = data->phy_id >> 8;
1953 if (!mmd)
1954 mmd = MDIO_DEV_PCS;
1955 else if (mmd > MDIO_DEV_XGXS)
1956 return -EINVAL;
1957
1958 ret =
1959 phy->mdio_write(adapter,
1960 data->phy_id & 0x1f, mmd,
1961 data->reg_num,
1962 data->val_in);
1963 } else
1964 ret =
1965 phy->mdio_write(adapter,
1966 data->phy_id & 0x1f, 0,
1967 data->reg_num & 0x1f,
1968 data->val_in);
1969 break;
1970 }
1971 case SIOCCHIOCTL:
1972 return cxgb_extension_ioctl(dev, req->ifr_data);
1973 default:
1974 return -EOPNOTSUPP;
1975 }
1976 return ret;
1977}
1978
1979static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1980{
1981 int ret;
1982 struct adapter *adapter = dev->priv;
1983 struct port_info *pi = netdev_priv(dev);
1984
1985 if (new_mtu < 81) /* accommodate SACK */
1986 return -EINVAL;
1987 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1988 return ret;
1989 dev->mtu = new_mtu;
1990 init_port_mtus(adapter);
1991 if (adapter->params.rev == 0 && offload_running(adapter))
1992 t3_load_mtus(adapter, adapter->params.mtus,
1993 adapter->params.a_wnd, adapter->params.b_wnd,
1994 adapter->port[0]->mtu);
1995 return 0;
1996}
1997
1998static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1999{
2000 struct adapter *adapter = dev->priv;
2001 struct port_info *pi = netdev_priv(dev);
2002 struct sockaddr *addr = p;
2003
2004 if (!is_valid_ether_addr(addr->sa_data))
2005 return -EINVAL;
2006
2007 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2008 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2009 if (offload_running(adapter))
2010 write_smt_entry(adapter, pi->port_id);
2011 return 0;
2012}
2013
2014/**
2015 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2016 * @adap: the adapter
2017 * @p: the port
2018 *
2019 * Ensures that current Rx processing on any of the queues associated with
2020 * the given port completes before returning. We do this by acquiring and
2021 * releasing the locks of the response queues associated with the port.
2022 */
2023static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2024{
2025 int i;
2026
2027 for (i = 0; i < p->nqsets; i++) {
2028 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2029
2030 spin_lock_irq(&q->lock);
2031 spin_unlock_irq(&q->lock);
2032 }
2033}
2034
2035static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2036{
2037 struct adapter *adapter = dev->priv;
2038 struct port_info *pi = netdev_priv(dev);
2039
2040 pi->vlan_grp = grp;
2041 if (adapter->params.rev > 0)
2042 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2043 else {
2044 /* single control for all ports */
2045 unsigned int i, have_vlans = 0;
2046 for_each_port(adapter, i)
2047 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2048
2049 t3_set_vlan_accel(adapter, 1, have_vlans);
2050 }
2051 t3_synchronize_rx(adapter, pi);
2052}
2053
2054static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2055{
2056 /* nothing */
2057}
2058
2059#ifdef CONFIG_NET_POLL_CONTROLLER
2060static void cxgb_netpoll(struct net_device *dev)
2061{
2062 struct adapter *adapter = dev->priv;
2063 struct sge_qset *qs = dev2qset(dev);
2064
2065 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2066 adapter);
2067}
2068#endif
2069
2070/*
2071 * Periodic accumulation of MAC statistics.
2072 */
2073static void mac_stats_update(struct adapter *adapter)
2074{
2075 int i;
2076
2077 for_each_port(adapter, i) {
2078 struct net_device *dev = adapter->port[i];
2079 struct port_info *p = netdev_priv(dev);
2080
2081 if (netif_running(dev)) {
2082 spin_lock(&adapter->stats_lock);
2083 t3_mac_update_stats(&p->mac);
2084 spin_unlock(&adapter->stats_lock);
2085 }
2086 }
2087}
2088
2089static void check_link_status(struct adapter *adapter)
2090{
2091 int i;
2092
2093 for_each_port(adapter, i) {
2094 struct net_device *dev = adapter->port[i];
2095 struct port_info *p = netdev_priv(dev);
2096
2097 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2098 t3_link_changed(adapter, i);
2099 }
2100}
2101
2102static void t3_adap_check_task(struct work_struct *work)
2103{
2104 struct adapter *adapter = container_of(work, struct adapter,
2105 adap_check_task.work);
2106 const struct adapter_params *p = &adapter->params;
2107
2108 adapter->check_task_cnt++;
2109
2110 /* Check link status for PHYs without interrupts */
2111 if (p->linkpoll_period)
2112 check_link_status(adapter);
2113
2114 /* Accumulate MAC stats if needed */
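 /* (linkpoll_period is in units of 0.1s, so the product below is the time,
  * in seconds, since the stats were last accumulated.) */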
2115 if (!p->linkpoll_period ||
2116 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2117 p->stats_update_period) {
2118 mac_stats_update(adapter);
2119 adapter->check_task_cnt = 0;
2120 }
2121
2122 /* Schedule the next check update if any port is active. */
2123 spin_lock(&adapter->work_lock);
2124 if (adapter->open_device_map & PORT_MASK)
2125 schedule_chk_task(adapter);
2126 spin_unlock(&adapter->work_lock);
2127}
2128
2129/*
2130 * Processes external (PHY) interrupts in process context.
2131 */
2132static void ext_intr_task(struct work_struct *work)
2133{
2134 struct adapter *adapter = container_of(work, struct adapter,
2135 ext_intr_handler_task);
2136
2137 t3_phy_intr_handler(adapter);
2138
2139 /* Now reenable external interrupts */
2140 spin_lock_irq(&adapter->work_lock);
2141 if (adapter->slow_intr_mask) {
2142 adapter->slow_intr_mask |= F_T3DBG;
2143 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2144 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2145 adapter->slow_intr_mask);
2146 }
2147 spin_unlock_irq(&adapter->work_lock);
2148}
2149
2150/*
2151 * Interrupt-context handler for external (PHY) interrupts.
2152 */
2153void t3_os_ext_intr_handler(struct adapter *adapter)
2154{
2155 /*
2156 * Schedule a task to handle external interrupts as they may be slow
2157 * and we use a mutex to protect MDIO registers. We disable PHY
2158 * interrupts in the meantime and let the task reenable them when
2159 * it's done.
2160 */
2161 spin_lock(&adapter->work_lock);
2162 if (adapter->slow_intr_mask) {
2163 adapter->slow_intr_mask &= ~F_T3DBG;
2164 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2165 adapter->slow_intr_mask);
2166 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2167 }
2168 spin_unlock(&adapter->work_lock);
2169}
2170
2171void t3_fatal_err(struct adapter *adapter)
2172{
2173 unsigned int fw_status[4];
2174
2175 if (adapter->flags & FULL_INIT_DONE) {
2176 t3_sge_stop(adapter);
2177 t3_intr_disable(adapter);
2178 }
2179 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2180 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2181 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2182 fw_status[0], fw_status[1],
2183 fw_status[2], fw_status[3]);
2184
2185}
2186
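/*
 * Try to switch to MSI-X: one vector is requested per SGE queue set plus
 * one more, presumably for the slow-path/error interrupt.
 */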
2187static int __devinit cxgb_enable_msix(struct adapter *adap)
2188{
2189 struct msix_entry entries[SGE_QSETS + 1];
2190 int i, err;
2191
2192 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2193 entries[i].entry = i;
2194
2195 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2196 if (!err) {
2197 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2198 adap->msix_info[i].vec = entries[i].vector;
2199 } else if (err > 0)
2200 dev_info(&adap->pdev->dev,
2201 "only %d MSI-X vectors left, not using MSI-X\n", err);
2202 return err;
2203}
2204
2205static void __devinit print_port_info(struct adapter *adap,
2206 const struct adapter_info *ai)
2207{
2208 static const char *pci_variant[] = {
2209 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2210 };
2211
2212 int i;
2213 char buf[80];
2214
2215 if (is_pcie(adap))
2216 snprintf(buf, sizeof(buf), "%s x%d",
2217 pci_variant[adap->params.pci.variant],
2218 adap->params.pci.width);
2219 else
2220 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2221 pci_variant[adap->params.pci.variant],
2222 adap->params.pci.speed, adap->params.pci.width);
2223
2224 for_each_port(adap, i) {
2225 struct net_device *dev = adap->port[i];
2226 const struct port_info *pi = netdev_priv(dev);
2227
2228 if (!test_bit(i, &adap->registered_device_map))
2229 continue;
2230 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2231 dev->name, ai->desc, pi->port_type->desc,
2232 adap->params.rev, buf,
2233 (adap->flags & USING_MSIX) ? " MSI-X" :
2234 (adap->flags & USING_MSI) ? " MSI" : "");
2235 if (adap->name == dev->name && adap->params.vpd.mclk)
2236 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2237 adap->name, t3_mc7_size(&adap->cm) >> 20,
2238 t3_mc7_size(&adap->pmtx) >> 20,
2239 t3_mc7_size(&adap->pmrx) >> 20);
2240 }
2241}
2242
2243static int __devinit init_one(struct pci_dev *pdev,
2244 const struct pci_device_id *ent)
2245{
2246 static int version_printed;
2247
2248 int i, err, pci_using_dac = 0;
2249 unsigned long mmio_start, mmio_len;
2250 const struct adapter_info *ai;
2251 struct adapter *adapter = NULL;
2252 struct port_info *pi;
2253
2254 if (!version_printed) {
2255 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2256 ++version_printed;
2257 }
2258
2259 if (!cxgb3_wq) {
2260 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2261 if (!cxgb3_wq) {
2262 printk(KERN_ERR DRV_NAME
2263 ": cannot initialize work queue\n");
2264 return -ENOMEM;
2265 }
2266 }
2267
2268 err = pci_request_regions(pdev, DRV_NAME);
2269 if (err) {
2270 /* Just info, some other driver may have claimed the device. */
2271 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2272 return err;
2273 }
2274
2275 err = pci_enable_device(pdev);
2276 if (err) {
2277 dev_err(&pdev->dev, "cannot enable PCI device\n");
2278 goto out_release_regions;
2279 }
2280
2281 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2282 pci_using_dac = 1;
2283 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2284 if (err) {
2285 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2286 "coherent allocations\n");
2287 goto out_disable_device;
2288 }
2289 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2290 dev_err(&pdev->dev, "no usable DMA configuration\n");
2291 goto out_disable_device;
2292 }
2293
2294 pci_set_master(pdev);
2295
2296 mmio_start = pci_resource_start(pdev, 0);
2297 mmio_len = pci_resource_len(pdev, 0);
2298 ai = t3_get_adapter_info(ent->driver_data);
2299
2300 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2301 if (!adapter) {
2302 err = -ENOMEM;
2303 goto out_disable_device;
2304 }
2305
2306 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2307 if (!adapter->regs) {
2308 dev_err(&pdev->dev, "cannot map device registers\n");
2309 err = -ENOMEM;
2310 goto out_free_adapter;
2311 }
2312
2313 adapter->pdev = pdev;
2314 adapter->name = pci_name(pdev);
2315 adapter->msg_enable = dflt_msg_enable;
2316 adapter->mmio_len = mmio_len;
2317
2318 mutex_init(&adapter->mdio_lock);
2319 spin_lock_init(&adapter->work_lock);
2320 spin_lock_init(&adapter->stats_lock);
2321
2322 INIT_LIST_HEAD(&adapter->adapter_list);
2323 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2324 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2325
2326 for (i = 0; i < ai->nports; ++i) {
2327 struct net_device *netdev;
2328
2329 netdev = alloc_etherdev(sizeof(struct port_info));
2330 if (!netdev) {
2331 err = -ENOMEM;
2332 goto out_free_dev;
2333 }
2334
2335 SET_MODULE_OWNER(netdev);
2336 SET_NETDEV_DEV(netdev, &pdev->dev);
2337
2338 adapter->port[i] = netdev;
2339 pi = netdev_priv(netdev);
2340 pi->rx_csum_offload = 1;
2341 pi->nqsets = 1;
2342 pi->first_qset = i;
2343 pi->activity = 0;
2344 pi->port_id = i;
2345 netif_carrier_off(netdev);
2346 netdev->irq = pdev->irq;
2347 netdev->mem_start = mmio_start;
2348 netdev->mem_end = mmio_start + mmio_len - 1;
2349 netdev->priv = adapter;
2350 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2351 netdev->features |= NETIF_F_LLTX;
2352 if (pci_using_dac)
2353 netdev->features |= NETIF_F_HIGHDMA;
2354
2355 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2356 netdev->vlan_rx_register = vlan_rx_register;
2357 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2358
2359 netdev->open = cxgb_open;
2360 netdev->stop = cxgb_close;
2361 netdev->hard_start_xmit = t3_eth_xmit;
2362 netdev->get_stats = cxgb_get_stats;
2363 netdev->set_multicast_list = cxgb_set_rxmode;
2364 netdev->do_ioctl = cxgb_ioctl;
2365 netdev->change_mtu = cxgb_change_mtu;
2366 netdev->set_mac_address = cxgb_set_mac_addr;
2367#ifdef CONFIG_NET_POLL_CONTROLLER
2368 netdev->poll_controller = cxgb_netpoll;
2369#endif
2370 netdev->weight = 64;
2371
2372 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2373 }
2374
2375 pci_set_drvdata(pdev, adapter->port[0]);
2376 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2377 err = -ENODEV;
2378 goto out_free_dev;
2379 }
2380
2381 /*
2382 * The card is now ready to go. If any errors occur during device
2383 * registration we do not fail the whole card but rather proceed only
2384 * with the ports we manage to register successfully. However, we must
2385 * register at least one net device.
2386 */
2387 for_each_port(adapter, i) {
2388 err = register_netdev(adapter->port[i]);
2389 if (err)
2390 dev_warn(&pdev->dev,
2391 "cannot register net device %s, skipping\n",
2392 adapter->port[i]->name);
2393 else {
2394 /*
2395 * Change the name we use for messages to the name of
2396 * the first successfully registered interface.
2397 */
2398 if (!adapter->registered_device_map)
2399 adapter->name = adapter->port[i]->name;
2400
2401 __set_bit(i, &adapter->registered_device_map);
2402 }
2403 }
2404 if (!adapter->registered_device_map) {
2405 dev_err(&pdev->dev, "could not register any net devices\n");
2406 goto out_free_dev;
2407 }
2408
2409 /* Driver's ready. Reflect it on LEDs */
2410 t3_led_ready(adapter);
2411
2412 if (is_offload(adapter)) {
2413 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2414 cxgb3_adapter_ofld(adapter);
2415 }
2416
2417 /* See what interrupts we'll be using */
2418 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2419 adapter->flags |= USING_MSIX;
2420 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2421 adapter->flags |= USING_MSI;
2422
2423 err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
2424 &cxgb3_attr_group);
2425
2426 print_port_info(adapter, ai);
2427 return 0;
2428
2429out_free_dev:
2430 iounmap(adapter->regs);
2431 for (i = ai->nports - 1; i >= 0; --i)
2432 if (adapter->port[i])
2433 free_netdev(adapter->port[i]);
2434
2435out_free_adapter:
2436 kfree(adapter);
2437
2438out_disable_device:
2439 pci_disable_device(pdev);
2440out_release_regions:
2441 pci_release_regions(pdev);
2442 pci_set_drvdata(pdev, NULL);
2443 return err;
2444}
2445
2446static void __devexit remove_one(struct pci_dev *pdev)
2447{
2448 struct net_device *dev = pci_get_drvdata(pdev);
2449
2450 if (dev) {
2451 int i;
2452 struct adapter *adapter = dev->priv;
2453
2454 t3_sge_stop(adapter);
2455 sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
2456 &cxgb3_attr_group);
2457
2458 for_each_port(adapter, i)
2459 if (test_bit(i, &adapter->registered_device_map))
2460 unregister_netdev(adapter->port[i]);
2461
2462 if (is_offload(adapter)) {
2463 cxgb3_adapter_unofld(adapter);
2464 if (test_bit(OFFLOAD_DEVMAP_BIT,
2465 &adapter->open_device_map))
2466 offload_close(&adapter->tdev);
2467 }
2468
2469 t3_free_sge_resources(adapter);
2470 cxgb_disable_msi(adapter);
2471
2472 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2473 if (adapter->dummy_netdev[i]) {
2474 free_netdev(adapter->dummy_netdev[i]);
2475 adapter->dummy_netdev[i] = NULL;
2476 }
2477
2478 for_each_port(adapter, i)
2479 if (adapter->port[i])
2480 free_netdev(adapter->port[i]);
2481
2482 iounmap(adapter->regs);
2483 kfree(adapter);
2484 pci_release_regions(pdev);
2485 pci_disable_device(pdev);
2486 pci_set_drvdata(pdev, NULL);
2487 }
2488}
2489
2490static struct pci_driver driver = {
2491 .name = DRV_NAME,
2492 .id_table = cxgb3_pci_tbl,
2493 .probe = init_one,
2494 .remove = __devexit_p(remove_one),
2495};
2496
2497static int __init cxgb3_init_module(void)
2498{
2499 int ret;
2500
2501 cxgb3_offload_init();
2502
2503 ret = pci_register_driver(&driver);
2504 return ret;
2505}
2506
2507static void __exit cxgb3_cleanup_module(void)
2508{
2509 pci_unregister_driver(&driver);
2510 if (cxgb3_wq)
2511 destroy_workqueue(cxgb3_wq);
2512}
2513
2514module_init(cxgb3_init_module);
2515module_exit(cxgb3_cleanup_module);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
new file mode 100644
index 000000000000..c3a02d613382
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -0,0 +1,1222 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/list.h>
35#include <net/neighbour.h>
36#include <linux/notifier.h>
37#include <asm/atomic.h>
38#include <linux/proc_fs.h>
39#include <linux/if_vlan.h>
40#include <net/netevent.h>
41#include <linux/highmem.h>
42#include <linux/vmalloc.h>
43
44#include "common.h"
45#include "regs.h"
46#include "cxgb3_ioctl.h"
47#include "cxgb3_ctl_defs.h"
48#include "cxgb3_defs.h"
49#include "l2t.h"
50#include "firmware_exports.h"
51#include "cxgb3_offload.h"
52
53static LIST_HEAD(client_list);
54static LIST_HEAD(ofld_dev_list);
55static DEFINE_MUTEX(cxgb3_db_lock);
56
57static DEFINE_RWLOCK(adapter_list_lock);
58static LIST_HEAD(adapter_list);
59
60static const unsigned int MAX_ATIDS = 64 * 1024;
61static const unsigned int ATID_BASE = 0x100000;
62
63static inline int offload_activated(struct t3cdev *tdev)
64{
65 const struct adapter *adapter = tdev2adap(tdev);
66
67 return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
68}
69
70/**
71 * cxgb3_register_client - register an offload client
72 * @client: the client
73 *
74 * Add the client to the client list and call back the client for each
75 * activated offload device.
76 */
77void cxgb3_register_client(struct cxgb3_client *client)
78{
79 struct t3cdev *tdev;
80
81 mutex_lock(&cxgb3_db_lock);
82 list_add_tail(&client->client_list, &client_list);
83
84 if (client->add) {
85 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
86 if (offload_activated(tdev))
87 client->add(tdev);
88 }
89 }
90 mutex_unlock(&cxgb3_db_lock);
91}
92
93EXPORT_SYMBOL(cxgb3_register_client);
94
95/**
96 * cxgb3_unregister_client - unregister an offload client
97 * @client: the client
98 *
99 * Remove the client from the client list and call back the client for
100 * each activated offload device.
101 */
102void cxgb3_unregister_client(struct cxgb3_client *client)
103{
104 struct t3cdev *tdev;
105
106 mutex_lock(&cxgb3_db_lock);
107 list_del(&client->client_list);
108
109 if (client->remove) {
110 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
111 if (offload_activated(tdev))
112 client->remove(tdev);
113 }
114 }
115 mutex_unlock(&cxgb3_db_lock);
116}
117
118EXPORT_SYMBOL(cxgb3_unregister_client);
119
120/**
121 * cxgb3_add_clients - activate registered clients for an offload device
122 * @tdev: the offload device
123 *
124 * Call back all registered clients once an offload device is activated
125 */
126void cxgb3_add_clients(struct t3cdev *tdev)
127{
128 struct cxgb3_client *client;
129
130 mutex_lock(&cxgb3_db_lock);
131 list_for_each_entry(client, &client_list, client_list) {
132 if (client->add)
133 client->add(tdev);
134 }
135 mutex_unlock(&cxgb3_db_lock);
136}
137
138/**
139 * cxgb3_remove_clients - deactivates registered clients
140 * for an offload device
141 * @tdev: the offload device
142 *
143 * Call back all registered clients once an offload device is deactivated
144 */
145void cxgb3_remove_clients(struct t3cdev *tdev)
146{
147 struct cxgb3_client *client;
148
149 mutex_lock(&cxgb3_db_lock);
150 list_for_each_entry(client, &client_list, client_list) {
151 if (client->remove)
152 client->remove(tdev);
153 }
154 mutex_unlock(&cxgb3_db_lock);
155}
156
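/*
 * Map a (MAC address, VLAN) pair seen by the hardware back to the owning
 * net_device: the matching port, its VLAN sub-device when a VLAN id is
 * given, or the topmost bonding master otherwise.
 */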
157static struct net_device *get_iff_from_mac(struct adapter *adapter,
158 const unsigned char *mac,
159 unsigned int vlan)
160{
161 int i;
162
163 for_each_port(adapter, i) {
164 const struct vlan_group *grp;
165 struct net_device *dev = adapter->port[i];
166 const struct port_info *p = netdev_priv(dev);
167
168 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
169 if (vlan && vlan != VLAN_VID_MASK) {
170 grp = p->vlan_grp;
171 dev = grp ? grp->vlan_devices[vlan] : NULL;
172 } else
173 while (dev->master)
174 dev = dev->master;
175 return dev;
176 }
177 }
178 return NULL;
179}
180
181static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
182 void *data)
183{
184 int ret = 0;
185 struct ulp_iscsi_info *uiip = data;
186
187 switch (req) {
188 case ULP_ISCSI_GET_PARAMS:
189 uiip->pdev = adapter->pdev;
190 uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
191 uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
192 uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
193 /*
194 * On tx, the iscsi pdu has to be <= tx page size and has to
195 * fit into the Tx PM FIFO.
196 */
197 uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
198 t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
199 /* On Rx, the iSCSI pdu has to be < the Rx page size and the whole pdu
200 * plus CPL headers must fit into one SGE buffer. */
201 uiip->max_rxsz = min_t(unsigned int,
202 adapter->params.tp.rx_pg_size,
203 (adapter->sge.qs[0].fl[1].buf_size -
204 sizeof(struct cpl_rx_data) * 2 -
205 sizeof(struct cpl_rx_data_ddp)));
206 break;
207 case ULP_ISCSI_SET_PARAMS:
208 t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
209 break;
210 default:
211 ret = -EOPNOTSUPP;
212 }
213 return ret;
214}
215
216/* Response queue used for RDMA events. */
217#define ASYNC_NOTIF_RSPQ 0
218
219static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
220{
221 int ret = 0;
222
223 switch (req) {
224 case RDMA_GET_PARAMS:{
225 struct rdma_info *req = data;
226 struct pci_dev *pdev = adapter->pdev;
227
228 req->udbell_physbase = pci_resource_start(pdev, 2);
229 req->udbell_len = pci_resource_len(pdev, 2);
230 req->tpt_base =
231 t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
232 req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
233 req->pbl_base =
234 t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
235 req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
236 req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
237 req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
238 req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
239 req->pdev = pdev;
240 break;
241 }
242 case RDMA_CQ_OP:{
243 unsigned long flags;
244 struct rdma_cq_op *req = data;
245
246 /* may be called in any context */
247 spin_lock_irqsave(&adapter->sge.reg_lock, flags);
248 ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
249 req->credits);
250 spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
251 break;
252 }
253 case RDMA_GET_MEM:{
254 struct ch_mem_range *t = data;
255 struct mc7 *mem;
256
257 if ((t->addr & 7) || (t->len & 7))
258 return -EINVAL;
259 if (t->mem_id == MEM_CM)
260 mem = &adapter->cm;
261 else if (t->mem_id == MEM_PMRX)
262 mem = &adapter->pmrx;
263 else if (t->mem_id == MEM_PMTX)
264 mem = &adapter->pmtx;
265 else
266 return -EINVAL;
267
268 ret =
269 t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
270 (u64 *) t->buf);
271 if (ret)
272 return ret;
273 break;
274 }
275 case RDMA_CQ_SETUP:{
276 struct rdma_cq_setup *req = data;
277
278 spin_lock_irq(&adapter->sge.reg_lock);
279 ret =
280 t3_sge_init_cqcntxt(adapter, req->id,
281 req->base_addr, req->size,
282 ASYNC_NOTIF_RSPQ,
283 req->ovfl_mode, req->credits,
284 req->credit_thres);
285 spin_unlock_irq(&adapter->sge.reg_lock);
286 break;
287 }
288 case RDMA_CQ_DISABLE:
289 spin_lock_irq(&adapter->sge.reg_lock);
290 ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
291 spin_unlock_irq(&adapter->sge.reg_lock);
292 break;
293 case RDMA_CTRL_QP_SETUP:{
294 struct rdma_ctrlqp_setup *req = data;
295
296 spin_lock_irq(&adapter->sge.reg_lock);
297 ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
298 SGE_CNTXT_RDMA,
299 ASYNC_NOTIF_RSPQ,
300 req->base_addr, req->size,
301 FW_RI_TID_START, 1, 0);
302 spin_unlock_irq(&adapter->sge.reg_lock);
303 break;
304 }
305 default:
306 ret = -EOPNOTSUPP;
307 }
308 return ret;
309}
310
311static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
312{
313 struct adapter *adapter = tdev2adap(tdev);
314 struct tid_range *tid;
315 struct mtutab *mtup;
316 struct iff_mac *iffmacp;
317 struct ddp_params *ddpp;
318 struct adap_ports *ports;
319 int i;
320
321 switch (req) {
322 case GET_MAX_OUTSTANDING_WR:
323 *(unsigned int *)data = FW_WR_NUM;
324 break;
325 case GET_WR_LEN:
326 *(unsigned int *)data = WR_FLITS;
327 break;
328 case GET_TX_MAX_CHUNK:
329 *(unsigned int *)data = 1 << 20; /* 1MB */
330 break;
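 /* The MC5 TCAM is partitioned, from the bottom up, into connection TIDs,
  * server (listening) TIDs, and then filter and routing entries; the next
  * two cases report the first two regions. */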
331 case GET_TID_RANGE:
332 tid = data;
333 tid->num = t3_mc5_size(&adapter->mc5) -
334 adapter->params.mc5.nroutes -
335 adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
336 tid->base = 0;
337 break;
338 case GET_STID_RANGE:
339 tid = data;
340 tid->num = adapter->params.mc5.nservers;
341 tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
342 adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
343 break;
344 case GET_L2T_CAPACITY:
345 *(unsigned int *)data = 2048;
346 break;
347 case GET_MTUS:
348 mtup = data;
349 mtup->size = NMTUS;
350 mtup->mtus = adapter->params.mtus;
351 break;
352 case GET_IFF_FROM_MAC:
353 iffmacp = data;
354 iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
355 iffmacp->vlan_tag &
356 VLAN_VID_MASK);
357 break;
358 case GET_DDP_PARAMS:
359 ddpp = data;
360 ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
361 ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
362 ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
363 break;
364 case GET_PORTS:
365 ports = data;
366 ports->nports = adapter->params.nports;
367 for_each_port(adapter, i)
368 ports->lldevs[i] = adapter->port[i];
369 break;
370 case ULP_ISCSI_GET_PARAMS:
371 case ULP_ISCSI_SET_PARAMS:
372 if (!offload_running(adapter))
373 return -EAGAIN;
374 return cxgb_ulp_iscsi_ctl(adapter, req, data);
375 case RDMA_GET_PARAMS:
376 case RDMA_CQ_OP:
377 case RDMA_CQ_SETUP:
378 case RDMA_CQ_DISABLE:
379 case RDMA_CTRL_QP_SETUP:
380 case RDMA_GET_MEM:
381 if (!offload_running(adapter))
382 return -EAGAIN;
383 return cxgb_rdma_ctl(adapter, req, data);
384 default:
385 return -EOPNOTSUPP;
386 }
387 return 0;
388}
389
390/*
391 * Dummy handler for Rx offload packets in case we get an offload packet before
392 * proper processing is set up. This complains and drops the packet as it isn't
393 * normal to get offload packets at this stage.
394 */
395static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
396 int n)
397{
398 CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
399 n, ntohl(*(u32 *)skbs[0]->data));
400 while (n--)
401 dev_kfree_skb_any(skbs[n]);
402 return 0;
403}
404
405static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
406{
407}
408
409void cxgb3_set_dummy_ops(struct t3cdev *dev)
410{
411 dev->recv = rx_offload_blackhole;
412 dev->neigh_update = dummy_neigh_update;
413}
414
415/*
416 * Free an active-open TID.
417 */
418void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
419{
420 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
421 union active_open_entry *p = atid2entry(t, atid);
422 void *ctx = p->t3c_tid.ctx;
423
424 spin_lock_bh(&t->atid_lock);
425 p->next = t->afree;
426 t->afree = p;
427 t->atids_in_use--;
428 spin_unlock_bh(&t->atid_lock);
429
430 return ctx;
431}
432
433EXPORT_SYMBOL(cxgb3_free_atid);
434
435/*
436 * Free a server TID and return it to the free pool.
437 */
438void cxgb3_free_stid(struct t3cdev *tdev, int stid)
439{
440 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
441 union listen_entry *p = stid2entry(t, stid);
442
443 spin_lock_bh(&t->stid_lock);
444 p->next = t->sfree;
445 t->sfree = p;
446 t->stids_in_use--;
447 spin_unlock_bh(&t->stid_lock);
448}
449
450EXPORT_SYMBOL(cxgb3_free_stid);
451
452void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
453 void *ctx, unsigned int tid)
454{
455 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
456
457 t->tid_tab[tid].client = client;
458 t->tid_tab[tid].ctx = ctx;
459 atomic_inc(&t->tids_in_use);
460}
461
462EXPORT_SYMBOL(cxgb3_insert_tid);
463
464/*
465 * Populate a TID_RELEASE WR. The skb must already be properly sized.
466 */
467static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
468{
469 struct cpl_tid_release *req;
470
471 skb->priority = CPL_PRIORITY_SETUP;
472 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
473 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
474 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
475}
476
477static void t3_process_tid_release_list(struct work_struct *work)
478{
479 struct t3c_data *td = container_of(work, struct t3c_data,
480 tid_release_task);
481 struct sk_buff *skb;
482 struct t3cdev *tdev = td->dev;
483
484
485 spin_lock_bh(&td->tid_release_lock);
486 while (td->tid_release_list) {
487 struct t3c_tid_entry *p = td->tid_release_list;
488
489 td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
490 spin_unlock_bh(&td->tid_release_lock);
491
492 skb = alloc_skb(sizeof(struct cpl_tid_release),
493 GFP_KERNEL | __GFP_NOFAIL);
494 mk_tid_release(skb, p - td->tid_maps.tid_tab);
495 cxgb3_ofld_send(tdev, skb);
496 p->ctx = NULL;
497 spin_lock_bh(&td->tid_release_lock);
498 }
499 spin_unlock_bh(&td->tid_release_lock);
500}
501
502/* use ctx as a next pointer in the tid release list */
503void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
504{
505 struct t3c_data *td = T3C_DATA(tdev);
506 struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
507
508 spin_lock_bh(&td->tid_release_lock);
509 p->ctx = (void *)td->tid_release_list;
510 td->tid_release_list = p;
511 if (!p->ctx)
512 schedule_work(&td->tid_release_task);
513 spin_unlock_bh(&td->tid_release_lock);
514}
515
516EXPORT_SYMBOL(cxgb3_queue_tid_release);
517
518/*
519 * Remove a tid from the TID table. A client may defer processing its last
520 * CPL message if it is locked at the time it arrives, and while the message
521 * sits in the client's backlog the TID may be reused for another connection.
522 * To handle this we atomically switch the TID association if it still points
523 * to the original client context.
524 */
525void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
526{
527 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
528
529 BUG_ON(tid >= t->ntids);
530 if (tdev->type == T3A)
531 (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
532 else {
533 struct sk_buff *skb;
534
535 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
536 if (likely(skb)) {
537 mk_tid_release(skb, tid);
538 cxgb3_ofld_send(tdev, skb);
539 t->tid_tab[tid].ctx = NULL;
540 } else
541 cxgb3_queue_tid_release(tdev, tid);
542 }
543 atomic_dec(&t->tids_in_use);
544}
545
546EXPORT_SYMBOL(cxgb3_remove_tid);
547
548int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
549 void *ctx)
550{
551 int atid = -1;
552 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
553
554 spin_lock_bh(&t->atid_lock);
555 if (t->afree) {
556 union active_open_entry *p = t->afree;
557
558 atid = (p - t->atid_tab) + t->atid_base;
559 t->afree = p->next;
560 p->t3c_tid.ctx = ctx;
561 p->t3c_tid.client = client;
562 t->atids_in_use++;
563 }
564 spin_unlock_bh(&t->atid_lock);
565 return atid;
566}
567
568EXPORT_SYMBOL(cxgb3_alloc_atid);
569
570int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
571 void *ctx)
572{
573 int stid = -1;
574 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
575
576 spin_lock_bh(&t->stid_lock);
577 if (t->sfree) {
578 union listen_entry *p = t->sfree;
579
580 stid = (p - t->stid_tab) + t->stid_base;
581 t->sfree = p->next;
582 p->t3c_tid.ctx = ctx;
583 p->t3c_tid.client = client;
584 t->stids_in_use++;
585 }
586 spin_unlock_bh(&t->stid_lock);
587 return stid;
588}
589
590EXPORT_SYMBOL(cxgb3_alloc_stid);
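cxgb3_alloc_atid()/cxgb3_alloc_stid() hand out entries from the per-adapter free lists and bind them to a client context; the matching cxgb3_free_atid()/cxgb3_free_stid() return them. A sketch of the intended lifecycle for a hypothetical client (the CPL used to actually open the connection is elided):

	static int example_active_open(struct t3cdev *dev,
				       struct cxgb3_client *client, void *conn_ctx)
	{
		int atid = cxgb3_alloc_atid(dev, client, conn_ctx);

		if (atid < 0)
			return -ENOMEM;		/* atid table exhausted */

		/* ... build and send the active-open CPL carrying this atid ... */

		/* on failure, or once the connection has a real hardware tid: */
		cxgb3_free_atid(dev, atid);
		return 0;
	}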
591
592static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
593{
594 struct cpl_smt_write_rpl *rpl = cplhdr(skb);
595
596 if (rpl->status != CPL_ERR_NONE)
597 printk(KERN_ERR
598 "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
599 rpl->status, GET_TID(rpl));
600
601 return CPL_RET_BUF_DONE;
602}
603
604static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
605{
606 struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
607
608 if (rpl->status != CPL_ERR_NONE)
609 printk(KERN_ERR
610 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
611 rpl->status, GET_TID(rpl));
612
613 return CPL_RET_BUF_DONE;
614}
615
616static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
617{
618 struct cpl_act_open_rpl *rpl = cplhdr(skb);
619 unsigned int atid = G_TID(ntohl(rpl->atid));
620 struct t3c_tid_entry *t3c_tid;
621
622 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
623 if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
624 t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
625 return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
626 t3c_tid->
627 ctx);
628 } else {
629 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
630 dev->name, CPL_ACT_OPEN_RPL);
631 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
632 }
633}
634
635static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
636{
637 union opcode_tid *p = cplhdr(skb);
638 unsigned int stid = G_TID(ntohl(p->opcode_tid));
639 struct t3c_tid_entry *t3c_tid;
640
641 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
642 if (t3c_tid->ctx && t3c_tid->client->handlers &&
643 t3c_tid->client->handlers[p->opcode]) {
644 return t3c_tid->client->handlers[p->opcode] (dev, skb,
645 t3c_tid->ctx);
646 } else {
647 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
648 dev->name, p->opcode);
649 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
650 }
651}
652
653static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
654{
655 union opcode_tid *p = cplhdr(skb);
656 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
657 struct t3c_tid_entry *t3c_tid;
658
659 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
660 if (t3c_tid->ctx && t3c_tid->client->handlers &&
661 t3c_tid->client->handlers[p->opcode]) {
662 return t3c_tid->client->handlers[p->opcode]
663 (dev, skb, t3c_tid->ctx);
664 } else {
665 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
666 dev->name, p->opcode);
667 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
668 }
669}
670
671static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
672{
673 struct cpl_pass_accept_req *req = cplhdr(skb);
674 unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
675 struct t3c_tid_entry *t3c_tid;
676
677 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
678 if (t3c_tid->ctx && t3c_tid->client->handlers &&
679 t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
680 return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
681 (dev, skb, t3c_tid->ctx);
682 } else {
683 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
684 dev->name, CPL_PASS_ACCEPT_REQ);
685 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
686 }
687}
688
689static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
690{
691 union opcode_tid *p = cplhdr(skb);
692 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
693 struct t3c_tid_entry *t3c_tid;
694
695 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
696 if (t3c_tid->ctx && t3c_tid->client->handlers &&
697 t3c_tid->client->handlers[p->opcode]) {
698 return t3c_tid->client->handlers[p->opcode]
699 (dev, skb, t3c_tid->ctx);
700 } else {
701 struct cpl_abort_req_rss *req = cplhdr(skb);
702 struct cpl_abort_rpl *rpl;
703
704 struct sk_buff *skb =
705 alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
706 if (!skb) {
707			printk(KERN_ERR "do_abort_req_rss: couldn't get skb!\n");
708 goto out;
709 }
710 skb->priority = CPL_PRIORITY_DATA;
711 __skb_put(skb, sizeof(struct cpl_abort_rpl));
712 rpl = cplhdr(skb);
713 rpl->wr.wr_hi =
714 htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
715 rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
716 OPCODE_TID(rpl) =
717 htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
718 rpl->cmd = req->status;
719 cxgb3_ofld_send(dev, skb);
720out:
721 return CPL_RET_BUF_DONE;
722 }
723}
724
725static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
726{
727 struct cpl_act_establish *req = cplhdr(skb);
728 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
729 struct t3c_tid_entry *t3c_tid;
730
731 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
732 if (t3c_tid->ctx && t3c_tid->client->handlers &&
733 t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
734 return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
735 (dev, skb, t3c_tid->ctx);
736 } else {
737 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
738		       dev->name, CPL_ACT_ESTABLISH);
739 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
740 }
741}
742
743static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
744{
745 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
746
747 if (rpl->status != CPL_ERR_NONE)
748 printk(KERN_ERR
749 "Unexpected SET_TCB_RPL status %u for tid %u\n",
750 rpl->status, GET_TID(rpl));
751 return CPL_RET_BUF_DONE;
752}
753
754static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
755{
756 struct cpl_trace_pkt *p = cplhdr(skb);
757
758 skb->protocol = 0xffff;
759 skb->dev = dev->lldev;
760 skb_pull(skb, sizeof(*p));
761 skb->mac.raw = skb->data;
762 netif_receive_skb(skb);
763 return 0;
764}
765
766static int do_term(struct t3cdev *dev, struct sk_buff *skb)
767{
768 unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
769 unsigned int opcode = G_OPCODE(ntohl(skb->csum));
770 struct t3c_tid_entry *t3c_tid;
771
772 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
773 if (t3c_tid->ctx && t3c_tid->client->handlers &&
774 t3c_tid->client->handlers[opcode]) {
775 return t3c_tid->client->handlers[opcode] (dev, skb,
776 t3c_tid->ctx);
777 } else {
778 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
779 dev->name, opcode);
780 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
781 }
782}
783
784static int nb_callback(struct notifier_block *self, unsigned long event,
785 void *ctx)
786{
787 switch (event) {
788 case (NETEVENT_NEIGH_UPDATE):{
789 cxgb_neigh_update((struct neighbour *)ctx);
790 break;
791 }
792 case (NETEVENT_PMTU_UPDATE):
793 break;
794 case (NETEVENT_REDIRECT):{
795 struct netevent_redirect *nr = ctx;
796 cxgb_redirect(nr->old, nr->new);
797 cxgb_neigh_update(nr->new->neighbour);
798 break;
799 }
800 default:
801 break;
802 }
803 return 0;
804}
805
806static struct notifier_block nb = {
807 .notifier_call = nb_callback
808};
809
810/*
811 * Process a received packet with an unknown/unexpected CPL opcode.
812 */
813static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
814{
815 printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
816 *skb->data);
817 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
818}
819
820/*
821 * Handlers for each CPL opcode
822 */
823static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
824
825/*
826 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
827 * to unregister an existing handler.
828 */
829void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
830{
831 if (opcode < NUM_CPL_CMDS)
832 cpl_handlers[opcode] = h ? h : do_bad_cpl;
833 else
834 printk(KERN_ERR "T3C: handler registration for "
835 "opcode %x failed\n", opcode);
836}
837
838EXPORT_SYMBOL(t3_register_cpl_handler);
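A module that wants to intercept a particular opcode installs its handler through t3_register_cpl_handler() and restores the default by passing NULL (which reinstates do_bad_cpl). A minimal, hypothetical example; handlers run from process_rx() below and should return CPL_RET_BUF_DONE when the skb may be freed:

	static int example_rx_urg_notify(struct t3cdev *dev, struct sk_buff *skb)
	{
		/* consume the notification; process_rx() frees the skb for us */
		return CPL_RET_BUF_DONE;
	}

	/* at init:   t3_register_cpl_handler(CPL_RX_URG_NOTIFY, example_rx_urg_notify); */
	/* at unload: t3_register_cpl_handler(CPL_RX_URG_NOTIFY, NULL); */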
839
840/*
841 * T3CDEV's receive method.
842 */
843int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
844{
845 while (n--) {
846 struct sk_buff *skb = *skbs++;
847 unsigned int opcode = G_OPCODE(ntohl(skb->csum));
848 int ret = cpl_handlers[opcode] (dev, skb);
849
850#if VALIDATE_TID
851 if (ret & CPL_RET_UNKNOWN_TID) {
852 union opcode_tid *p = cplhdr(skb);
853
854 printk(KERN_ERR "%s: CPL message (opcode %u) had "
855 "unknown TID %u\n", dev->name, opcode,
856 G_TID(ntohl(p->opcode_tid)));
857 }
858#endif
859 if (ret & CPL_RET_BUF_DONE)
860 kfree_skb(skb);
861 }
862 return 0;
863}
864
865/*
866 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
867 */
868int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
869{
870 int r;
871
872 local_bh_disable();
873 r = dev->send(dev, skb);
874 local_bh_enable();
875 return r;
876}
877
878EXPORT_SYMBOL(cxgb3_ofld_send);
879
880static int is_offloading(struct net_device *dev)
881{
882 struct adapter *adapter;
883 int i;
884
885 read_lock_bh(&adapter_list_lock);
886 list_for_each_entry(adapter, &adapter_list, adapter_list) {
887 for_each_port(adapter, i) {
888 if (dev == adapter->port[i]) {
889 read_unlock_bh(&adapter_list_lock);
890 return 1;
891 }
892 }
893 }
894 read_unlock_bh(&adapter_list_lock);
895 return 0;
896}
897
898void cxgb_neigh_update(struct neighbour *neigh)
899{
900 struct net_device *dev = neigh->dev;
901
902 if (dev && (is_offloading(dev))) {
903 struct t3cdev *tdev = T3CDEV(dev);
904
905 BUG_ON(!tdev);
906 t3_l2t_update(tdev, neigh);
907 }
908}
909
910static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
911{
912 struct sk_buff *skb;
913 struct cpl_set_tcb_field *req;
914
915 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
916 if (!skb) {
917 printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
918 return;
919 }
920 skb->priority = CPL_PRIORITY_CONTROL;
921 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
922 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
923 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
924 req->reply = 0;
925 req->cpu_idx = 0;
926 req->word = htons(W_TCB_L2T_IX);
927 req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
928 req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
929 tdev->send(tdev, skb);
930}
931
932void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
933{
934 struct net_device *olddev, *newdev;
935 struct tid_info *ti;
936 struct t3cdev *tdev;
937 u32 tid;
938 int update_tcb;
939 struct l2t_entry *e;
940 struct t3c_tid_entry *te;
941
942 olddev = old->neighbour->dev;
943 newdev = new->neighbour->dev;
944 if (!is_offloading(olddev))
945 return;
946 if (!is_offloading(newdev)) {
947		printk(KERN_WARNING "%s: Redirect to non-offload "
948 "device ignored.\n", __FUNCTION__);
949 return;
950 }
951 tdev = T3CDEV(olddev);
952 BUG_ON(!tdev);
953 if (tdev != T3CDEV(newdev)) {
954 printk(KERN_WARNING "%s: Redirect to different "
955 "offload device ignored.\n", __FUNCTION__);
956 return;
957 }
958
959 /* Add new L2T entry */
960 e = t3_l2t_get(tdev, new->neighbour, newdev);
961 if (!e) {
962 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
963 __FUNCTION__);
964 return;
965 }
966
967 /* Walk tid table and notify clients of dst change. */
968 ti = &(T3C_DATA(tdev))->tid_maps;
969 for (tid = 0; tid < ti->ntids; tid++) {
970 te = lookup_tid(ti, tid);
971 BUG_ON(!te);
972 if (te->ctx && te->client && te->client->redirect) {
973 update_tcb = te->client->redirect(te->ctx, old, new, e);
974 if (update_tcb) {
975 l2t_hold(L2DATA(tdev), e);
976 set_l2t_ix(tdev, tid, e);
977 }
978 }
979 }
980 l2t_release(L2DATA(tdev), e);
981}
982
983/*
984 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
985 * The allocated memory is cleared.
986 */
987void *cxgb_alloc_mem(unsigned long size)
988{
989 void *p = kmalloc(size, GFP_KERNEL);
990
991 if (!p)
992 p = vmalloc(size);
993 if (p)
994 memset(p, 0, size);
995 return p;
996}
997
998/*
999 * Free memory allocated through cxgb_alloc_mem().
1000 */
1001void cxgb_free_mem(void *addr)
1002{
1003 unsigned long p = (unsigned long)addr;
1004
1005 if (p >= VMALLOC_START && p < VMALLOC_END)
1006 vfree(addr);
1007 else
1008 kfree(addr);
1009}
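cxgb_alloc_mem()/cxgb_free_mem() give the driver one allocator for tables that may be too large for kmalloc(): the allocation falls back to vmalloc(), and the free side chooses vfree() or kfree() by checking whether the pointer lies in the vmalloc area. A brief usage sketch for a hypothetical table, mirroring what init_tid_tabs() does below:

	static int example_alloc_table(unsigned int nentries)
	{
		struct t3c_tid_entry *tab = cxgb_alloc_mem(nentries * sizeof(*tab));

		if (!tab)
			return -ENOMEM;
		/* ... tab is zeroed and ready for use ... */
		cxgb_free_mem(tab);
		return 0;
	}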
1010
1011/*
1012 * Allocate and initialize the TID tables. Returns 0 on success.
1013 */
1014static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
1015 unsigned int natids, unsigned int nstids,
1016 unsigned int atid_base, unsigned int stid_base)
1017{
1018 unsigned long size = ntids * sizeof(*t->tid_tab) +
1019 natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
1020
1021 t->tid_tab = cxgb_alloc_mem(size);
1022 if (!t->tid_tab)
1023 return -ENOMEM;
1024
1025 t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
1026 t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
1027 t->ntids = ntids;
1028 t->nstids = nstids;
1029 t->stid_base = stid_base;
1030 t->sfree = NULL;
1031 t->natids = natids;
1032 t->atid_base = atid_base;
1033 t->afree = NULL;
1034 t->stids_in_use = t->atids_in_use = 0;
1035 atomic_set(&t->tids_in_use, 0);
1036 spin_lock_init(&t->stid_lock);
1037 spin_lock_init(&t->atid_lock);
1038
1039 /*
1040 * Setup the free lists for stid_tab and atid_tab.
1041 */
1042 if (nstids) {
1043 while (--nstids)
1044 t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
1045 t->sfree = t->stid_tab;
1046 }
1047 if (natids) {
1048 while (--natids)
1049 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1050 t->afree = t->atid_tab;
1051 }
1052 return 0;
1053}
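init_tid_tabs() carves all three tables out of one allocation: tid_tab occupies the front, with stid_tab and atid_tab laid out immediately after it. The stid2entry()/atid2entry() helpers used above are not shown in this hunk; hypothetical equivalents on top of this layout would look like:

	static inline union listen_entry *ex_stid2entry(const struct tid_info *t,
							unsigned int stid)
	{
		return &t->stid_tab[stid - t->stid_base];
	}

	static inline union active_open_entry *ex_atid2entry(const struct tid_info *t,
							     unsigned int atid)
	{
		return &t->atid_tab[atid - t->atid_base];
	}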
1054
1055static void free_tid_maps(struct tid_info *t)
1056{
1057 cxgb_free_mem(t->tid_tab);
1058}
1059
1060static inline void add_adapter(struct adapter *adap)
1061{
1062 write_lock_bh(&adapter_list_lock);
1063 list_add_tail(&adap->adapter_list, &adapter_list);
1064 write_unlock_bh(&adapter_list_lock);
1065}
1066
1067static inline void remove_adapter(struct adapter *adap)
1068{
1069 write_lock_bh(&adapter_list_lock);
1070 list_del(&adap->adapter_list);
1071 write_unlock_bh(&adapter_list_lock);
1072}
1073
1074int cxgb3_offload_activate(struct adapter *adapter)
1075{
1076 struct t3cdev *dev = &adapter->tdev;
1077 int natids, err;
1078 struct t3c_data *t;
1079 struct tid_range stid_range, tid_range;
1080 struct mtutab mtutab;
1081 unsigned int l2t_capacity;
1082
1083 t = kcalloc(1, sizeof(*t), GFP_KERNEL);
1084 if (!t)
1085 return -ENOMEM;
1086
1087 err = -EOPNOTSUPP;
1088 if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
1089 dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
1090 dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
1091 dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
1092 dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
1093 dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
1094 goto out_free;
1095
1096 err = -ENOMEM;
1097 L2DATA(dev) = t3_init_l2t(l2t_capacity);
1098 if (!L2DATA(dev))
1099 goto out_free;
1100
1101 natids = min(tid_range.num / 2, MAX_ATIDS);
1102 err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
1103 stid_range.num, ATID_BASE, stid_range.base);
1104 if (err)
1105 goto out_free_l2t;
1106
1107 t->mtus = mtutab.mtus;
1108 t->nmtus = mtutab.size;
1109
1110 INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
1111 spin_lock_init(&t->tid_release_lock);
1112 INIT_LIST_HEAD(&t->list_node);
1113 t->dev = dev;
1114
1115 T3C_DATA(dev) = t;
1116 dev->recv = process_rx;
1117 dev->neigh_update = t3_l2t_update;
1118
1119 /* Register netevent handler once */
1120 if (list_empty(&adapter_list))
1121 register_netevent_notifier(&nb);
1122
1123 add_adapter(adapter);
1124 return 0;
1125
1126out_free_l2t:
1127 t3_free_l2t(L2DATA(dev));
1128 L2DATA(dev) = NULL;
1129out_free:
1130 kfree(t);
1131 return err;
1132}
1133
1134void cxgb3_offload_deactivate(struct adapter *adapter)
1135{
1136 struct t3cdev *tdev = &adapter->tdev;
1137 struct t3c_data *t = T3C_DATA(tdev);
1138
1139 remove_adapter(adapter);
1140 if (list_empty(&adapter_list))
1141 unregister_netevent_notifier(&nb);
1142
1143 free_tid_maps(&t->tid_maps);
1144 T3C_DATA(tdev) = NULL;
1145 t3_free_l2t(L2DATA(tdev));
1146 L2DATA(tdev) = NULL;
1147 kfree(t);
1148}
1149
1150static inline void register_tdev(struct t3cdev *tdev)
1151{
1152 static int unit;
1153
1154 mutex_lock(&cxgb3_db_lock);
1155 snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
1156 list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
1157 mutex_unlock(&cxgb3_db_lock);
1158}
1159
1160static inline void unregister_tdev(struct t3cdev *tdev)
1161{
1162 mutex_lock(&cxgb3_db_lock);
1163 list_del(&tdev->ofld_dev_list);
1164 mutex_unlock(&cxgb3_db_lock);
1165}
1166
1167void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
1168{
1169 struct t3cdev *tdev = &adapter->tdev;
1170
1171 INIT_LIST_HEAD(&tdev->ofld_dev_list);
1172
1173 cxgb3_set_dummy_ops(tdev);
1174 tdev->send = t3_offload_tx;
1175 tdev->ctl = cxgb_offload_ctl;
1176 tdev->type = adapter->params.rev == 0 ? T3A : T3B;
1177
1178 register_tdev(tdev);
1179}
1180
1181void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
1182{
1183 struct t3cdev *tdev = &adapter->tdev;
1184
1185 tdev->recv = NULL;
1186 tdev->neigh_update = NULL;
1187
1188 unregister_tdev(tdev);
1189}
1190
1191void __init cxgb3_offload_init(void)
1192{
1193 int i;
1194
1195 for (i = 0; i < NUM_CPL_CMDS; ++i)
1196 cpl_handlers[i] = do_bad_cpl;
1197
1198 t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
1199 t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
1200 t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
1201 t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
1202 t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
1203 t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
1204 t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
1205 t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
1206 t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
1207 t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
1208 t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
1209 t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
1210 t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
1211 t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
1212 t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
1213 t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
1214 t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
1215 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
1216 t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
1217 t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
1218 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
1219 t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
1220 t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
1221 t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
1222}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
new file mode 100644
index 000000000000..0e6beb69ba17
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CXGB3_OFFLOAD_H
34#define _CXGB3_OFFLOAD_H
35
36#include <linux/list.h>
37#include <linux/skbuff.h>
38
39#include "l2t.h"
40
41#include "t3cdev.h"
42#include "t3_cpl.h"
43
44struct adapter;
45
46void cxgb3_offload_init(void);
47
48void cxgb3_adapter_ofld(struct adapter *adapter);
49void cxgb3_adapter_unofld(struct adapter *adapter);
50int cxgb3_offload_activate(struct adapter *adapter);
51void cxgb3_offload_deactivate(struct adapter *adapter);
52
53void cxgb3_set_dummy_ops(struct t3cdev *dev);
54
55/*
56 * Client registration. Users of the T3 driver must register themselves.
57 * The T3 driver will call the add function of every client for each T3
58 * adapter activated, passing up the t3cdev ptr. Each client fills out an
59 * array of callback functions to process CPL messages.
60 */
61
62void cxgb3_register_client(struct cxgb3_client *client);
63void cxgb3_unregister_client(struct cxgb3_client *client);
64void cxgb3_add_clients(struct t3cdev *tdev);
65void cxgb3_remove_clients(struct t3cdev *tdev);
66
67typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
68 struct sk_buff *skb, void *ctx);
69
70struct cxgb3_client {
71 char *name;
72 void (*add) (struct t3cdev *);
73 void (*remove) (struct t3cdev *);
74 cxgb3_cpl_handler_func *handlers;
75 int (*redirect)(void *ctx, struct dst_entry *old,
76 struct dst_entry *new, struct l2t_entry *l2t);
77 struct list_head client_list;
78};
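As a sketch of how this interface is meant to be consumed, a hypothetical ULP module would define a client and a handler table (any slot left NULL falls back to the driver's default handling) and register it from its module init:

	static cxgb3_cpl_handler_func ex_handlers[NUM_CPL_CMDS];

	static void ex_add(struct t3cdev *tdev)    { /* per-adapter setup */ }
	static void ex_remove(struct t3cdev *tdev) { /* per-adapter teardown */ }

	static struct cxgb3_client ex_client = {
		.name     = "example_ulp",
		.add      = ex_add,
		.remove   = ex_remove,
		.handlers = ex_handlers,
	};

	/* module init would call cxgb3_register_client(&ex_client);
	 * module exit would call cxgb3_unregister_client(&ex_client);
	 */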
79
80/*
81 * TID allocation services.
82 */
83int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
84 void *ctx);
85int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
86 void *ctx);
87void *cxgb3_free_atid(struct t3cdev *dev, int atid);
88void cxgb3_free_stid(struct t3cdev *dev, int stid);
89void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
90 void *ctx, unsigned int tid);
91void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
92void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
93
94struct t3c_tid_entry {
95 struct cxgb3_client *client;
96 void *ctx;
97};
98
99/* CPL message priority levels */
100enum {
101 CPL_PRIORITY_DATA = 0, /* data messages */
102 CPL_PRIORITY_SETUP = 1, /* connection setup messages */
103 CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
104 CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
105 CPL_PRIORITY_ACK = 1, /* RX ACK messages */
106 CPL_PRIORITY_CONTROL = 1 /* offload control messages */
107};
108
109/* Flags for return value of CPL message handlers */
110enum {
111 CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
112 CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
113 CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
114};
115
116typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
117
118/*
119 * Returns a pointer to the first byte of the CPL header in an sk_buff that
120 * contains a CPL message.
121 */
122static inline void *cplhdr(struct sk_buff *skb)
123{
124 return skb->data;
125}
126
127void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
128
129union listen_entry {
130 struct t3c_tid_entry t3c_tid;
131 union listen_entry *next;
132};
133
134union active_open_entry {
135 struct t3c_tid_entry t3c_tid;
136 union active_open_entry *next;
137};
138
139/*
140 * Holds the size, base address, free list start, etc. of the TID, server TID,
141 * and active-open TID tables for an offload device.
142 * The tables themselves are allocated dynamically.
143 */
144struct tid_info {
145 struct t3c_tid_entry *tid_tab;
146 unsigned int ntids;
147 atomic_t tids_in_use;
148
149 union listen_entry *stid_tab;
150 unsigned int nstids;
151 unsigned int stid_base;
152
153 union active_open_entry *atid_tab;
154 unsigned int natids;
155 unsigned int atid_base;
156
157 /*
158 * The following members are accessed R/W so we put them in their own
159 * cache lines.
160 *
161 * XXX We could combine the atid fields above with the lock here since
162 * atids are used once (unlike other tids). OTOH the above fields are
163 * usually in cache due to tid_tab.
164 */
165 spinlock_t atid_lock ____cacheline_aligned_in_smp;
166 union active_open_entry *afree;
167 unsigned int atids_in_use;
168
169 spinlock_t stid_lock ____cacheline_aligned;
170 union listen_entry *sfree;
171 unsigned int stids_in_use;
172};
173
174struct t3c_data {
175 struct list_head list_node;
176 struct t3cdev *dev;
177 unsigned int tx_max_chunk; /* max payload for TX_DATA */
178 unsigned int max_wrs; /* max in-flight WRs per connection */
179 unsigned int nmtus;
180 const unsigned short *mtus;
181 struct tid_info tid_maps;
182
183 struct t3c_tid_entry *tid_release_list;
184 spinlock_t tid_release_lock;
185 struct work_struct tid_release_task;
186};
187
188/*
189 * t3cdev -> t3c_data accessor
190 */
191#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
192
193#endif
diff --git a/drivers/net/cxgb3/firmware_exports.h b/drivers/net/cxgb3/firmware_exports.h
new file mode 100644
index 000000000000..6a835f6a262a
--- /dev/null
+++ b/drivers/net/cxgb3/firmware_exports.h
@@ -0,0 +1,177 @@
1/*
2 * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _FIRMWARE_EXPORTS_H_
33#define _FIRMWARE_EXPORTS_H_
34
35/* WR OPCODES supported by the firmware.
36 */
37#define FW_WROPCODE_FORWARD 0x01
38#define FW_WROPCODE_BYPASS 0x05
39
40#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
41
42#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
43#define FW_WROPCODE_ULPTX_MEM_READ 0x02
44#define FW_WROPCODE_ULPTX_PKT 0x04
45#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
46
47#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
48
49#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
50#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
51#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
52#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
53#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
54#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
55#define FW_WROPCODE_OFLD_TX_DATA 0x0D
56#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
57
58#define FW_WROPCODE_RI_RDMA_INIT 0x10
59#define FW_WROPCODE_RI_RDMA_WRITE 0x11
60#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
61#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
62#define FW_WROPCODE_RI_SEND 0x14
63#define FW_WROPCODE_RI_TERMINATE 0x15
64#define FW_WROPCODE_RI_RDMA_READ 0x16
65#define FW_WROPCODE_RI_RECEIVE 0x17
66#define FW_WROPCODE_RI_BIND_MW 0x18
67#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
68#define FW_WROPCODE_RI_LOCAL_INV 0x1A
69#define FW_WROPCODE_RI_MODIFY_QP 0x1B
70#define FW_WROPCODE_RI_BYPASS 0x1C
71
72#define FW_WROPOCDE_RSVD 0x1E
73
74#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
75
76#define FW_WROPCODE_MNGT 0x1D
77#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
78
79/* Maximum size of a WR sent from the host, limited by the SGE.
80 *
81 * Note: WRs coming from ULP or TP are limited only by the CIM.
82 */
83#define FW_WR_SIZE 128
84
85/* Maximum number of outstanding WRs sent from the host. Value must be
86 * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
87 * offload modules to limit the number of WRs per connection.
88 */
89#define FW_T3_WR_NUM 16
90#define FW_N3_WR_NUM 7
91
92#ifndef N3
93# define FW_WR_NUM FW_T3_WR_NUM
94#else
95# define FW_WR_NUM FW_N3_WR_NUM
96#endif
97
98/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
99 * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
100 * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
101 *
102 * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
103 * to RESP Queue[i].
104 */
105#define FW_TUNNEL_NUM 8
106#define FW_TUNNEL_SGEEC_START 8
107#define FW_TUNNEL_TID_START 65544
108
109/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
110 * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
111 * (or 'uP Token') FW_CTRL_TID_START.
112 *
113 * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
114 */
115#define FW_CTRL_NUM 8
116#define FW_CTRL_SGEEC_START 65528
117#define FW_CTRL_TID_START 65536
118
119/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
120 * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
121 *
122 * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
123 * OFFLOAD Queues, as the host is responsible for providing the correct TID in
124 * every WR.
125 *
126 * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
127 */
128#define FW_OFLD_NUM 8
129#define FW_OFLD_SGEEC_START 0
130
131/*
132 *
133 */
134#define FW_RI_NUM 1
135#define FW_RI_SGEEC_START 65527
136#define FW_RI_TID_START 65552
137
138/*
139 * The RX_PKT_TID
140 */
141#define FW_RX_PKT_NUM 1
142#define FW_RX_PKT_TID_START 65553
143
144/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
145 * by the firmware.
146 */
147#define FW_WRC_NUM \
148 (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
149
150/*
151 * FW type and version.
152 */
153#define S_FW_VERSION_TYPE 28
154#define M_FW_VERSION_TYPE 0xF
155#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
156#define G_FW_VERSION_TYPE(x) \
157 (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
158
159#define S_FW_VERSION_MAJOR 16
160#define M_FW_VERSION_MAJOR 0xFFF
161#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
162#define G_FW_VERSION_MAJOR(x) \
163 (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
164
165#define S_FW_VERSION_MINOR 8
166#define M_FW_VERSION_MINOR 0xFF
167#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
168#define G_FW_VERSION_MINOR(x) \
169 (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
170
171#define S_FW_VERSION_MICRO 0
172#define M_FW_VERSION_MICRO 0xFF
173#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
174#define G_FW_VERSION_MICRO(x) \
175 (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
176
177#endif /* _FIRMWARE_EXPORTS_H_ */
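The S_/M_/V_/G_ macro quartets above follow the driver-wide convention: S_ is the field's shift, M_ its mask, V_ packs a value into place and G_ extracts it. A worked example for the firmware version word (arbitrary illustrative values, assuming the usual kernel integer types):

	static inline u32 example_pack_fw_version(void)
	{
		/* type 0, version 3.2.1 */
		return V_FW_VERSION_TYPE(0) | V_FW_VERSION_MAJOR(3) |
		       V_FW_VERSION_MINOR(2) | V_FW_VERSION_MICRO(1);
	}

	/* G_FW_VERSION_MAJOR(example_pack_fw_version()) evaluates to 3,
	 * G_FW_VERSION_MINOR() to 2 and G_FW_VERSION_MICRO() to 1.
	 */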
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
new file mode 100644
index 000000000000..3c0cb8557058
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.c
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/if.h>
36#include <linux/if_vlan.h>
37#include <linux/jhash.h>
38#include <net/neighbour.h>
39#include "common.h"
40#include "t3cdev.h"
41#include "cxgb3_defs.h"
42#include "l2t.h"
43#include "t3_cpl.h"
44#include "firmware_exports.h"
45
46#define VLAN_NONE 0xfff
47
48/*
49 * Module locking notes: There is a RW lock protecting the L2 table as a
50 * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
51 * under the protection of the table lock, individual entry changes happen
52 * while holding that entry's spinlock. The table lock nests outside the
53 * entry locks. Allocations of new entries take the table lock as writers so
54 * no other lookups can happen while allocating new entries. Entry updates
55 * take the table lock as readers so multiple entries can be updated in
56 * parallel. An L2T entry can be dropped by decrementing its reference count
57 * and therefore can happen in parallel with entry allocation but no entry
58 * can change state or increment its ref count during allocation as both of
59 * these perform lookups.
60 */
61
62static inline unsigned int vlan_prio(const struct l2t_entry *e)
63{
64 return e->vlan >> 13;
65}
66
67static inline unsigned int arp_hash(u32 key, int ifindex,
68 const struct l2t_data *d)
69{
70 return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
71}
72
73static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
74{
75 neigh_hold(n);
76 if (e->neigh)
77 neigh_release(e->neigh);
78 e->neigh = n;
79}
80
81/*
82 * Set up an L2T entry and send any packets waiting in the arp queue. The
83 * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
84 * entry locked.
85 */
86static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
87 struct l2t_entry *e)
88{
89 struct cpl_l2t_write_req *req;
90
91 if (!skb) {
92 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
93 if (!skb)
94 return -ENOMEM;
95 }
96
97 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
98 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
99 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
100 req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
101 V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
102 V_L2T_W_PRIO(vlan_prio(e)));
103 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
104 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
105 skb->priority = CPL_PRIORITY_CONTROL;
106 cxgb3_ofld_send(dev, skb);
107 while (e->arpq_head) {
108 skb = e->arpq_head;
109 e->arpq_head = skb->next;
110 skb->next = NULL;
111 cxgb3_ofld_send(dev, skb);
112 }
113 e->arpq_tail = NULL;
114 e->state = L2T_STATE_VALID;
115
116 return 0;
117}
118
119/*
120 * Add a packet to an L2T entry's queue of packets awaiting resolution.
121 * Must be called with the entry's lock held.
122 */
123static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
124{
125 skb->next = NULL;
126 if (e->arpq_head)
127 e->arpq_tail->next = skb;
128 else
129 e->arpq_head = skb;
130 e->arpq_tail = skb;
131}
132
133int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
134 struct l2t_entry *e)
135{
136again:
137 switch (e->state) {
138 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
139 neigh_event_send(e->neigh, NULL);
140 spin_lock_bh(&e->lock);
141 if (e->state == L2T_STATE_STALE)
142 e->state = L2T_STATE_VALID;
143 spin_unlock_bh(&e->lock);
144 case L2T_STATE_VALID: /* fast-path, send the packet on */
145 return cxgb3_ofld_send(dev, skb);
146 case L2T_STATE_RESOLVING:
147 spin_lock_bh(&e->lock);
148 if (e->state != L2T_STATE_RESOLVING) {
149 /* ARP already completed */
150 spin_unlock_bh(&e->lock);
151 goto again;
152 }
153 arpq_enqueue(e, skb);
154 spin_unlock_bh(&e->lock);
155
156 /*
157 * Only the first packet added to the arpq should kick off
158 * resolution. However, because the alloc_skb below can fail,
159 * we allow each packet added to the arpq to retry resolution
160 * as a way of recovering from transient memory exhaustion.
161 * A better way would be to use a work request to retry L2T
162 * entries when there's no memory.
163 */
164 if (!neigh_event_send(e->neigh, NULL)) {
165 skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
166 GFP_ATOMIC);
167 if (!skb)
168 break;
169
170 spin_lock_bh(&e->lock);
171 if (e->arpq_head)
172 setup_l2e_send_pending(dev, skb, e);
173 else /* we lost the race */
174 __kfree_skb(skb);
175 spin_unlock_bh(&e->lock);
176 }
177 }
178 return 0;
179}
180
181EXPORT_SYMBOL(t3_l2t_send_slow);
182
183void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
184{
185again:
186 switch (e->state) {
187 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
188 neigh_event_send(e->neigh, NULL);
189 spin_lock_bh(&e->lock);
190 if (e->state == L2T_STATE_STALE) {
191 e->state = L2T_STATE_VALID;
192 }
193 spin_unlock_bh(&e->lock);
194 return;
195 case L2T_STATE_VALID: /* fast-path, send the packet on */
196 return;
197 case L2T_STATE_RESOLVING:
198 spin_lock_bh(&e->lock);
199 if (e->state != L2T_STATE_RESOLVING) {
200 /* ARP already completed */
201 spin_unlock_bh(&e->lock);
202 goto again;
203 }
204 spin_unlock_bh(&e->lock);
205
206 /*
207 * Only the first packet added to the arpq should kick off
208 * resolution. However, because the alloc_skb below can fail,
209 * we allow each packet added to the arpq to retry resolution
210 * as a way of recovering from transient memory exhaustion.
211 * A better way would be to use a work request to retry L2T
212 * entries when there's no memory.
213 */
214 neigh_event_send(e->neigh, NULL);
215 }
216 return;
217}
218
219EXPORT_SYMBOL(t3_l2t_send_event);
220
221/*
222 * Allocate a free L2T entry. Must be called with l2t_data.lock held.
223 */
224static struct l2t_entry *alloc_l2e(struct l2t_data *d)
225{
226 struct l2t_entry *end, *e, **p;
227
228 if (!atomic_read(&d->nfree))
229 return NULL;
230
231 /* there's definitely a free entry */
232 for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
233 if (atomic_read(&e->refcnt) == 0)
234 goto found;
235
236 for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
237found:
238 d->rover = e + 1;
239 atomic_dec(&d->nfree);
240
241 /*
242 * The entry we found may be an inactive entry that is
243 * presently in the hash table. We need to remove it.
244 */
245 if (e->state != L2T_STATE_UNUSED) {
246 int hash = arp_hash(e->addr, e->ifindex, d);
247
248 for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
249 if (*p == e) {
250 *p = e->next;
251 break;
252 }
253 e->state = L2T_STATE_UNUSED;
254 }
255 return e;
256}
257
258/*
259 * Called when an L2T entry has no more users. The entry is left in the hash
260 * table since it is likely to be reused but we also bump nfree to indicate
261 * that the entry can be reallocated for a different neighbor. We also drop
262 * the existing neighbor reference in case the neighbor is going away and is
263 * waiting on our reference.
264 *
265 * Because entries can be reallocated to other neighbors once their ref count
266 * drops to 0 we need to take the entry's lock to avoid races with a new
267 * incarnation.
268 */
269void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
270{
271 spin_lock_bh(&e->lock);
272 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
273 if (e->neigh) {
274 neigh_release(e->neigh);
275 e->neigh = NULL;
276 }
277 }
278 spin_unlock_bh(&e->lock);
279 atomic_inc(&d->nfree);
280}
281
282EXPORT_SYMBOL(t3_l2e_free);
283
284/*
285 * Update an L2T entry that was previously used for the same next hop as neigh.
286 * Must be called with softirqs disabled.
287 */
288static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
289{
290 unsigned int nud_state;
291
292 spin_lock(&e->lock); /* avoid race with t3_l2t_free */
293
294 if (neigh != e->neigh)
295 neigh_replace(e, neigh);
296 nud_state = neigh->nud_state;
297 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
298 !(nud_state & NUD_VALID))
299 e->state = L2T_STATE_RESOLVING;
300 else if (nud_state & NUD_CONNECTED)
301 e->state = L2T_STATE_VALID;
302 else
303 e->state = L2T_STATE_STALE;
304 spin_unlock(&e->lock);
305}
306
307struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
308 struct net_device *dev)
309{
310 struct l2t_entry *e;
311 struct l2t_data *d = L2DATA(cdev);
312 u32 addr = *(u32 *) neigh->primary_key;
313 int ifidx = neigh->dev->ifindex;
314 int hash = arp_hash(addr, ifidx, d);
315 struct port_info *p = netdev_priv(dev);
316 int smt_idx = p->port_id;
317
318 write_lock_bh(&d->lock);
319 for (e = d->l2tab[hash].first; e; e = e->next)
320 if (e->addr == addr && e->ifindex == ifidx &&
321 e->smt_idx == smt_idx) {
322 l2t_hold(d, e);
323 if (atomic_read(&e->refcnt) == 1)
324 reuse_entry(e, neigh);
325 goto done;
326 }
327
328 /* Need to allocate a new entry */
329 e = alloc_l2e(d);
330 if (e) {
331 spin_lock(&e->lock); /* avoid race with t3_l2t_free */
332 e->next = d->l2tab[hash].first;
333 d->l2tab[hash].first = e;
334 e->state = L2T_STATE_RESOLVING;
335 e->addr = addr;
336 e->ifindex = ifidx;
337 e->smt_idx = smt_idx;
338 atomic_set(&e->refcnt, 1);
339 neigh_replace(e, neigh);
340 if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
341 e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
342 else
343 e->vlan = VLAN_NONE;
344 spin_unlock(&e->lock);
345 }
346done:
347 write_unlock_bh(&d->lock);
348 return e;
349}
350
351EXPORT_SYMBOL(t3_l2t_get);
352
353/*
354 * Called when address resolution fails for an L2T entry to handle packets
355 * on the arpq head. If a packet specifies a failure handler it is invoked,
356 * otherwise the packet is sent to the offload device.
357 *
358 * XXX: maybe we should abandon the latter behavior and just require a failure
359 * handler.
360 */
361static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
362{
363 while (arpq) {
364 struct sk_buff *skb = arpq;
365 struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
366
367 arpq = skb->next;
368 skb->next = NULL;
369 if (cb->arp_failure_handler)
370 cb->arp_failure_handler(dev, skb);
371 else
372 cxgb3_ofld_send(dev, skb);
373 }
374}
375
376/*
377 * Called when the host's ARP layer makes a change to some entry that is
378 * loaded into the HW L2 table.
379 */
380void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
381{
382 struct l2t_entry *e;
383 struct sk_buff *arpq = NULL;
384 struct l2t_data *d = L2DATA(dev);
385 u32 addr = *(u32 *) neigh->primary_key;
386 int ifidx = neigh->dev->ifindex;
387 int hash = arp_hash(addr, ifidx, d);
388
389 read_lock_bh(&d->lock);
390 for (e = d->l2tab[hash].first; e; e = e->next)
391 if (e->addr == addr && e->ifindex == ifidx) {
392 spin_lock(&e->lock);
393 goto found;
394 }
395 read_unlock_bh(&d->lock);
396 return;
397
398found:
399 read_unlock(&d->lock);
400 if (atomic_read(&e->refcnt)) {
401 if (neigh != e->neigh)
402 neigh_replace(e, neigh);
403
404 if (e->state == L2T_STATE_RESOLVING) {
405 if (neigh->nud_state & NUD_FAILED) {
406 arpq = e->arpq_head;
407 e->arpq_head = e->arpq_tail = NULL;
408 } else if (neigh_is_connected(neigh))
409 setup_l2e_send_pending(dev, NULL, e);
410 } else {
411 e->state = neigh_is_connected(neigh) ?
412 L2T_STATE_VALID : L2T_STATE_STALE;
413 if (memcmp(e->dmac, neigh->ha, 6))
414 setup_l2e_send_pending(dev, NULL, e);
415 }
416 }
417 spin_unlock_bh(&e->lock);
418
419 if (arpq)
420 handle_failed_resolution(dev, arpq);
421}
422
423struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
424{
425 struct l2t_data *d;
426 int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
427
428 d = cxgb_alloc_mem(size);
429 if (!d)
430 return NULL;
431
432 d->nentries = l2t_capacity;
433 d->rover = &d->l2tab[1]; /* entry 0 is not used */
434 atomic_set(&d->nfree, l2t_capacity - 1);
435 rwlock_init(&d->lock);
436
437 for (i = 0; i < l2t_capacity; ++i) {
438 d->l2tab[i].idx = i;
439 d->l2tab[i].state = L2T_STATE_UNUSED;
440 spin_lock_init(&d->l2tab[i].lock);
441 atomic_set(&d->l2tab[i].refcnt, 0);
442 }
443 return d;
444}
445
446void t3_free_l2t(struct l2t_data *d)
447{
448 cxgb_free_mem(d);
449}
450
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
new file mode 100644
index 000000000000..ba5d2cbd7241
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.h
@@ -0,0 +1,143 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_L2T_H
34#define _CHELSIO_L2T_H
35
36#include <linux/spinlock.h>
37#include "t3cdev.h"
38#include <asm/atomic.h>
39
40enum {
41 L2T_STATE_VALID, /* entry is up to date */
42 L2T_STATE_STALE, /* entry may be used but needs revalidation */
43 L2T_STATE_RESOLVING, /* entry needs address resolution */
44 L2T_STATE_UNUSED /* entry not in use */
45};
46
47struct neighbour;
48struct sk_buff;
49
50/*
51 * Each L2T entry plays multiple roles. First of all, it keeps state for the
52 * corresponding entry of the HW L2 table and maintains a queue of offload
53 * packets awaiting address resolution. Second, it is a node of a hash table
54 * chain, where the nodes of the chain are linked together through their next
55 * pointer. Finally, each node is a bucket of a hash table, pointing to the
56 * first element in its chain through its first pointer.
57 */
58struct l2t_entry {
59 u16 state; /* entry state */
60 u16 idx; /* entry index */
61 u32 addr; /* dest IP address */
62 int ifindex; /* neighbor's net_device's ifindex */
63 u16 smt_idx; /* SMT index */
64	u16 vlan;		/* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
65 struct neighbour *neigh; /* associated neighbour */
66 struct l2t_entry *first; /* start of hash chain */
67 struct l2t_entry *next; /* next l2t_entry on chain */
68 struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
69 struct sk_buff *arpq_tail;
70 spinlock_t lock;
71 atomic_t refcnt; /* entry reference count */
72 u8 dmac[6]; /* neighbour's MAC address */
73};
74
75struct l2t_data {
76 unsigned int nentries; /* number of entries */
77 struct l2t_entry *rover; /* starting point for next allocation */
78 atomic_t nfree; /* number of free entries */
79 rwlock_t lock;
80 struct l2t_entry l2tab[0];
81};
82
83typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
84 struct sk_buff * skb);
85
86/*
87 * Callback stored in an skb to handle address resolution failure.
88 */
89struct l2t_skb_cb {
90 arp_failure_handler_func arp_failure_handler;
91};
92
93#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
94
95static inline void set_arp_failure_handler(struct sk_buff *skb,
96 arp_failure_handler_func hnd)
97{
98 L2T_SKB_CB(skb)->arp_failure_handler = hnd;
99}
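If resolution ultimately fails, packets queued on an entry's arpq are handed to the per-skb failure handler (see handle_failed_resolution() in l2t.c); without one they are simply forwarded to the device. A hypothetical caller that prefers to drop such packets would attach a handler before queueing:

	static void ex_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
	{
		kfree_skb(skb);		/* give up on this packet */
	}

	/* before handing the skb to the L2T code:
	 *	set_arp_failure_handler(skb, ex_arp_failure);
	 */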
100
101/*
102 * Getting to the L2 data from an offload device.
103 */
104#define L2DATA(dev) ((dev)->l2opt)
105
106#define W_TCB_L2T_IX 0
107#define S_TCB_L2T_IX 7
108#define M_TCB_L2T_IX 0x7ffULL
109#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
110
111void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
112void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
113struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
114 struct net_device *dev);
115int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
116 struct l2t_entry *e);
117void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
118struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
119void t3_free_l2t(struct l2t_data *d);
120
121int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
122
123static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
124 struct l2t_entry *e)
125{
126 if (likely(e->state == L2T_STATE_VALID))
127 return cxgb3_ofld_send(dev, skb);
128 return t3_l2t_send_slow(dev, skb, e);
129}
130
131static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
132{
133 if (atomic_dec_and_test(&e->refcnt))
134 t3_l2e_free(d, e);
135}
136
137static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
138{
139 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
140 atomic_dec(&d->nfree);
141}
142
143#endif
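Putting the pieces of this header together, the expected lifecycle is: t3_l2t_get() to look up or allocate an entry (taking a reference), l2t_send() per packet, and l2t_release() once the connection no longer needs the entry. A compact, hypothetical sketch:

	static inline int example_l2t_tx(struct t3cdev *tdev, struct neighbour *neigh,
					 struct net_device *netdev,
					 struct sk_buff *skb)
	{
		struct l2t_entry *e = t3_l2t_get(tdev, neigh, netdev);
		int err;

		if (!e)
			return -ENOMEM;
		err = l2t_send(tdev, skb, e);	/* fast path when the entry is VALID */
		l2t_release(L2DATA(tdev), e);	/* drop the reference from t3_l2t_get */
		return err;
	}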
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
new file mode 100644
index 000000000000..644d62ea86a6
--- /dev/null
+++ b/drivers/net/cxgb3/mc5.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35enum {
36 IDT75P52100 = 4,
37 IDT75N43102 = 5
38};
39
40/* DBGI command mode */
41enum {
42 DBGI_MODE_MBUS = 0,
43 DBGI_MODE_IDT52100 = 5
44};
45
46/* IDT 75P52100 commands */
47#define IDT_CMD_READ 0
48#define IDT_CMD_WRITE 1
49#define IDT_CMD_SEARCH 2
50#define IDT_CMD_LEARN 3
51
52/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
53#define IDT_LAR_ADR0 0x180006
54#define IDT_LAR_MODE144 0xffff0000
55
56/* IDT SCR and SSR addresses (low 32 bits) */
57#define IDT_SCR_ADR0 0x180000
58#define IDT_SSR0_ADR0 0x180002
59#define IDT_SSR1_ADR0 0x180004
60
61/* IDT GMR base address (low 32 bits) */
62#define IDT_GMR_BASE_ADR0 0x180020
63
64/* IDT data and mask array base addresses (low 32 bits) */
65#define IDT_DATARY_BASE_ADR0 0
66#define IDT_MSKARY_BASE_ADR0 0x80000
67
68/* IDT 75N43102 commands */
69#define IDT4_CMD_SEARCH144 3
70#define IDT4_CMD_WRITE 4
71#define IDT4_CMD_READ 5
72
73/* IDT 75N43102 SCR address (low 32 bits) */
74#define IDT4_SCR_ADR0 0x3
75
76/* IDT 75N43102 GMR base addresses (low 32 bits) */
77#define IDT4_GMR_BASE0 0x10
78#define IDT4_GMR_BASE1 0x20
79#define IDT4_GMR_BASE2 0x30
80
81/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
82#define IDT4_DATARY_BASE_ADR0 0x1000000
83#define IDT4_MSKARY_BASE_ADR0 0x2000000
84
85#define MAX_WRITE_ATTEMPTS 5
86
87#define MAX_ROUTES 2048
88
89/*
90 * Issue a command to the TCAM and wait for its completion. The address and
 91 * any data required by the command must have been set up by the caller.
92 */
93static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
94{
95 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
96 return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
97 F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
98}
99
100static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
101 u32 v3)
102{
103 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
104 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
105 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
106}
107
108static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
109 u32 v3)
110{
111 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
112 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
113 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
114}
115
116static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
117 u32 *v3)
118{
119 *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
120 *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
121 *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
122}
123
124/*
125 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
126 * command cmd. The data to be written must have been set up by the caller.
127 * Returns -1 on failure, 0 on success.
128 */
129static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
130{
131 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
132 if (mc5_cmd_write(adapter, cmd) == 0)
133 return 0;
134 CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
135 addr_lo);
136 return -1;
137}
138
139static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
140 u32 data_array_base, u32 write_cmd,
141 int addr_shift)
142{
143 unsigned int i;
144 struct adapter *adap = mc5->adapter;
145
146 /*
147 * We need the size of the TCAM data and mask arrays in terms of
148 * 72-bit entries.
149 */
150 unsigned int size72 = mc5->tcam_size;
151 unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
152
153 if (mc5->mode == MC5_MODE_144_BIT) {
154 size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
155 server_base *= 2;
156 }
157
158 /* Clear the data array */
159 dbgi_wr_data3(adap, 0, 0, 0);
160 for (i = 0; i < size72; i++)
161 if (mc5_write(adap, data_array_base + (i << addr_shift),
162 write_cmd))
163 return -1;
164
165 /* Initialize the mask array. */
166 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
167 for (i = 0; i < size72; i++) {
168 if (i == server_base) /* entering server or routing region */
169 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
170 mc5->mode == MC5_MODE_144_BIT ?
171 0xfffffff9 : 0xfffffffd);
172 if (mc5_write(adap, mask_array_base + (i << addr_shift),
173 write_cmd))
174 return -1;
175 }
176 return 0;
177}
178
179static int init_idt52100(struct mc5 *mc5)
180{
181 int i;
182 struct adapter *adap = mc5->adapter;
183
184 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
185 V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
186 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
187
188 /*
189 * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
190 * GMRs 8-9 for ACK- and AOPEN searches.
191 */
192 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
193 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
194 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
195 t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
196 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
197 t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
198 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
199 t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
200 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
201 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
202 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
203 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
204
205 /* Set DBGI command mode for IDT TCAM. */
206 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
207
208 /* Set up LAR */
209 dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
210 if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
211 goto err;
212
213 /* Set up SSRs */
214 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
215 if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
216 mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
217 goto err;
218
219 /* Set up GMRs */
220 for (i = 0; i < 32; ++i) {
221 if (i >= 12 && i < 15)
222 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
223 else if (i == 15)
224 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
225 else
226 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
227
228 if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
229 goto err;
230 }
231
232 /* Set up SCR */
233 dbgi_wr_data3(adap, 1, 0, 0);
234 if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
235 goto err;
236
237 return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
238 IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
239err:
240 return -EIO;
241}
242
243static int init_idt43102(struct mc5 *mc5)
244{
245 int i;
246 struct adapter *adap = mc5->adapter;
247
248 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
249 adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
250 V_RDLAT(0xd) | V_SRCHLAT(0x12));
251
252 /*
253 * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
254 * for ACK- and AOPEN searches.
255 */
256 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
257 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
258 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
259 IDT4_CMD_SEARCH144 | 0x3800);
260 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
261 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
262 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
263 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
264 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
265 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
266
267 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
268
269 /* Set DBGI command mode for IDT TCAM. */
270 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
271
272 /* Set up GMRs */
273 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
274 for (i = 0; i < 7; ++i)
275 if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
276 goto err;
277
278 for (i = 0; i < 4; ++i)
279 if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
280 goto err;
281
282 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
283 if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
284 mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
285 mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
286 goto err;
287
288 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
289 if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
290 goto err;
291
292 /* Set up SCR */
293 dbgi_wr_data3(adap, 0xf0000000, 0, 0);
294 if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
295 goto err;
296
297 return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
298 IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
299err:
300 return -EIO;
301}
302
303/* Put MC5 in DBGI mode. */
304static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
305{
306 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
307 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
308}
309
310/* Put MC5 in M-Bus mode. */
311static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
312{
313 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
314 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
315 V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
316 V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
317}
318
319/*
320 * Initialization that requires the OS and protocol layers to already
321 * be initialized goes here.
322 */
323int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
324 unsigned int nroutes)
325{
326 u32 cfg;
327 int err;
328 unsigned int tcam_size = mc5->tcam_size;
329 struct adapter *adap = mc5->adapter;
330
331 if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
332 return -EINVAL;
333
334 /* Reset the TCAM */
335 cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
336 cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
337 t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
338 if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
339 CH_ERR(adap, "TCAM reset timed out\n");
340 return -1;
341 }
342
343 t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
344 t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
345 tcam_size - nroutes - nfilters);
346 t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
347 tcam_size - nroutes - nfilters - nservers);
348
349 mc5->parity_enabled = 1;
350
351	/* All the TCAM addresses we access have only the low 32 bits non-zero */
352 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
353 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
354
355 mc5_dbgi_mode_enable(mc5);
356
357 switch (mc5->part_type) {
358 case IDT75P52100:
359 err = init_idt52100(mc5);
360 break;
361 case IDT75N43102:
362 err = init_idt43102(mc5);
363 break;
364 default:
365 CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
366 err = -EINVAL;
367 break;
368 }
369
370 mc5_dbgi_mode_disable(mc5);
371 return err;
372}
373
374/*
375 * t3_read_mc5_range - dump a part of the memory managed by MC5
376 * @mc5: the MC5 handle
377 * @start: the start address for the dump
378 * @n: number of 72-bit words to read
379 * @buf: result buffer
380 *
381 * Read n 72-bit words from MC5 memory from the given start location.
382 */
383int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
384 unsigned int n, u32 *buf)
385{
386 u32 read_cmd;
387 int err = 0;
388 struct adapter *adap = mc5->adapter;
389
390 if (mc5->part_type == IDT75P52100)
391 read_cmd = IDT_CMD_READ;
392 else if (mc5->part_type == IDT75N43102)
393 read_cmd = IDT4_CMD_READ;
394 else
395 return -EINVAL;
396
397 mc5_dbgi_mode_enable(mc5);
398
399 while (n--) {
400 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
401 if (mc5_cmd_write(adap, read_cmd)) {
402 err = -EIO;
403 break;
404 }
405 dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
406 buf += 3;
407 }
408
409 mc5_dbgi_mode_disable(mc5);
410	return err;
411}
412
413#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
414
415/*
416 * MC5 interrupt handler
417 */
418void t3_mc5_intr_handler(struct mc5 *mc5)
419{
420 struct adapter *adap = mc5->adapter;
421 u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
422
423 if ((cause & F_PARITYERR) && mc5->parity_enabled) {
424 CH_ALERT(adap, "MC5 parity error\n");
425 mc5->stats.parity_err++;
426 }
427
428 if (cause & F_REQQPARERR) {
429 CH_ALERT(adap, "MC5 request queue parity error\n");
430 mc5->stats.reqq_parity_err++;
431 }
432
433 if (cause & F_DISPQPARERR) {
434 CH_ALERT(adap, "MC5 dispatch queue parity error\n");
435 mc5->stats.dispq_parity_err++;
436 }
437
438 if (cause & F_ACTRGNFULL)
439 mc5->stats.active_rgn_full++;
440 if (cause & F_NFASRCHFAIL)
441 mc5->stats.nfa_srch_err++;
442 if (cause & F_UNKNOWNCMD)
443 mc5->stats.unknown_cmd++;
444 if (cause & F_DELACTEMPTY)
445 mc5->stats.del_act_empty++;
446 if (cause & MC5_INT_FATAL)
447 t3_fatal_err(adap);
448
449 t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
450}
451
452void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
453{
454#define K * 1024
455
456 static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
457 64 K, 128 K, 256 K, 32 K
458 };
459
460#undef K
461
462 u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
463
464 mc5->adapter = adapter;
465 mc5->mode = (unsigned char)mode;
466 mc5->part_type = (unsigned char)G_TMTYPE(cfg);
467 if (cfg & F_TMTYPEHI)
468 mc5->part_type |= 4;
469
470 mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
471 if (mode == MC5_MODE_144_BIT)
472 mc5->tcam_size /= 2;
473}
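
As a worked example of the partitioning done in t3_mc5_init() above: the TCAM is carved from the top down, with the last nroutes entries holding the routing table, the nfilters entries below them the filter table, the nservers entries below those the server (listener) table, and everything below the server index left for active connections. The numbers in the sketch below are hypothetical and only illustrate the arithmetic; they are not taken from the patch.

/* Hypothetical sizing for the region indices programmed by t3_mc5_init(). */
unsigned int tcam_size = 65536, nroutes = 2048, nfilters = 4096, nservers = 512;

/* A_MC5_DB_ROUTING_TABLE_INDEX = 65536 - 2048              = 63488 */
/* A_MC5_DB_FILTER_TABLE        = 65536 - 2048 - 4096       = 59392 */
/* A_MC5_DB_SERVER_INDEX        = 65536 - 2048 - 4096 - 512 = 58880 */
/* indices 0..58879 remain available for active-connection lookups   */
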
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
new file mode 100644
index 000000000000..b56c5f52bcdc
--- /dev/null
+++ b/drivers/net/cxgb3/regs.h
@@ -0,0 +1,2195 @@
1#define A_SG_CONTROL 0x0
2
3#define S_DROPPKT 20
4#define V_DROPPKT(x) ((x) << S_DROPPKT)
5#define F_DROPPKT V_DROPPKT(1U)
6
7#define S_EGRGENCTRL 19
8#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
9#define F_EGRGENCTRL V_EGRGENCTRL(1U)
10
11#define S_USERSPACESIZE 14
12#define M_USERSPACESIZE 0x1f
13#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
14
15#define S_HOSTPAGESIZE 11
16#define M_HOSTPAGESIZE 0x7
17#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
18
19#define S_FLMODE 9
20#define V_FLMODE(x) ((x) << S_FLMODE)
21#define F_FLMODE V_FLMODE(1U)
22
23#define S_PKTSHIFT 6
24#define M_PKTSHIFT 0x7
25#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
26
27#define S_ONEINTMULTQ 5
28#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
29#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
30
31#define S_BIGENDIANINGRESS 2
32#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
33#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
34
35#define S_ISCSICOALESCING 1
36#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
37#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)
38
39#define S_GLOBALENABLE 0
40#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
41#define F_GLOBALENABLE V_GLOBALENABLE(1U)
42
43#define S_AVOIDCQOVFL 24
44#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
45#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
46
47#define S_OPTONEINTMULTQ 23
48#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
49#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
50
51#define S_CQCRDTCTRL 22
52#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
53#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
54
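
regs.h follows one naming convention throughout: A_* is a register offset, S_* and M_* are a field's shift and unshifted mask, V_*(x) positions a value within the field, F_* is the single-bit case, and G_*(x) extracts a field from a read value. A short, illustrative read-modify-write of A_SG_CONTROL using the fields defined above follows; it is a sketch only, and the t3_read_reg()/t3_write_reg() accessors come from elsewhere in the driver.

/* Illustrative read-modify-write using the S_/M_/V_/F_ macro convention. */
u32 v = t3_read_reg(adapter, A_SG_CONTROL);

v &= ~V_PKTSHIFT(M_PKTSHIFT);		/* clear the 3-bit PKTSHIFT field */
v |= V_PKTSHIFT(2) | F_GLOBALENABLE;	/* set PKTSHIFT = 2 and the global enable bit */
t3_write_reg(adapter, A_SG_CONTROL, v);
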
55#define A_SG_KDOORBELL 0x4
56
57#define S_SELEGRCNTX 31
58#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
59#define F_SELEGRCNTX V_SELEGRCNTX(1U)
60
61#define S_EGRCNTX 0
62#define M_EGRCNTX 0xffff
63#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
64
65#define A_SG_GTS 0x8
66
67#define S_RSPQ 29
68#define M_RSPQ 0x7
69#define V_RSPQ(x) ((x) << S_RSPQ)
70#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
71
72#define S_NEWTIMER 16
73#define M_NEWTIMER 0x1fff
74#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
75
76#define S_NEWINDEX 0
77#define M_NEWINDEX 0xffff
78#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
79
80#define A_SG_CONTEXT_CMD 0xc
81
82#define S_CONTEXT_CMD_OPCODE 28
83#define M_CONTEXT_CMD_OPCODE 0xf
84#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
85
86#define S_CONTEXT_CMD_BUSY 27
87#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
88#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
89
90#define S_CQ_CREDIT 20
91
92#define M_CQ_CREDIT 0x7f
93
94#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
95
96#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
97
98#define S_CQ 19
99
100#define V_CQ(x) ((x) << S_CQ)
101#define F_CQ V_CQ(1U)
102
103#define S_RESPONSEQ 18
104#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
105#define F_RESPONSEQ V_RESPONSEQ(1U)
106
107#define S_EGRESS 17
108#define V_EGRESS(x) ((x) << S_EGRESS)
109#define F_EGRESS V_EGRESS(1U)
110
111#define S_FREELIST 16
112#define V_FREELIST(x) ((x) << S_FREELIST)
113#define F_FREELIST V_FREELIST(1U)
114
115#define S_CONTEXT 0
116#define M_CONTEXT 0xffff
117#define V_CONTEXT(x) ((x) << S_CONTEXT)
118
119#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
120
121#define A_SG_CONTEXT_DATA0 0x10
122
123#define A_SG_CONTEXT_DATA1 0x14
124
125#define A_SG_CONTEXT_DATA2 0x18
126
127#define A_SG_CONTEXT_DATA3 0x1c
128
129#define A_SG_CONTEXT_MASK0 0x20
130
131#define A_SG_CONTEXT_MASK1 0x24
132
133#define A_SG_CONTEXT_MASK2 0x28
134
135#define A_SG_CONTEXT_MASK3 0x2c
136
137#define A_SG_RSPQ_CREDIT_RETURN 0x30
138
139#define S_CREDITS 0
140#define M_CREDITS 0xffff
141#define V_CREDITS(x) ((x) << S_CREDITS)
142
143#define A_SG_DATA_INTR 0x34
144
145#define S_ERRINTR 31
146#define V_ERRINTR(x) ((x) << S_ERRINTR)
147#define F_ERRINTR V_ERRINTR(1U)
148
149#define A_SG_HI_DRB_HI_THRSH 0x38
150
151#define A_SG_HI_DRB_LO_THRSH 0x3c
152
153#define A_SG_LO_DRB_HI_THRSH 0x40
154
155#define A_SG_LO_DRB_LO_THRSH 0x44
156
157#define A_SG_RSPQ_FL_STATUS 0x4c
158
159#define S_RSPQ0DISABLED 8
160
161#define A_SG_EGR_RCQ_DRB_THRSH 0x54
162
163#define S_HIRCQDRBTHRSH 16
164#define M_HIRCQDRBTHRSH 0x7ff
165#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
166
167#define S_LORCQDRBTHRSH 0
168#define M_LORCQDRBTHRSH 0x7ff
169#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
170
171#define A_SG_EGR_CNTX_BADDR 0x58
172
173#define A_SG_INT_CAUSE 0x5c
174
175#define S_RSPQDISABLED 3
176#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
177#define F_RSPQDISABLED V_RSPQDISABLED(1U)
178
179#define S_RSPQCREDITOVERFOW 2
180#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
181#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
182
183#define A_SG_INT_ENABLE 0x60
184
185#define A_SG_CMDQ_CREDIT_TH 0x64
186
187#define S_TIMEOUT 8
188#define M_TIMEOUT 0xffffff
189#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
190
191#define S_THRESHOLD 0
192#define M_THRESHOLD 0xff
193#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
194
195#define A_SG_TIMER_TICK 0x68
196
197#define A_SG_CQ_CONTEXT_BADDR 0x6c
198
199#define A_SG_OCO_BASE 0x70
200
201#define S_BASE1 16
202#define M_BASE1 0xffff
203#define V_BASE1(x) ((x) << S_BASE1)
204
205#define A_SG_DRB_PRI_THRESH 0x74
206
207#define A_PCIX_INT_ENABLE 0x80
208
209#define S_MSIXPARERR 22
210#define M_MSIXPARERR 0x7
211
212#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
213
214#define S_CFPARERR 18
215#define M_CFPARERR 0xf
216
217#define V_CFPARERR(x) ((x) << S_CFPARERR)
218
219#define S_RFPARERR 14
220#define M_RFPARERR 0xf
221
222#define V_RFPARERR(x) ((x) << S_RFPARERR)
223
224#define S_WFPARERR 12
225#define M_WFPARERR 0x3
226
227#define V_WFPARERR(x) ((x) << S_WFPARERR)
228
229#define S_PIOPARERR 11
230#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
231#define F_PIOPARERR V_PIOPARERR(1U)
232
233#define S_DETUNCECCERR 10
234#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
235#define F_DETUNCECCERR V_DETUNCECCERR(1U)
236
237#define S_DETCORECCERR 9
238#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
239#define F_DETCORECCERR V_DETCORECCERR(1U)
240
241#define S_RCVSPLCMPERR 8
242#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
243#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)
244
245#define S_UNXSPLCMP 7
246#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
247#define F_UNXSPLCMP V_UNXSPLCMP(1U)
248
249#define S_SPLCMPDIS 6
250#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
251#define F_SPLCMPDIS V_SPLCMPDIS(1U)
252
253#define S_DETPARERR 5
254#define V_DETPARERR(x) ((x) << S_DETPARERR)
255#define F_DETPARERR V_DETPARERR(1U)
256
257#define S_SIGSYSERR 4
258#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
259#define F_SIGSYSERR V_SIGSYSERR(1U)
260
261#define S_RCVMSTABT 3
262#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
263#define F_RCVMSTABT V_RCVMSTABT(1U)
264
265#define S_RCVTARABT 2
266#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
267#define F_RCVTARABT V_RCVTARABT(1U)
268
269#define S_SIGTARABT 1
270#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
271#define F_SIGTARABT V_SIGTARABT(1U)
272
273#define S_MSTDETPARERR 0
274#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
275#define F_MSTDETPARERR V_MSTDETPARERR(1U)
276
277#define A_PCIX_INT_CAUSE 0x84
278
279#define A_PCIX_CFG 0x88
280
281#define S_CLIDECEN 18
282#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
283#define F_CLIDECEN V_CLIDECEN(1U)
284
285#define A_PCIX_MODE 0x8c
286
287#define S_PCLKRANGE 6
288#define M_PCLKRANGE 0x3
289#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
290#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
291
292#define S_PCIXINITPAT 2
293#define M_PCIXINITPAT 0xf
294#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
295#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
296
297#define S_64BIT 0
298#define V_64BIT(x) ((x) << S_64BIT)
299#define F_64BIT V_64BIT(1U)
300
301#define A_PCIE_INT_ENABLE 0x80
302
303#define S_BISTERR 15
304#define M_BISTERR 0xff
305
306#define V_BISTERR(x) ((x) << S_BISTERR)
307
308#define S_PCIE_MSIXPARERR 12
309#define M_PCIE_MSIXPARERR 0x7
310
311#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
312
313#define S_PCIE_CFPARERR 11
314#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
315#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)
316
317#define S_PCIE_RFPARERR 10
318#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
319#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)
320
321#define S_PCIE_WFPARERR 9
322#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
323#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)
324
325#define S_PCIE_PIOPARERR 8
326#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
327#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)
328
329#define S_UNXSPLCPLERRC 7
330#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
331#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)
332
333#define S_UNXSPLCPLERRR 6
334#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
335#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
336
337#define S_PEXERR 0
338#define V_PEXERR(x) ((x) << S_PEXERR)
339#define F_PEXERR V_PEXERR(1U)
340
341#define A_PCIE_INT_CAUSE 0x84
342
343#define A_PCIE_CFG 0x88
344
345#define S_PCIE_CLIDECEN 16
346#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
347#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
348
349#define S_CRSTWRMMODE 0
350#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
351#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
352
353#define A_PCIE_MODE 0x8c
354
355#define S_NUMFSTTRNSEQRX 10
356#define M_NUMFSTTRNSEQRX 0xff
357#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
358#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
359
360#define A_PCIE_PEX_CTRL0 0x98
361
362#define S_NUMFSTTRNSEQ 22
363#define M_NUMFSTTRNSEQ 0xff
364#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
365#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
366
367#define S_REPLAYLMT 2
368#define M_REPLAYLMT 0xfffff
369
370#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
371
372#define A_PCIE_PEX_CTRL1 0x9c
373
374#define S_T3A_ACKLAT 0
375#define M_T3A_ACKLAT 0x7ff
376
377#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
378
379#define S_ACKLAT 0
380#define M_ACKLAT 0x1fff
381
382#define V_ACKLAT(x) ((x) << S_ACKLAT)
383
384#define A_PCIE_PEX_ERR 0xa4
385
386#define A_T3DBG_GPIO_EN 0xd0
387
388#define S_GPIO11_OEN 27
389#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
390#define F_GPIO11_OEN V_GPIO11_OEN(1U)
391
392#define S_GPIO10_OEN 26
393#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
394#define F_GPIO10_OEN V_GPIO10_OEN(1U)
395
396#define S_GPIO7_OEN 23
397#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
398#define F_GPIO7_OEN V_GPIO7_OEN(1U)
399
400#define S_GPIO6_OEN 22
401#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
402#define F_GPIO6_OEN V_GPIO6_OEN(1U)
403
404#define S_GPIO5_OEN 21
405#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
406#define F_GPIO5_OEN V_GPIO5_OEN(1U)
407
408#define S_GPIO4_OEN 20
409#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
410#define F_GPIO4_OEN V_GPIO4_OEN(1U)
411
412#define S_GPIO2_OEN 18
413#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
414#define F_GPIO2_OEN V_GPIO2_OEN(1U)
415
416#define S_GPIO1_OEN 17
417#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
418#define F_GPIO1_OEN V_GPIO1_OEN(1U)
419
420#define S_GPIO0_OEN 16
421#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
422#define F_GPIO0_OEN V_GPIO0_OEN(1U)
423
424#define S_GPIO10_OUT_VAL 10
425#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
426#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
427
428#define S_GPIO7_OUT_VAL 7
429#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
430#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
431
432#define S_GPIO6_OUT_VAL 6
433#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
434#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
435
436#define S_GPIO5_OUT_VAL 5
437#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
438#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
439
440#define S_GPIO4_OUT_VAL 4
441#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
442#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
443
444#define S_GPIO2_OUT_VAL 2
445#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
446#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
447
448#define S_GPIO1_OUT_VAL 1
449#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
450#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
451
452#define S_GPIO0_OUT_VAL 0
453#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
454#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
455
456#define A_T3DBG_INT_ENABLE 0xd8
457
458#define S_GPIO11 11
459#define V_GPIO11(x) ((x) << S_GPIO11)
460#define F_GPIO11 V_GPIO11(1U)
461
462#define S_GPIO10 10
463#define V_GPIO10(x) ((x) << S_GPIO10)
464#define F_GPIO10 V_GPIO10(1U)
465
466#define S_GPIO7 7
467#define V_GPIO7(x) ((x) << S_GPIO7)
468#define F_GPIO7 V_GPIO7(1U)
469
470#define S_GPIO6 6
471#define V_GPIO6(x) ((x) << S_GPIO6)
472#define F_GPIO6 V_GPIO6(1U)
473
474#define S_GPIO5 5
475#define V_GPIO5(x) ((x) << S_GPIO5)
476#define F_GPIO5 V_GPIO5(1U)
477
478#define S_GPIO4 4
479#define V_GPIO4(x) ((x) << S_GPIO4)
480#define F_GPIO4 V_GPIO4(1U)
481
482#define S_GPIO3 3
483#define V_GPIO3(x) ((x) << S_GPIO3)
484#define F_GPIO3 V_GPIO3(1U)
485
486#define S_GPIO2 2
487#define V_GPIO2(x) ((x) << S_GPIO2)
488#define F_GPIO2 V_GPIO2(1U)
489
490#define S_GPIO1 1
491#define V_GPIO1(x) ((x) << S_GPIO1)
492#define F_GPIO1 V_GPIO1(1U)
493
494#define S_GPIO0 0
495#define V_GPIO0(x) ((x) << S_GPIO0)
496#define F_GPIO0 V_GPIO0(1U)
497
498#define A_T3DBG_INT_CAUSE 0xdc
499
500#define A_T3DBG_GPIO_ACT_LOW 0xf0
501
502#define MC7_PMRX_BASE_ADDR 0x100
503
504#define A_MC7_CFG 0x100
505
506#define S_IFEN 13
507#define V_IFEN(x) ((x) << S_IFEN)
508#define F_IFEN V_IFEN(1U)
509
510#define S_TERM150 11
511#define V_TERM150(x) ((x) << S_TERM150)
512#define F_TERM150 V_TERM150(1U)
513
514#define S_SLOW 10
515#define V_SLOW(x) ((x) << S_SLOW)
516#define F_SLOW V_SLOW(1U)
517
518#define S_WIDTH 8
519#define M_WIDTH 0x3
520#define V_WIDTH(x) ((x) << S_WIDTH)
521#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
522
523#define S_BKS 6
524#define V_BKS(x) ((x) << S_BKS)
525#define F_BKS V_BKS(1U)
526
527#define S_ORG 5
528#define V_ORG(x) ((x) << S_ORG)
529#define F_ORG V_ORG(1U)
530
531#define S_DEN 2
532#define M_DEN 0x7
533#define V_DEN(x) ((x) << S_DEN)
534#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
535
536#define S_RDY 1
537#define V_RDY(x) ((x) << S_RDY)
538#define F_RDY V_RDY(1U)
539
540#define S_CLKEN 0
541#define V_CLKEN(x) ((x) << S_CLKEN)
542#define F_CLKEN V_CLKEN(1U)
543
544#define A_MC7_MODE 0x104
545
546#define S_BUSY 31
547#define V_BUSY(x) ((x) << S_BUSY)
548#define F_BUSY V_BUSY(1U)
549
550#define S_BUSY 31
551#define V_BUSY(x) ((x) << S_BUSY)
552#define F_BUSY V_BUSY(1U)
553
554#define A_MC7_EXT_MODE1 0x108
555
556#define A_MC7_EXT_MODE2 0x10c
557
558#define A_MC7_EXT_MODE3 0x110
559
560#define A_MC7_PRE 0x114
561
562#define A_MC7_REF 0x118
563
564#define S_PREREFDIV 1
565#define M_PREREFDIV 0x3fff
566#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
567
568#define S_PERREFEN 0
569#define V_PERREFEN(x) ((x) << S_PERREFEN)
570#define F_PERREFEN V_PERREFEN(1U)
571
572#define A_MC7_DLL 0x11c
573
574#define S_DLLENB 1
575#define V_DLLENB(x) ((x) << S_DLLENB)
576#define F_DLLENB V_DLLENB(1U)
577
578#define S_DLLRST 0
579#define V_DLLRST(x) ((x) << S_DLLRST)
580#define F_DLLRST V_DLLRST(1U)
581
582#define A_MC7_PARM 0x120
583
584#define S_ACTTOPREDLY 26
585#define M_ACTTOPREDLY 0xf
586#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
587
588#define S_ACTTORDWRDLY 23
589#define M_ACTTORDWRDLY 0x7
590#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
591
592#define S_PRECYC 20
593#define M_PRECYC 0x7
594#define V_PRECYC(x) ((x) << S_PRECYC)
595
596#define S_REFCYC 13
597#define M_REFCYC 0x7f
598#define V_REFCYC(x) ((x) << S_REFCYC)
599
600#define S_BKCYC 8
601#define M_BKCYC 0x1f
602#define V_BKCYC(x) ((x) << S_BKCYC)
603
604#define S_WRTORDDLY 4
605#define M_WRTORDDLY 0xf
606#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
607
608#define S_RDTOWRDLY 0
609#define M_RDTOWRDLY 0xf
610#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
611
612#define A_MC7_CAL 0x128
613
614#define S_BUSY 31
615#define V_BUSY(x) ((x) << S_BUSY)
616#define F_BUSY V_BUSY(1U)
617
618#define S_BUSY 31
619#define V_BUSY(x) ((x) << S_BUSY)
620#define F_BUSY V_BUSY(1U)
621
622#define S_CAL_FAULT 30
623#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
624#define F_CAL_FAULT V_CAL_FAULT(1U)
625
626#define S_SGL_CAL_EN 20
627#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
628#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
629
630#define A_MC7_ERR_ADDR 0x12c
631
632#define A_MC7_ECC 0x130
633
634#define S_ECCCHKEN 1
635#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
636#define F_ECCCHKEN V_ECCCHKEN(1U)
637
638#define S_ECCGENEN 0
639#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
640#define F_ECCGENEN V_ECCGENEN(1U)
641
642#define A_MC7_CE_ADDR 0x134
643
644#define A_MC7_CE_DATA0 0x138
645
646#define A_MC7_CE_DATA1 0x13c
647
648#define A_MC7_CE_DATA2 0x140
649
650#define S_DATA 0
651#define M_DATA 0xff
652
653#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
654
655#define A_MC7_UE_ADDR 0x144
656
657#define A_MC7_UE_DATA0 0x148
658
659#define A_MC7_UE_DATA1 0x14c
660
661#define A_MC7_UE_DATA2 0x150
662
663#define A_MC7_BD_ADDR 0x154
664
665#define S_ADDR 3
666
667#define M_ADDR 0x1fffffff
668
669#define A_MC7_BD_DATA0 0x158
670
671#define A_MC7_BD_DATA1 0x15c
672
673#define A_MC7_BD_OP 0x164
674
675#define S_OP 0
676
677#define V_OP(x) ((x) << S_OP)
678#define F_OP V_OP(1U)
679
680#define F_OP V_OP(1U)
681#define A_SF_OP 0x6dc
682
683#define A_MC7_BIST_ADDR_BEG 0x168
684
685#define A_MC7_BIST_ADDR_END 0x16c
686
687#define A_MC7_BIST_DATA 0x170
688
689#define A_MC7_BIST_OP 0x174
690
691#define S_CONT 3
692#define V_CONT(x) ((x) << S_CONT)
693#define F_CONT V_CONT(1U)
694
695#define F_CONT V_CONT(1U)
696
697#define A_MC7_INT_ENABLE 0x178
698
699#define S_AE 17
700#define V_AE(x) ((x) << S_AE)
701#define F_AE V_AE(1U)
702
703#define S_PE 2
704#define M_PE 0x7fff
705
706#define V_PE(x) ((x) << S_PE)
707
708#define G_PE(x) (((x) >> S_PE) & M_PE)
709
710#define S_UE 1
711#define V_UE(x) ((x) << S_UE)
712#define F_UE V_UE(1U)
713
714#define S_CE 0
715#define V_CE(x) ((x) << S_CE)
716#define F_CE V_CE(1U)
717
718#define A_MC7_INT_CAUSE 0x17c
719
720#define MC7_PMTX_BASE_ADDR 0x180
721
722#define MC7_CM_BASE_ADDR 0x200
723
724#define A_CIM_BOOT_CFG 0x280
725
726#define S_BOOTADDR 2
727#define M_BOOTADDR 0x3fffffff
728#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
729
730#define A_CIM_SDRAM_BASE_ADDR 0x28c
731
732#define A_CIM_SDRAM_ADDR_SIZE 0x290
733
734#define A_CIM_HOST_INT_ENABLE 0x298
735
736#define A_CIM_HOST_INT_CAUSE 0x29c
737
738#define S_BLKWRPLINT 12
739#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
740#define F_BLKWRPLINT V_BLKWRPLINT(1U)
741
742#define S_BLKRDPLINT 11
743#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
744#define F_BLKRDPLINT V_BLKRDPLINT(1U)
745
746#define S_BLKWRCTLINT 10
747#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
748#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
749
750#define S_BLKRDCTLINT 9
751#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
752#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
753
754#define S_BLKWRFLASHINT 8
755#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
756#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
757
758#define S_BLKRDFLASHINT 7
759#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
760#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
761
762#define S_SGLWRFLASHINT 6
763#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
764#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
765
766#define S_WRBLKFLASHINT 5
767#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
768#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
769
770#define S_BLKWRBOOTINT 4
771#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
772#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
773
774#define S_FLASHRANGEINT 2
775#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
776#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
777
778#define S_SDRAMRANGEINT 1
779#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
780#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
781
782#define S_RSVDSPACEINT 0
783#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
784#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
785
786#define A_CIM_HOST_ACC_CTRL 0x2b0
787
788#define S_HOSTBUSY 17
789#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
790#define F_HOSTBUSY V_HOSTBUSY(1U)
791
792#define A_CIM_HOST_ACC_DATA 0x2b4
793
794#define A_TP_IN_CONFIG 0x300
795
796#define S_NICMODE 14
797#define V_NICMODE(x) ((x) << S_NICMODE)
798#define F_NICMODE V_NICMODE(1U)
799
800#define F_NICMODE V_NICMODE(1U)
801
802#define S_IPV6ENABLE 15
803#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
804#define F_IPV6ENABLE V_IPV6ENABLE(1U)
805
806#define A_TP_OUT_CONFIG 0x304
807
808#define S_VLANEXTRACTIONENABLE 12
809
810#define A_TP_GLOBAL_CONFIG 0x308
811
812#define S_TXPACINGENABLE 24
813#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
814#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
815
816#define S_PATHMTU 15
817#define V_PATHMTU(x) ((x) << S_PATHMTU)
818#define F_PATHMTU V_PATHMTU(1U)
819
820#define S_IPCHECKSUMOFFLOAD 13
821#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
822#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
823
824#define S_UDPCHECKSUMOFFLOAD 12
825#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
826#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
827
828#define S_TCPCHECKSUMOFFLOAD 11
829#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
830#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
831
832#define S_IPTTL 0
833#define M_IPTTL 0xff
834#define V_IPTTL(x) ((x) << S_IPTTL)
835
836#define A_TP_CMM_MM_BASE 0x314
837
838#define A_TP_CMM_TIMER_BASE 0x318
839
840#define S_CMTIMERMAXNUM 28
841#define M_CMTIMERMAXNUM 0x3
842#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
843
844#define A_TP_PMM_SIZE 0x31c
845
846#define A_TP_PMM_TX_BASE 0x320
847
848#define A_TP_PMM_RX_BASE 0x328
849
850#define A_TP_PMM_RX_PAGE_SIZE 0x32c
851
852#define A_TP_PMM_RX_MAX_PAGE 0x330
853
854#define A_TP_PMM_TX_PAGE_SIZE 0x334
855
856#define A_TP_PMM_TX_MAX_PAGE 0x338
857
858#define A_TP_TCP_OPTIONS 0x340
859
860#define S_MTUDEFAULT 16
861#define M_MTUDEFAULT 0xffff
862#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
863
864#define S_MTUENABLE 10
865#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
866#define F_MTUENABLE V_MTUENABLE(1U)
867
868#define S_SACKRX 8
869#define V_SACKRX(x) ((x) << S_SACKRX)
870#define F_SACKRX V_SACKRX(1U)
871
872#define S_SACKMODE 4
873
874#define M_SACKMODE 0x3
875
876#define V_SACKMODE(x) ((x) << S_SACKMODE)
877
878#define S_WINDOWSCALEMODE 2
879#define M_WINDOWSCALEMODE 0x3
880#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
881
882#define S_TIMESTAMPSMODE 0
883
884#define M_TIMESTAMPSMODE 0x3
885
886#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
887
888#define A_TP_DACK_CONFIG 0x344
889
890#define S_AUTOSTATE3 30
891#define M_AUTOSTATE3 0x3
892#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
893
894#define S_AUTOSTATE2 28
895#define M_AUTOSTATE2 0x3
896#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
897
898#define S_AUTOSTATE1 26
899#define M_AUTOSTATE1 0x3
900#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
901
902#define S_BYTETHRESHOLD 5
903#define M_BYTETHRESHOLD 0xfffff
904#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
905
906#define S_MSSTHRESHOLD 3
907#define M_MSSTHRESHOLD 0x3
908#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
909
910#define S_AUTOCAREFUL 2
911#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
912#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
913
914#define S_AUTOENABLE 1
915#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
916#define F_AUTOENABLE V_AUTOENABLE(1U)
917
918#define S_DACK_MODE 0
919#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
920#define F_DACK_MODE V_DACK_MODE(1U)
921
922#define A_TP_PC_CONFIG 0x348
923
924#define S_TXTOSQUEUEMAPMODE 26
925#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
926#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
927
928#define S_ENABLEEPCMDAFULL 23
929#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
930#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
931
932#define S_MODULATEUNIONMODE 22
933#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
934#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
935
936#define S_TXDEFERENABLE 20
937#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
938#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
939
940#define S_RXCONGESTIONMODE 19
941#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
942#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
943
944#define S_HEARBEATDACK 16
945#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
946#define F_HEARBEATDACK V_HEARBEATDACK(1U)
947
948#define S_TXCONGESTIONMODE 15
949#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
950#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
951
952#define S_ENABLEOCSPIFULL 30
953#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
954#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
955
956#define S_LOCKTID 28
957#define V_LOCKTID(x) ((x) << S_LOCKTID)
958#define F_LOCKTID V_LOCKTID(1U)
959
960#define A_TP_PC_CONFIG2 0x34c
961
962#define S_CHDRAFULL 4
963#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
964#define F_CHDRAFULL V_CHDRAFULL(1U)
965
966#define A_TP_TCP_BACKOFF_REG0 0x350
967
968#define A_TP_TCP_BACKOFF_REG1 0x354
969
970#define A_TP_TCP_BACKOFF_REG2 0x358
971
972#define A_TP_TCP_BACKOFF_REG3 0x35c
973
974#define A_TP_PARA_REG2 0x368
975
976#define S_MAXRXDATA 16
977#define M_MAXRXDATA 0xffff
978#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
979
980#define S_RXCOALESCESIZE 0
981#define M_RXCOALESCESIZE 0xffff
982#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
983
984#define A_TP_PARA_REG3 0x36c
985
986#define S_TXDATAACKIDX 16
987#define M_TXDATAACKIDX 0xf
988
989#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
990
991#define S_TXPACEAUTOSTRICT 10
992#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
993#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
994
995#define S_TXPACEFIXED 9
996#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
997#define F_TXPACEFIXED V_TXPACEFIXED(1U)
998
999#define S_TXPACEAUTO 8
1000#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
1001#define F_TXPACEAUTO V_TXPACEAUTO(1U)
1002
1003#define S_RXCOALESCEENABLE 1
1004#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
1005#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
1006
1007#define S_RXCOALESCEPSHEN 0
1008#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
1009#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
1010
1011#define A_TP_PARA_REG4 0x370
1012
1013#define A_TP_PARA_REG6 0x378
1014
1015#define S_T3A_ENABLEESND 13
1016#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
1017#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
1018
1019#define S_ENABLEESND 11
1020#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
1021#define F_ENABLEESND V_ENABLEESND(1U)
1022
1023#define A_TP_PARA_REG7 0x37c
1024
1025#define S_PMMAXXFERLEN1 16
1026#define M_PMMAXXFERLEN1 0xffff
1027#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
1028
1029#define S_PMMAXXFERLEN0 0
1030#define M_PMMAXXFERLEN0 0xffff
1031#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
1032
1033#define A_TP_TIMER_RESOLUTION 0x390
1034
1035#define S_TIMERRESOLUTION 16
1036#define M_TIMERRESOLUTION 0xff
1037#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
1038
1039#define S_TIMESTAMPRESOLUTION 8
1040#define M_TIMESTAMPRESOLUTION 0xff
1041#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
1042
1043#define S_DELAYEDACKRESOLUTION 0
1044#define M_DELAYEDACKRESOLUTION 0xff
1045#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
1046
1047#define A_TP_MSL 0x394
1048
1049#define A_TP_RXT_MIN 0x398
1050
1051#define A_TP_RXT_MAX 0x39c
1052
1053#define A_TP_PERS_MIN 0x3a0
1054
1055#define A_TP_PERS_MAX 0x3a4
1056
1057#define A_TP_KEEP_IDLE 0x3a8
1058
1059#define A_TP_KEEP_INTVL 0x3ac
1060
1061#define A_TP_INIT_SRTT 0x3b0
1062
1063#define A_TP_DACK_TIMER 0x3b4
1064
1065#define A_TP_FINWAIT2_TIMER 0x3b8
1066
1067#define A_TP_SHIFT_CNT 0x3c0
1068
1069#define S_SYNSHIFTMAX 24
1070
1071#define M_SYNSHIFTMAX 0xff
1072
1073#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
1074
1075#define S_RXTSHIFTMAXR1 20
1076
1077#define M_RXTSHIFTMAXR1 0xf
1078
1079#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
1080
1081#define S_RXTSHIFTMAXR2 16
1082
1083#define M_RXTSHIFTMAXR2 0xf
1084
1085#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
1086
1087#define S_PERSHIFTBACKOFFMAX 12
1088#define M_PERSHIFTBACKOFFMAX 0xf
1089#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
1090
1091#define S_PERSHIFTMAX 8
1092#define M_PERSHIFTMAX 0xf
1093#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
1094
1095#define S_KEEPALIVEMAX 0
1096
1097#define M_KEEPALIVEMAX 0xff
1098
1099#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
1100
1101#define A_TP_MTU_PORT_TABLE 0x3d0
1102
1103#define A_TP_CCTRL_TABLE 0x3dc
1104
1105#define A_TP_MTU_TABLE 0x3e4
1106
1107#define A_TP_RSS_MAP_TABLE 0x3e8
1108
1109#define A_TP_RSS_LKP_TABLE 0x3ec
1110
1111#define A_TP_RSS_CONFIG 0x3f0
1112
1113#define S_TNL4TUPEN 29
1114#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
1115#define F_TNL4TUPEN V_TNL4TUPEN(1U)
1116
1117#define S_TNL2TUPEN 28
1118#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
1119#define F_TNL2TUPEN V_TNL2TUPEN(1U)
1120
1121#define S_TNLPRTEN 26
1122#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
1123#define F_TNLPRTEN V_TNLPRTEN(1U)
1124
1125#define S_TNLMAPEN 25
1126#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
1127#define F_TNLMAPEN V_TNLMAPEN(1U)
1128
1129#define S_TNLLKPEN 24
1130#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
1131#define F_TNLLKPEN V_TNLLKPEN(1U)
1132
1133#define S_RRCPLCPUSIZE 4
1134#define M_RRCPLCPUSIZE 0x7
1135#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
1136
1137#define S_RQFEEDBACKENABLE 3
1138#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
1139#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
1140
1141#define S_DISABLE 0
1142
1143#define A_TP_TM_PIO_ADDR 0x418
1144
1145#define A_TP_TM_PIO_DATA 0x41c
1146
1147#define A_TP_TX_MOD_QUE_TABLE 0x420
1148
1149#define A_TP_TX_RESOURCE_LIMIT 0x424
1150
1151#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
1152
1153#define S_TX_MOD_QUEUE_REQ_MAP 0
1154#define M_TX_MOD_QUEUE_REQ_MAP 0xff
1155#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
1156
1157#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
1158
1159#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
1160
1161#define A_TP_MOD_CHANNEL_WEIGHT 0x434
1162
1163#define A_TP_PIO_ADDR 0x440
1164
1165#define A_TP_PIO_DATA 0x444
1166
1167#define A_TP_RESET 0x44c
1168
1169#define S_FLSTINITENABLE 1
1170#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
1171#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
1172
1173#define S_TPRESET 0
1174#define V_TPRESET(x) ((x) << S_TPRESET)
1175#define F_TPRESET V_TPRESET(1U)
1176
1177#define A_TP_CMM_MM_RX_FLST_BASE 0x460
1178
1179#define A_TP_CMM_MM_TX_FLST_BASE 0x464
1180
1181#define A_TP_CMM_MM_PS_FLST_BASE 0x468
1182
1183#define A_TP_MIB_INDEX 0x450
1184
1185#define A_TP_MIB_RDATA 0x454
1186
1187#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
1188
1189#define A_TP_INT_ENABLE 0x470
1190
1191#define A_TP_INT_CAUSE 0x474
1192
1193#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
1194
1195#define A_TP_TX_DROP_CFG_CH0 0x12b
1196
1197#define A_TP_TX_DROP_MODE 0x12f
1198
1199#define A_TP_EGRESS_CONFIG 0x145
1200
1201#define S_REWRITEFORCETOSIZE 0
1202#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
1203#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
1204
1205#define A_TP_TX_TRC_KEY0 0x20
1206
1207#define A_TP_RX_TRC_KEY0 0x120
1208
1209#define A_ULPRX_CTL 0x500
1210
1211#define S_ROUND_ROBIN 4
1212#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
1213#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
1214
1215#define A_ULPRX_INT_ENABLE 0x504
1216
1217#define S_PARERR 0
1218#define V_PARERR(x) ((x) << S_PARERR)
1219#define F_PARERR V_PARERR(1U)
1220
1221#define A_ULPRX_INT_CAUSE 0x508
1222
1223#define A_ULPRX_ISCSI_LLIMIT 0x50c
1224
1225#define A_ULPRX_ISCSI_ULIMIT 0x510
1226
1227#define A_ULPRX_ISCSI_TAGMASK 0x514
1228
1229#define A_ULPRX_TDDP_LLIMIT 0x51c
1230
1231#define A_ULPRX_TDDP_ULIMIT 0x520
1232
1233#define A_ULPRX_STAG_LLIMIT 0x52c
1234
1235#define A_ULPRX_STAG_ULIMIT 0x530
1236
1237#define A_ULPRX_RQ_LLIMIT 0x534
1238#define A_ULPRX_RQ_LLIMIT 0x534
1239
1240#define A_ULPRX_RQ_ULIMIT 0x538
1241#define A_ULPRX_RQ_ULIMIT 0x538
1242
1243#define A_ULPRX_PBL_LLIMIT 0x53c
1244
1245#define A_ULPRX_PBL_ULIMIT 0x540
1246#define A_ULPRX_PBL_ULIMIT 0x540
1247
1248#define A_ULPRX_TDDP_TAGMASK 0x524
1249
1250#define A_ULPRX_RQ_LLIMIT 0x534
1251#define A_ULPRX_RQ_LLIMIT 0x534
1252
1253#define A_ULPRX_RQ_ULIMIT 0x538
1254#define A_ULPRX_RQ_ULIMIT 0x538
1255
1256#define A_ULPRX_PBL_ULIMIT 0x540
1257#define A_ULPRX_PBL_ULIMIT 0x540
1258
1259#define A_ULPTX_CONFIG 0x580
1260
1261#define S_CFG_RR_ARB 0
1262#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
1263#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
1264
1265#define A_ULPTX_INT_ENABLE 0x584
1266
1267#define S_PBL_BOUND_ERR_CH1 1
1268#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
1269#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
1270
1271#define S_PBL_BOUND_ERR_CH0 0
1272#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
1273#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
1274
1275#define A_ULPTX_INT_CAUSE 0x588
1276
1277#define A_ULPTX_TPT_LLIMIT 0x58c
1278
1279#define A_ULPTX_TPT_ULIMIT 0x590
1280
1281#define A_ULPTX_PBL_LLIMIT 0x594
1282
1283#define A_ULPTX_PBL_ULIMIT 0x598
1284
1285#define A_ULPTX_DMA_WEIGHT 0x5ac
1286
1287#define S_D1_WEIGHT 16
1288#define M_D1_WEIGHT 0xffff
1289#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
1290
1291#define S_D0_WEIGHT 0
1292#define M_D0_WEIGHT 0xffff
1293#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
1294
1295#define A_PM1_RX_CFG 0x5c0
1296
1297#define A_PM1_RX_INT_ENABLE 0x5d8
1298
1299#define S_ZERO_E_CMD_ERROR 18
1300#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
1301#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
1302
1303#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
1304#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
1305#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1306
1307#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
1308#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
1309#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1310
1311#define S_IESPI0_RX_FRAMING_ERROR 15
1312#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
1313#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
1314
1315#define S_IESPI1_RX_FRAMING_ERROR 14
1316#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
1317#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
1318
1319#define S_IESPI0_TX_FRAMING_ERROR 13
1320#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
1321#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
1322
1323#define S_IESPI1_TX_FRAMING_ERROR 12
1324#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
1325#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
1326
1327#define S_OCSPI0_RX_FRAMING_ERROR 11
1328#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
1329#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
1330
1331#define S_OCSPI1_RX_FRAMING_ERROR 10
1332#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
1333#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
1334
1335#define S_OCSPI0_TX_FRAMING_ERROR 9
1336#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
1337#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
1338
1339#define S_OCSPI1_TX_FRAMING_ERROR 8
1340#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
1341#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
1342
1343#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
1344#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
1345#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1346
1347#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
1348#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1349#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1350
1351#define S_IESPI_PAR_ERROR 3
1352#define M_IESPI_PAR_ERROR 0x7
1353
1354#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
1355
1356#define S_OCSPI_PAR_ERROR 0
1357#define M_OCSPI_PAR_ERROR 0x7
1358
1359#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
1360
1361#define A_PM1_RX_INT_CAUSE 0x5dc
1362
1363#define A_PM1_TX_CFG 0x5e0
1364
1365#define A_PM1_TX_INT_ENABLE 0x5f8
1366
1367#define S_ZERO_C_CMD_ERROR 18
1368#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
1369#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
1370
1371#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
1372#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
1373#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1374
1375#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
1376#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
1377#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1378
1379#define S_ICSPI0_RX_FRAMING_ERROR 15
1380#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
1381#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
1382
1383#define S_ICSPI1_RX_FRAMING_ERROR 14
1384#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
1385#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
1386
1387#define S_ICSPI0_TX_FRAMING_ERROR 13
1388#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
1389#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
1390
1391#define S_ICSPI1_TX_FRAMING_ERROR 12
1392#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
1393#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
1394
1395#define S_OESPI0_RX_FRAMING_ERROR 11
1396#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
1397#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
1398
1399#define S_OESPI1_RX_FRAMING_ERROR 10
1400#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
1401#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
1402
1403#define S_OESPI0_TX_FRAMING_ERROR 9
1404#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
1405#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
1406
1407#define S_OESPI1_TX_FRAMING_ERROR 8
1408#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
1409#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
1410
1411#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
1412#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
1413#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1414
1415#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
1416#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1417#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1418
1419#define S_ICSPI_PAR_ERROR 3
1420#define M_ICSPI_PAR_ERROR 0x7
1421
1422#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
1423
1424#define S_OESPI_PAR_ERROR 0
1425#define M_OESPI_PAR_ERROR 0x7
1426
1427#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
1428
1429#define A_PM1_TX_INT_CAUSE 0x5fc
1430
1431#define A_MPS_CFG 0x600
1432
1433#define S_TPRXPORTEN 4
1434#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
1435#define F_TPRXPORTEN V_TPRXPORTEN(1U)
1436
1437#define S_TPTXPORT1EN 3
1438#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
1439#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
1440
1441#define S_TPTXPORT0EN 2
1442#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
1443#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
1444
1445#define S_PORT1ACTIVE 1
1446#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
1447#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
1448
1449#define S_PORT0ACTIVE 0
1450#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
1451#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
1452
1453#define S_ENFORCEPKT 11
1454#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
1455#define F_ENFORCEPKT V_ENFORCEPKT(1U)
1456
1457#define A_MPS_INT_ENABLE 0x61c
1458
1459#define S_MCAPARERRENB 6
1460#define M_MCAPARERRENB 0x7
1461
1462#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
1463
1464#define S_RXTPPARERRENB 4
1465#define M_RXTPPARERRENB 0x3
1466
1467#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
1468
1469#define S_TX1TPPARERRENB 2
1470#define M_TX1TPPARERRENB 0x3
1471
1472#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
1473
1474#define S_TX0TPPARERRENB 0
1475#define M_TX0TPPARERRENB 0x3
1476
1477#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
1478
1479#define A_MPS_INT_CAUSE 0x620
1480
1481#define S_MCAPARERR 6
1482#define M_MCAPARERR 0x7
1483
1484#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
1485
1486#define S_RXTPPARERR 4
1487#define M_RXTPPARERR 0x3
1488
1489#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
1490
1491#define S_TX1TPPARERR 2
1492#define M_TX1TPPARERR 0x3
1493
1494#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
1495
1496#define S_TX0TPPARERR 0
1497#define M_TX0TPPARERR 0x3
1498
1499#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
1500
1501#define A_CPL_SWITCH_CNTRL 0x640
1502
1503#define A_CPL_INTR_ENABLE 0x650
1504
1505#define S_CIM_OVFL_ERROR 4
1506#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
1507#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
1508
1509#define S_TP_FRAMING_ERROR 3
1510#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
1511#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
1512
1513#define S_SGE_FRAMING_ERROR 2
1514#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
1515#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
1516
1517#define S_CIM_FRAMING_ERROR 1
1518#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
1519#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
1520
1521#define S_ZERO_SWITCH_ERROR 0
1522#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
1523#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
1524
1525#define A_CPL_INTR_CAUSE 0x654
1526
1527#define A_CPL_MAP_TBL_DATA 0x65c
1528
1529#define A_SMB_GLOBAL_TIME_CFG 0x660
1530
1531#define A_I2C_CFG 0x6a0
1532
1533#define S_I2C_CLKDIV 0
1534#define M_I2C_CLKDIV 0xfff
1535#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
1536
1537#define A_MI1_CFG 0x6b0
1538
1539#define S_CLKDIV 5
1540#define M_CLKDIV 0xff
1541#define V_CLKDIV(x) ((x) << S_CLKDIV)
1542
1543#define S_ST 3
1544
1545#define M_ST 0x3
1546
1547#define V_ST(x) ((x) << S_ST)
1548
1549#define G_ST(x) (((x) >> S_ST) & M_ST)
1550
1551#define S_PREEN 2
1552#define V_PREEN(x) ((x) << S_PREEN)
1553#define F_PREEN V_PREEN(1U)
1554
1555#define S_MDIINV 1
1556#define V_MDIINV(x) ((x) << S_MDIINV)
1557#define F_MDIINV V_MDIINV(1U)
1558
1559#define S_MDIEN 0
1560#define V_MDIEN(x) ((x) << S_MDIEN)
1561#define F_MDIEN V_MDIEN(1U)
1562
1563#define A_MI1_ADDR 0x6b4
1564
1565#define S_PHYADDR 5
1566#define M_PHYADDR 0x1f
1567#define V_PHYADDR(x) ((x) << S_PHYADDR)
1568
1569#define S_REGADDR 0
1570#define M_REGADDR 0x1f
1571#define V_REGADDR(x) ((x) << S_REGADDR)
1572
1573#define A_MI1_DATA 0x6b8
1574
1575#define A_MI1_OP 0x6bc
1576
1577#define S_MDI_OP 0
1578#define M_MDI_OP 0x3
1579#define V_MDI_OP(x) ((x) << S_MDI_OP)
1580
1581#define A_SF_DATA 0x6d8
1582
1583#define A_SF_OP 0x6dc
1584
1585#define S_BYTECNT 1
1586#define M_BYTECNT 0x3
1587#define V_BYTECNT(x) ((x) << S_BYTECNT)
1588
1589#define A_PL_INT_ENABLE0 0x6e0
1590
1591#define S_T3DBG 23
1592#define V_T3DBG(x) ((x) << S_T3DBG)
1593#define F_T3DBG V_T3DBG(1U)
1594
1595#define S_XGMAC0_1 20
1596#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
1597#define F_XGMAC0_1 V_XGMAC0_1(1U)
1598
1599#define S_XGMAC0_0 19
1600#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
1601#define F_XGMAC0_0 V_XGMAC0_0(1U)
1602
1603#define S_MC5A 18
1604#define V_MC5A(x) ((x) << S_MC5A)
1605#define F_MC5A V_MC5A(1U)
1606
1607#define S_CPL_SWITCH 12
1608#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
1609#define F_CPL_SWITCH V_CPL_SWITCH(1U)
1610
1611#define S_MPS0 11
1612#define V_MPS0(x) ((x) << S_MPS0)
1613#define F_MPS0 V_MPS0(1U)
1614
1615#define S_PM1_TX 10
1616#define V_PM1_TX(x) ((x) << S_PM1_TX)
1617#define F_PM1_TX V_PM1_TX(1U)
1618
1619#define S_PM1_RX 9
1620#define V_PM1_RX(x) ((x) << S_PM1_RX)
1621#define F_PM1_RX V_PM1_RX(1U)
1622
1623#define S_ULP2_TX 8
1624#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
1625#define F_ULP2_TX V_ULP2_TX(1U)
1626
1627#define S_ULP2_RX 7
1628#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
1629#define F_ULP2_RX V_ULP2_RX(1U)
1630
1631#define S_TP1 6
1632#define V_TP1(x) ((x) << S_TP1)
1633#define F_TP1 V_TP1(1U)
1634
1635#define S_CIM 5
1636#define V_CIM(x) ((x) << S_CIM)
1637#define F_CIM V_CIM(1U)
1638
1639#define S_MC7_CM 4
1640#define V_MC7_CM(x) ((x) << S_MC7_CM)
1641#define F_MC7_CM V_MC7_CM(1U)
1642
1643#define S_MC7_PMTX 3
1644#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
1645#define F_MC7_PMTX V_MC7_PMTX(1U)
1646
1647#define S_MC7_PMRX 2
1648#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
1649#define F_MC7_PMRX V_MC7_PMRX(1U)
1650
1651#define S_PCIM0 1
1652#define V_PCIM0(x) ((x) << S_PCIM0)
1653#define F_PCIM0 V_PCIM0(1U)
1654
1655#define S_SGE3 0
1656#define V_SGE3(x) ((x) << S_SGE3)
1657#define F_SGE3 V_SGE3(1U)
1658
1659#define A_PL_INT_CAUSE0 0x6e4
1660
1661#define A_PL_RST 0x6f0
1662
1663#define S_CRSTWRM 1
1664#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
1665#define F_CRSTWRM V_CRSTWRM(1U)
1666
1667#define A_PL_REV 0x6f4
1668
1669#define A_PL_CLI 0x6f8
1670
1671#define A_MC5_DB_CONFIG 0x704
1672
1673#define S_TMTYPEHI 30
1674#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
1675#define F_TMTYPEHI V_TMTYPEHI(1U)
1676
1677#define S_TMPARTSIZE 28
1678#define M_TMPARTSIZE 0x3
1679#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
1680#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
1681
1682#define S_TMTYPE 26
1683#define M_TMTYPE 0x3
1684#define V_TMTYPE(x) ((x) << S_TMTYPE)
1685#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
1686
1687#define S_COMPEN 17
1688#define V_COMPEN(x) ((x) << S_COMPEN)
1689#define F_COMPEN V_COMPEN(1U)
1690
1691#define S_PRTYEN 6
1692#define V_PRTYEN(x) ((x) << S_PRTYEN)
1693#define F_PRTYEN V_PRTYEN(1U)
1694
1695#define S_MBUSEN 5
1696#define V_MBUSEN(x) ((x) << S_MBUSEN)
1697#define F_MBUSEN V_MBUSEN(1U)
1698
1699#define S_DBGIEN 4
1700#define V_DBGIEN(x) ((x) << S_DBGIEN)
1701#define F_DBGIEN V_DBGIEN(1U)
1702
1703#define S_TMRDY 2
1704#define V_TMRDY(x) ((x) << S_TMRDY)
1705#define F_TMRDY V_TMRDY(1U)
1706
1707#define S_TMRST 1
1708#define V_TMRST(x) ((x) << S_TMRST)
1709#define F_TMRST V_TMRST(1U)
1710
1711#define S_TMMODE 0
1712#define V_TMMODE(x) ((x) << S_TMMODE)
1713#define F_TMMODE V_TMMODE(1U)
1714
1717#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
1718
1719#define A_MC5_DB_FILTER_TABLE 0x710
1720
1721#define A_MC5_DB_SERVER_INDEX 0x714
1722
1723#define A_MC5_DB_RSP_LATENCY 0x720
1724
1725#define S_RDLAT 16
1726#define M_RDLAT 0x1f
1727#define V_RDLAT(x) ((x) << S_RDLAT)
1728
1729#define S_LRNLAT 8
1730#define M_LRNLAT 0x1f
1731#define V_LRNLAT(x) ((x) << S_LRNLAT)
1732
1733#define S_SRCHLAT 0
1734#define M_SRCHLAT 0x1f
1735#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
1736
1737#define A_MC5_DB_PART_ID_INDEX 0x72c
1738
1739#define A_MC5_DB_INT_ENABLE 0x740
1740
1741#define S_DELACTEMPTY 18
1742#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
1743#define F_DELACTEMPTY V_DELACTEMPTY(1U)
1744
1745#define S_DISPQPARERR 17
1746#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
1747#define F_DISPQPARERR V_DISPQPARERR(1U)
1748
1749#define S_REQQPARERR 16
1750#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
1751#define F_REQQPARERR V_REQQPARERR(1U)
1752
1753#define S_UNKNOWNCMD 15
1754#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
1755#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
1756
1757#define S_NFASRCHFAIL 8
1758#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
1759#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
1760
1761#define S_ACTRGNFULL 7
1762#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
1763#define F_ACTRGNFULL V_ACTRGNFULL(1U)
1764
1765#define S_PARITYERR 6
1766#define V_PARITYERR(x) ((x) << S_PARITYERR)
1767#define F_PARITYERR V_PARITYERR(1U)
1768
1769#define A_MC5_DB_INT_CAUSE 0x744
1770
1771#define A_MC5_DB_DBGI_CONFIG 0x774
1772
1773#define A_MC5_DB_DBGI_REQ_CMD 0x778
1774
1775#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
1776
1777#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
1778
1779#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
1780
1781#define A_MC5_DB_DBGI_REQ_DATA0 0x788
1782
1783#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
1784
1785#define A_MC5_DB_DBGI_REQ_DATA2 0x790
1786
1787#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
1788
1789#define S_DBGIRSPVALID 0
1790#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
1791#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
1792
1793#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
1794
1795#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
1796
1797#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
1798
1799#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
1800
1801#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
1802
1803#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
1804
1805#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
1806
1807#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
1808
1809#define A_MC5_DB_SYN_LRN_CMD 0x7e0
1810
1811#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
1812
1813#define A_MC5_DB_ACK_LRN_CMD 0x7e8
1814
1815#define A_MC5_DB_ILOOKUP_CMD 0x7ec
1816
1817#define A_MC5_DB_ELOOKUP_CMD 0x7f0
1818
1819#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
1820
1821#define A_MC5_DB_DATA_READ_CMD 0x7f8
1822
1823#define XGMAC0_0_BASE_ADDR 0x800
1824
1825#define A_XGM_TX_CTRL 0x800
1826
1827#define S_TXEN 0
1828#define V_TXEN(x) ((x) << S_TXEN)
1829#define F_TXEN V_TXEN(1U)
1830
1831#define A_XGM_TX_CFG 0x804
1832
1833#define S_TXPAUSEEN 0
1834#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
1835#define F_TXPAUSEEN V_TXPAUSEEN(1U)
1836
1837#define A_XGM_RX_CTRL 0x80c
1838
1839#define S_RXEN 0
1840#define V_RXEN(x) ((x) << S_RXEN)
1841#define F_RXEN V_RXEN(1U)
1842
1843#define A_XGM_RX_CFG 0x810
1844
1845#define S_DISPAUSEFRAMES 9
1846#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
1847#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
1848
1849#define S_EN1536BFRAMES 8
1850#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
1851#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
1852
1853#define S_ENJUMBO 7
1854#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
1855#define F_ENJUMBO V_ENJUMBO(1U)
1856
1857#define S_RMFCS 6
1858#define V_RMFCS(x) ((x) << S_RMFCS)
1859#define F_RMFCS V_RMFCS(1U)
1860
1861#define S_ENHASHMCAST 2
1862#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
1863#define F_ENHASHMCAST V_ENHASHMCAST(1U)
1864
1865#define S_COPYALLFRAMES 0
1866#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
1867#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
1868
1869#define A_XGM_RX_HASH_LOW 0x814
1870
1871#define A_XGM_RX_HASH_HIGH 0x818
1872
1873#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
1874
1875#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
1876
1877#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
1878
1879#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
1880
1881#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
1882
1883#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
1884
1885#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
1886
1887#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
1888
1889#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
1890
1891#define A_XGM_STAT_CTRL 0x880
1892
1893#define S_CLRSTATS 2
1894#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
1895#define F_CLRSTATS V_CLRSTATS(1U)
1896
1897#define A_XGM_RXFIFO_CFG 0x884
1898
1899#define S_RXFIFOPAUSEHWM 17
1900#define M_RXFIFOPAUSEHWM 0xfff
1901
1902#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
1903
1904#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
1905
1906#define S_RXFIFOPAUSELWM 5
1907#define M_RXFIFOPAUSELWM 0xfff
1908
1909#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
1910
1911#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
1912
1913#define S_RXSTRFRWRD 1
1914#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
1915#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
1916
1917#define S_DISERRFRAMES 0
1918#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
1919#define F_DISERRFRAMES V_DISERRFRAMES(1U)
1920
1921#define A_XGM_TXFIFO_CFG 0x888
1922
1923#define S_TXFIFOTHRESH 4
1924#define M_TXFIFOTHRESH 0x1ff
1925
1926#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
1927
1928#define A_XGM_SERDES_CTRL 0x890
1929#define A_XGM_SERDES_CTRL0 0x8e0
1930
1931#define S_SERDESRESET_ 24
1932#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
1933#define F_SERDESRESET_ V_SERDESRESET_(1U)
1934
1935#define S_RXENABLE 4
1936#define V_RXENABLE(x) ((x) << S_RXENABLE)
1937#define F_RXENABLE V_RXENABLE(1U)
1938
1939#define S_TXENABLE 3
1940#define V_TXENABLE(x) ((x) << S_TXENABLE)
1941#define F_TXENABLE V_TXENABLE(1U)
1942
1943#define A_XGM_PAUSE_TIMER 0x890
1944
1945#define A_XGM_RGMII_IMP 0x89c
1946
1947#define S_XGM_IMPSETUPDATE 6
1948#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
1949#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
1950
1951#define S_RGMIIIMPPD 3
1952#define M_RGMIIIMPPD 0x7
1953#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
1954
1955#define S_RGMIIIMPPU 0
1956#define M_RGMIIIMPPU 0x7
1957#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
1958
1959#define S_CALRESET 8
1960#define V_CALRESET(x) ((x) << S_CALRESET)
1961#define F_CALRESET V_CALRESET(1U)
1962
1963#define S_CALUPDATE 7
1964#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
1965#define F_CALUPDATE V_CALUPDATE(1U)
1966
1967#define A_XGM_XAUI_IMP 0x8a0
1968
1969#define S_CALBUSY 31
1970#define V_CALBUSY(x) ((x) << S_CALBUSY)
1971#define F_CALBUSY V_CALBUSY(1U)
1972
1973#define S_XGM_CALFAULT 29
1974#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
1975#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
1976
1977#define S_CALIMP 24
1978#define M_CALIMP 0x1f
1979#define V_CALIMP(x) ((x) << S_CALIMP)
1980#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
1981
1982#define S_XAUIIMP 0
1983#define M_XAUIIMP 0x7
1984#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
1985
1986#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
1987#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
1988
1989#define A_XGM_RESET_CTRL 0x8ac
1990
1991#define S_XG2G_RESET_ 3
1992#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
1993#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
1994
1995#define S_RGMII_RESET_ 2
1996#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
1997#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
1998
1999#define S_PCS_RESET_ 1
2000#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
2001#define F_PCS_RESET_ V_PCS_RESET_(1U)
2002
2003#define S_MAC_RESET_ 0
2004#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
2005#define F_MAC_RESET_ V_MAC_RESET_(1U)
2006
2007#define A_XGM_PORT_CFG 0x8b8
2008
2009#define S_CLKDIVRESET_ 3
2010#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
2011#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
2012
2013#define S_PORTSPEED 1
2014#define M_PORTSPEED 0x3
2015
2016#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
2017
2018#define S_ENRGMII 0
2019#define V_ENRGMII(x) ((x) << S_ENRGMII)
2020#define F_ENRGMII V_ENRGMII(1U)
2021
2022#define A_XGM_INT_ENABLE 0x8d4
2023
2024#define S_TXFIFO_PRTY_ERR 17
2025#define M_TXFIFO_PRTY_ERR 0x7
2026
2027#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
2028
2029#define S_RXFIFO_PRTY_ERR 14
2030#define M_RXFIFO_PRTY_ERR 0x7
2031
2032#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
2033
2034#define S_TXFIFO_UNDERRUN 13
2035#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
2036#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
2037
2038#define S_RXFIFO_OVERFLOW 12
2039#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
2040#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
2041
2042#define S_SERDES_LOS 4
2043#define M_SERDES_LOS 0xf
2044
2045#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
2046
2047#define S_XAUIPCSCTCERR 3
2048#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
2049#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
2050
2051#define S_XAUIPCSALIGNCHANGE 2
2052#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
2053#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
2054
2055#define A_XGM_INT_CAUSE 0x8d8
2056
2057#define A_XGM_XAUI_ACT_CTRL 0x8dc
2058
2059#define S_TXACTENABLE 1
2060#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
2061#define F_TXACTENABLE V_TXACTENABLE(1U)
2062
2063#define A_XGM_SERDES_CTRL0 0x8e0
2064
2065#define S_RESET3 23
2066#define V_RESET3(x) ((x) << S_RESET3)
2067#define F_RESET3 V_RESET3(1U)
2068
2069#define S_RESET2 22
2070#define V_RESET2(x) ((x) << S_RESET2)
2071#define F_RESET2 V_RESET2(1U)
2072
2073#define S_RESET1 21
2074#define V_RESET1(x) ((x) << S_RESET1)
2075#define F_RESET1 V_RESET1(1U)
2076
2077#define S_RESET0 20
2078#define V_RESET0(x) ((x) << S_RESET0)
2079#define F_RESET0 V_RESET0(1U)
2080
2081#define S_PWRDN3 19
2082#define V_PWRDN3(x) ((x) << S_PWRDN3)
2083#define F_PWRDN3 V_PWRDN3(1U)
2084
2085#define S_PWRDN2 18
2086#define V_PWRDN2(x) ((x) << S_PWRDN2)
2087#define F_PWRDN2 V_PWRDN2(1U)
2088
2089#define S_PWRDN1 17
2090#define V_PWRDN1(x) ((x) << S_PWRDN1)
2091#define F_PWRDN1 V_PWRDN1(1U)
2092
2093#define S_PWRDN0 16
2094#define V_PWRDN0(x) ((x) << S_PWRDN0)
2095#define F_PWRDN0 V_PWRDN0(1U)
2096
2097#define S_RESETPLL23 15
2098#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
2099#define F_RESETPLL23 V_RESETPLL23(1U)
2100
2101#define S_RESETPLL01 14
2102#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
2103#define F_RESETPLL01 V_RESETPLL01(1U)
2104
2105#define A_XGM_SERDES_STAT0 0x8f0
2106
2107#define S_LOWSIG0 0
2108#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
2109#define F_LOWSIG0 V_LOWSIG0(1U)
2110
2111#define A_XGM_SERDES_STAT3 0x8fc
2112
2113#define A_XGM_STAT_TX_BYTE_LOW 0x900
2114
2115#define A_XGM_STAT_TX_BYTE_HIGH 0x904
2116
2117#define A_XGM_STAT_TX_FRAME_LOW 0x908
2118
2119#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
2120
2121#define A_XGM_STAT_TX_BCAST 0x910
2122
2123#define A_XGM_STAT_TX_MCAST 0x914
2124
2125#define A_XGM_STAT_TX_PAUSE 0x918
2126
2127#define A_XGM_STAT_TX_64B_FRAMES 0x91c
2128
2129#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
2130
2131#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
2132
2133#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
2134
2135#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
2136
2137#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
2138
2139#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
2140
2141#define A_XGM_STAT_TX_ERR_FRAMES 0x938
2142
2143#define A_XGM_STAT_RX_BYTES_LOW 0x93c
2144
2145#define A_XGM_STAT_RX_BYTES_HIGH 0x940
2146
2147#define A_XGM_STAT_RX_FRAMES_LOW 0x944
2148
2149#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
2150
2151#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
2152
2153#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
2154
2155#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
2156
2157#define A_XGM_STAT_RX_64B_FRAMES 0x958
2158
2159#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
2160
2161#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
2162
2163#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
2164
2165#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
2166
2167#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
2168
2169#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
2170
2171#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
2172
2173#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
2174
2175#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
2176
2177#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
2178
2179#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
2180
2181#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
2182
2183#define A_XGM_SERDES_STATUS0 0x98c
2184
2185#define A_XGM_SERDES_STATUS1 0x990
2186
2187#define S_CMULOCK 31
2188#define V_CMULOCK(x) ((x) << S_CMULOCK)
2189#define F_CMULOCK V_CMULOCK(1U)
2190
2191#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
2192
2193#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
2194
2195#define XGMAC0_1_BASE_ADDR 0xa00
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
new file mode 100644
index 000000000000..3f2cf8a07c61
--- /dev/null
+++ b/drivers/net/cxgb3/sge.c
@@ -0,0 +1,2681 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
39#include "common.h"
40#include "regs.h"
41#include "sge_defs.h"
42#include "t3_cpl.h"
43#include "firmware_exports.h"
44
45#define USE_GTS 0
46
47#define SGE_RX_SM_BUF_SIZE 1536
48#define SGE_RX_COPY_THRES 256
49
50# define SGE_RX_DROP_THRES 16
51
52/*
53 * Period of the Tx buffer reclaim timer. This timer does not need to run
54 * frequently as Tx buffers are usually reclaimed by new Tx packets.
55 */
56#define TX_RECLAIM_PERIOD (HZ / 4)
57
58/* WR size in bytes */
59#define WR_LEN (WR_FLITS * 8)
60
61/*
62 * Types of Tx queues in each queue set. Order here matters, do not change.
63 */
64enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
65
66/* Values for sge_txq.flags */
67enum {
68 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
69 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
70};
71
72struct tx_desc {
73 u64 flit[TX_DESC_FLITS];
74};
75
76struct rx_desc {
77 __be32 addr_lo;
78 __be32 len_gen;
79 __be32 gen2;
80 __be32 addr_hi;
81};
82
83struct tx_sw_desc { /* SW state per Tx descriptor */
84 struct sk_buff *skb;
85};
86
87struct rx_sw_desc { /* SW state per Rx descriptor */
88 struct sk_buff *skb;
89 DECLARE_PCI_UNMAP_ADDR(dma_addr);
90};
91
92struct rsp_desc { /* response queue descriptor */
93 struct rss_header rss_hdr;
94 __be32 flags;
95 __be32 len_cq;
96 u8 imm_data[47];
97 u8 intr_gen;
98};
99
100struct unmap_info { /* packet unmapping info, overlays skb->cb */
101 int sflit; /* start flit of first SGL entry in Tx descriptor */
102 u16 fragidx; /* first page fragment in current Tx descriptor */
103 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
104 u32 len; /* mapped length of skb main body */
105};
106
107/*
108 * Maps a number of flits to the number of Tx descriptors that can hold them.
109 * The formula is
110 *
111 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
112 *
113 * HW allows up to 4 descriptors to be combined into a WR.
114 */
115static u8 flit_desc_map[] = {
116 0,
117#if SGE_NUM_GENBITS == 1
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
120 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
121 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
122#elif SGE_NUM_GENBITS == 2
123 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
124 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
125 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
126 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
127#else
128# error "SGE_NUM_GENBITS must be 1 or 2"
129#endif
130};
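/*
 * Illustrative note (assuming WR_FLITS == 16, which this excerpt does not
 * show): the table above agrees with the formula, e.g.
 *
 *      flits = 16  ->  1 + (16 - 2) / 15 = 1 descriptor
 *      flits = 17  ->  1 + (17 - 2) / 15 = 2 descriptors
 *      flits = 32  ->  1 + (32 - 2) / 15 = 3 descriptors
 *
 * which is what flit_desc_map[] yields for SGE_NUM_GENBITS == 1.
 */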
131
132static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
133{
134 return container_of(q, struct sge_qset, fl[qidx]);
135}
136
137static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
138{
139 return container_of(q, struct sge_qset, rspq);
140}
141
142static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
143{
144 return container_of(q, struct sge_qset, txq[qidx]);
145}
146
147/**
148 * refill_rspq - replenish an SGE response queue
149 * @adapter: the adapter
150 * @q: the response queue to replenish
151 * @credits: how many new responses to make available
152 *
153 * Replenishes a response queue by making the supplied number of responses
154 * available to HW.
155 */
156static inline void refill_rspq(struct adapter *adapter,
157 const struct sge_rspq *q, unsigned int credits)
158{
159 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
160 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
161}
162
163/**
164 * need_skb_unmap - does the platform need unmapping of sk_buffs?
165 *
166 * Returns true if the platform needs sk_buff unmapping. The compiler
167 * optimizes away unnecessary code if this returns false.
168 */
169static inline int need_skb_unmap(void)
170{
171 /*
172 * This structure is used to tell if the platform needs buffer
173 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
174 */
175 struct dummy {
176 DECLARE_PCI_UNMAP_ADDR(addr);
177 };
178
179 return sizeof(struct dummy) != 0;
180}
181
182/**
183 * unmap_skb - unmap a packet main body and its page fragments
184 * @skb: the packet
185 * @q: the Tx queue containing Tx descriptors for the packet
186 * @cidx: index of Tx descriptor
187 * @pdev: the PCI device
188 *
189 * Unmap the main body of an sk_buff and its page fragments, if any.
190 * Because of the fairly complicated structure of our SGLs and the desire
191 * to conserve space for metadata, we keep the information necessary to
192 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
193 * in the Tx descriptors (the physical addresses of the various data
194 * buffers). The send functions initialize the state in skb->cb so we
195 * can unmap the buffers held in the first Tx descriptor here, and we
196 * have enough information at this point to update the state for the next
197 * Tx descriptor.
198 */
199static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
200 unsigned int cidx, struct pci_dev *pdev)
201{
202 const struct sg_ent *sgp;
203 struct unmap_info *ui = (struct unmap_info *)skb->cb;
204 int nfrags, frag_idx, curflit, j = ui->addr_idx;
205
206 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
207
208 if (ui->len) {
209 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
210 PCI_DMA_TODEVICE);
211 ui->len = 0; /* so we know for next descriptor for this skb */
212 j = 1;
213 }
214
215 frag_idx = ui->fragidx;
216 curflit = ui->sflit + 1 + j;
217 nfrags = skb_shinfo(skb)->nr_frags;
218
219 while (frag_idx < nfrags && curflit < WR_FLITS) {
220 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
221 skb_shinfo(skb)->frags[frag_idx].size,
222 PCI_DMA_TODEVICE);
223 j ^= 1;
224 if (j == 0) {
225 sgp++;
226 curflit++;
227 }
228 curflit++;
229 frag_idx++;
230 }
231
232 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
233 ui->fragidx = frag_idx;
234 ui->addr_idx = j;
235 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
236 }
237}
238
239/**
240 * free_tx_desc - reclaims Tx descriptors and their buffers
241 * @adapter: the adapter
242 * @q: the Tx queue to reclaim descriptors from
243 * @n: the number of descriptors to reclaim
244 *
245 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
246 * Tx buffers. Called with the Tx queue lock held.
247 */
248static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
249 unsigned int n)
250{
251 struct tx_sw_desc *d;
252 struct pci_dev *pdev = adapter->pdev;
253 unsigned int cidx = q->cidx;
254
255 d = &q->sdesc[cidx];
256 while (n--) {
257 if (d->skb) { /* an SGL is present */
258 if (need_skb_unmap())
259 unmap_skb(d->skb, q, cidx, pdev);
260 if (d->skb->priority == cidx)
261 kfree_skb(d->skb);
262 }
263 ++d;
264 if (++cidx == q->size) {
265 cidx = 0;
266 d = q->sdesc;
267 }
268 }
269 q->cidx = cidx;
270}
271
272/**
273 * reclaim_completed_tx - reclaims completed Tx descriptors
274 * @adapter: the adapter
275 * @q: the Tx queue to reclaim completed descriptors from
276 *
277 * Reclaims Tx descriptors that the SGE has indicated it has processed,
278 * and frees the associated buffers if possible. Called with the Tx
279 * queue's lock held.
280 */
281static inline void reclaim_completed_tx(struct adapter *adapter,
282 struct sge_txq *q)
283{
284 unsigned int reclaim = q->processed - q->cleaned;
285
286 if (reclaim) {
287 free_tx_desc(adapter, q, reclaim);
288 q->cleaned += reclaim;
289 q->in_use -= reclaim;
290 }
291}
292
293/**
294 * should_restart_tx - are there enough resources to restart a Tx queue?
295 * @q: the Tx queue
296 *
297 * Checks if there are enough descriptors to restart a suspended Tx queue.
298 */
299static inline int should_restart_tx(const struct sge_txq *q)
300{
301 unsigned int r = q->processed - q->cleaned;
302
303 return q->in_use - r < (q->size >> 1);
304}
305
306/**
307 * free_rx_bufs - free the Rx buffers on an SGE free list
308 * @pdev: the PCI device associated with the adapter
309 * @rxq: the SGE free list to clean up
310 *
311 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
312 * this queue should be stopped before calling this function.
313 */
314static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
315{
316 unsigned int cidx = q->cidx;
317
318 while (q->credits--) {
319 struct rx_sw_desc *d = &q->sdesc[cidx];
320
321 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
322 q->buf_size, PCI_DMA_FROMDEVICE);
323 kfree_skb(d->skb);
324 d->skb = NULL;
325 if (++cidx == q->size)
326 cidx = 0;
327 }
328}
329
330/**
331 * add_one_rx_buf - add a packet buffer to a free-buffer list
332 * @skb: the buffer to add
333 * @len: the buffer length
334 * @d: the HW Rx descriptor to write
335 * @sd: the SW Rx descriptor to write
336 * @gen: the generation bit value
337 * @pdev: the PCI device associated with the adapter
338 *
339 * Add a buffer of the given length to the supplied HW and SW Rx
340 * descriptors.
341 */
342static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
343 struct rx_desc *d, struct rx_sw_desc *sd,
344 unsigned int gen, struct pci_dev *pdev)
345{
346 dma_addr_t mapping;
347
348 sd->skb = skb;
349 mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
350 pci_unmap_addr_set(sd, dma_addr, mapping);
351
352 d->addr_lo = cpu_to_be32(mapping);
353 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
354 wmb();
355 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
356 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
357}
358
359/**
360 * refill_fl - refill an SGE free-buffer list
361 * @adapter: the adapter
362 * @q: the free-list to refill
363 * @n: the number of new buffers to allocate
364 * @gfp: the gfp flags for allocating new buffers
365 *
366 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
367 * allocated with the supplied gfp flags. The caller must ensure that
368 * @n does not exceed the queue's capacity.
369 */
370static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
371{
372 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
373 struct rx_desc *d = &q->desc[q->pidx];
374
375 while (n--) {
376 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
377
378 if (!skb)
379 break;
380
381 add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
382 d++;
383 sd++;
384 if (++q->pidx == q->size) {
385 q->pidx = 0;
386 q->gen ^= 1;
387 sd = q->sdesc;
388 d = q->desc;
389 }
390 q->credits++;
391 }
392
393 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
394}
395
396static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
397{
398 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
399}
400
401/**
402 * recycle_rx_buf - recycle a receive buffer
403 * @adapter: the adapter
404 * @q: the SGE free list
405 * @idx: index of buffer to recycle
406 *
407 * Recycles the specified buffer on the given free list by adding it at
408 * the next available slot on the list.
409 */
410static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
411 unsigned int idx)
412{
413 struct rx_desc *from = &q->desc[idx];
414 struct rx_desc *to = &q->desc[q->pidx];
415
416 q->sdesc[q->pidx] = q->sdesc[idx];
417 to->addr_lo = from->addr_lo; /* already big endian */
418 to->addr_hi = from->addr_hi; /* likewise */
419 wmb();
420 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
421 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
422 q->credits++;
423
424 if (++q->pidx == q->size) {
425 q->pidx = 0;
426 q->gen ^= 1;
427 }
428 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
429}
430
431/**
432 * alloc_ring - allocate resources for an SGE descriptor ring
433 * @pdev: the PCI device
434 * @nelem: the number of descriptors
435 * @elem_size: the size of each descriptor
436 * @sw_size: the size of the SW state associated with each ring element
437 * @phys: the physical address of the allocated ring
438 * @metadata: address of the array holding the SW state for the ring
439 *
440 * Allocates resources for an SGE descriptor ring, such as Tx queues,
441 * free buffer lists, or response queues. Each SGE ring requires
442 * space for its HW descriptors plus, optionally, space for the SW state
443 * associated with each HW entry (the metadata). The function returns
444 * three values: the virtual address for the HW ring (the return value
445 * of the function), the physical address of the HW ring, and the address
446 * of the SW ring.
447 */
448static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
449 size_t sw_size, dma_addr_t *phys, void *metadata)
450{
451 size_t len = nelem * elem_size;
452 void *s = NULL;
453 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
454
455 if (!p)
456 return NULL;
457 if (sw_size) {
458 s = kcalloc(nelem, sw_size, GFP_KERNEL);
459
460 if (!s) {
461 dma_free_coherent(&pdev->dev, len, p, *phys);
462 return NULL;
463 }
464 }
465 if (metadata)
466 *(void **)metadata = s;
467 memset(p, 0, len);
468 return p;
469}
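/*
 * Illustrative sketch only (hypothetical sizes): a caller could allocate
 * a 512-entry response ring with no per-entry SW state as
 *
 *      dma_addr_t phys;
 *      struct rsp_desc *r = alloc_ring(pdev, 512, sizeof(struct rsp_desc),
 *                                      0, &phys, NULL);
 *
 * or a Tx ring with SW state by passing sizeof(struct tx_sw_desc) and the
 * address of the sdesc pointer as @metadata.
 */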
470
471/**
472 * free_qset - free the resources of an SGE queue set
473 * @adapter: the adapter owning the queue set
474 * @q: the queue set
475 *
476 * Release the HW and SW resources associated with an SGE queue set, such
477 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
478 * queue set must be quiesced prior to calling this.
479 */
480void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
481{
482 int i;
483 struct pci_dev *pdev = adapter->pdev;
484
485 if (q->tx_reclaim_timer.function)
486 del_timer_sync(&q->tx_reclaim_timer);
487
488 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
489 if (q->fl[i].desc) {
490 spin_lock(&adapter->sge.reg_lock);
491 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
492 spin_unlock(&adapter->sge.reg_lock);
493 free_rx_bufs(pdev, &q->fl[i]);
494 kfree(q->fl[i].sdesc);
495 dma_free_coherent(&pdev->dev,
496 q->fl[i].size *
497 sizeof(struct rx_desc), q->fl[i].desc,
498 q->fl[i].phys_addr);
499 }
500
501 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
502 if (q->txq[i].desc) {
503 spin_lock(&adapter->sge.reg_lock);
504 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
505 spin_unlock(&adapter->sge.reg_lock);
506 if (q->txq[i].sdesc) {
507 free_tx_desc(adapter, &q->txq[i],
508 q->txq[i].in_use);
509 kfree(q->txq[i].sdesc);
510 }
511 dma_free_coherent(&pdev->dev,
512 q->txq[i].size *
513 sizeof(struct tx_desc),
514 q->txq[i].desc, q->txq[i].phys_addr);
515 __skb_queue_purge(&q->txq[i].sendq);
516 }
517
518 if (q->rspq.desc) {
519 spin_lock(&adapter->sge.reg_lock);
520 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
521 spin_unlock(&adapter->sge.reg_lock);
522 dma_free_coherent(&pdev->dev,
523 q->rspq.size * sizeof(struct rsp_desc),
524 q->rspq.desc, q->rspq.phys_addr);
525 }
526
527 if (q->netdev)
528 q->netdev->atalk_ptr = NULL;
529
530 memset(q, 0, sizeof(*q));
531}
532
533/**
534 * init_qset_cntxt - initialize an SGE queue set context info
535 * @qs: the queue set
536 * @id: the queue set id
537 *
538 * Initializes the TIDs and context ids for the queues of a queue set.
539 */
540static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
541{
542 qs->rspq.cntxt_id = id;
543 qs->fl[0].cntxt_id = 2 * id;
544 qs->fl[1].cntxt_id = 2 * id + 1;
545 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
546 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
547 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
548 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
549 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
550}
551
552/**
553 * sgl_len - calculates the size of an SGL of the given capacity
554 * @n: the number of SGL entries
555 *
556 * Calculates the number of flits needed for a scatter/gather list that
557 * can hold the given number of entries.
558 */
559static inline unsigned int sgl_len(unsigned int n)
560{
561 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
562 return (3 * n) / 2 + (n & 1);
563}
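/*
 * Illustrative note: each struct sg_ent packs two length/address pairs
 * into 3 flits, so for example sgl_len(4) == 6 while sgl_len(5) == 8,
 * because an odd final entry still needs a length flit and an address
 * flit.
 */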
564
565/**
566 * flits_to_desc - returns the num of Tx descriptors for the given flits
567 * @n: the number of flits
568 *
569 * Calculates the number of Tx descriptors needed for the supplied number
570 * of flits.
571 */
572static inline unsigned int flits_to_desc(unsigned int n)
573{
574 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
575 return flit_desc_map[n];
576}
577
578/**
579 * get_packet - return the next ingress packet buffer from a free list
580 * @adap: the adapter that received the packet
581 * @fl: the SGE free list holding the packet
582 * @len: the packet length including any SGE padding
583 * @drop_thres: # of remaining buffers before we start dropping packets
584 *
585 * Get the next packet from a free list and complete setup of the
586 * sk_buff. If the packet is small we make a copy and recycle the
587 * original buffer, otherwise we use the original buffer itself. If a
588 * positive drop threshold is supplied packets are dropped and their
589 * buffers recycled if (a) the number of remaining buffers is under the
590 * threshold and the packet is too big to copy, or (b) the packet should
591 * be copied but there is no memory for the copy.
592 */
593static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
594 unsigned int len, unsigned int drop_thres)
595{
596 struct sk_buff *skb = NULL;
597 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
598
599 prefetch(sd->skb->data);
600
601 if (len <= SGE_RX_COPY_THRES) {
602 skb = alloc_skb(len, GFP_ATOMIC);
603 if (likely(skb != NULL)) {
604 __skb_put(skb, len);
605 pci_dma_sync_single_for_cpu(adap->pdev,
606 pci_unmap_addr(sd,
607 dma_addr),
608 len, PCI_DMA_FROMDEVICE);
609 memcpy(skb->data, sd->skb->data, len);
610 pci_dma_sync_single_for_device(adap->pdev,
611 pci_unmap_addr(sd,
612 dma_addr),
613 len, PCI_DMA_FROMDEVICE);
614 } else if (!drop_thres)
615 goto use_orig_buf;
616 recycle:
617 recycle_rx_buf(adap, fl, fl->cidx);
618 return skb;
619 }
620
621 if (unlikely(fl->credits < drop_thres))
622 goto recycle;
623
624 use_orig_buf:
625 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
626 fl->buf_size, PCI_DMA_FROMDEVICE);
627 skb = sd->skb;
628 skb_put(skb, len);
629 __refill_fl(adap, fl);
630 return skb;
631}
632
633/**
634 * get_imm_packet - return the next ingress packet buffer from a response
635 * @resp: the response descriptor containing the packet data
636 *
637 * Return a packet containing the immediate data of the given response.
638 */
639static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
640{
641 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
642
643 if (skb) {
644 __skb_put(skb, IMMED_PKT_SIZE);
645 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
646 }
647 return skb;
648}
649
650/**
651 * calc_tx_descs - calculate the number of Tx descriptors for a packet
652 * @skb: the packet
653 *
654 * Returns the number of Tx descriptors needed for the given Ethernet
655 * packet. Ethernet packets require addition of WR and CPL headers.
656 */
657static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
658{
659 unsigned int flits;
660
661 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
662 return 1;
663
664 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
665 if (skb_shinfo(skb)->gso_size)
666 flits++;
667 return flits_to_desc(flits);
668}
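/*
 * Illustrative example (assuming WR_LEN == 128, not shown in this
 * excerpt): a linear 1500-byte packet with no page fragments is too big
 * for immediate data, so flits = sgl_len(0 + 1) + 2 = 4 and
 * flits_to_desc(4) == 1, i.e. it needs a single Tx descriptor.
 */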
669
670/**
671 * make_sgl - populate a scatter/gather list for a packet
672 * @skb: the packet
673 * @sgp: the SGL to populate
674 * @start: start address of skb main body data to include in the SGL
675 * @len: length of skb main body data to include in the SGL
676 * @pdev: the PCI device
677 *
678 * Generates a scatter/gather list for the buffers that make up a packet
679 * and returns the SGL size in 8-byte words. The caller must size the SGL
680 * appropriately.
681 */
682static inline unsigned int make_sgl(const struct sk_buff *skb,
683 struct sg_ent *sgp, unsigned char *start,
684 unsigned int len, struct pci_dev *pdev)
685{
686 dma_addr_t mapping;
687 unsigned int i, j = 0, nfrags;
688
689 if (len) {
690 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
691 sgp->len[0] = cpu_to_be32(len);
692 sgp->addr[0] = cpu_to_be64(mapping);
693 j = 1;
694 }
695
696 nfrags = skb_shinfo(skb)->nr_frags;
697 for (i = 0; i < nfrags; i++) {
698 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
699
700 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
701 frag->size, PCI_DMA_TODEVICE);
702 sgp->len[j] = cpu_to_be32(frag->size);
703 sgp->addr[j] = cpu_to_be64(mapping);
704 j ^= 1;
705 if (j == 0)
706 ++sgp;
707 }
708 if (j)
709 sgp->len[j] = 0;
710 return ((nfrags + (len != 0)) * 3) / 2 + j;
711}
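/*
 * Illustrative note: the value returned here matches sgl_len(), e.g. a
 * mapped main body plus 3 page fragments gives 4 SGL entries and
 * ((3 + 1) * 3) / 2 + 0 = 6 flits.
 */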
712
713/**
714 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
715 * @adap: the adapter
716 * @q: the Tx queue
717 *
718 * Ring the doorbell if a Tx queue is asleep. There is a natural race
719 * where the HW may go to sleep just after we checked; in that case
720 * the interrupt handler will detect the outstanding Tx packet and
721 * ring the doorbell for us.
722 *
723 * When GTS is disabled we unconditionally ring the doorbell.
724 */
725static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
726{
727#if USE_GTS
728 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
729 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
730 set_bit(TXQ_LAST_PKT_DB, &q->flags);
731 t3_write_reg(adap, A_SG_KDOORBELL,
732 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
733 }
734#else
735 wmb(); /* write descriptors before telling HW */
736 t3_write_reg(adap, A_SG_KDOORBELL,
737 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
738#endif
739}
740
741static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
742{
743#if SGE_NUM_GENBITS == 2
744 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
745#endif
746}
747
748/**
749 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
750 * @ndesc: number of Tx descriptors spanned by the SGL
751 * @skb: the packet corresponding to the WR
752 * @d: first Tx descriptor to be written
753 * @pidx: index of above descriptors
754 * @q: the SGE Tx queue
755 * @sgl: the SGL
756 * @flits: number of flits to the start of the SGL in the first descriptor
757 * @sgl_flits: the SGL size in flits
758 * @gen: the Tx descriptor generation
759 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
760 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
761 *
762 * Write a work request header and an associated SGL. If the SGL is
763 * small enough to fit into one Tx descriptor it has already been written
764 * and we just need to write the WR header. Otherwise we distribute the
765 * SGL across the number of descriptors it spans.
766 */
767static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
768 struct tx_desc *d, unsigned int pidx,
769 const struct sge_txq *q,
770 const struct sg_ent *sgl,
771 unsigned int flits, unsigned int sgl_flits,
772 unsigned int gen, unsigned int wr_hi,
773 unsigned int wr_lo)
774{
775 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
776 struct tx_sw_desc *sd = &q->sdesc[pidx];
777
778 sd->skb = skb;
779 if (need_skb_unmap()) {
780 struct unmap_info *ui = (struct unmap_info *)skb->cb;
781
782 ui->fragidx = 0;
783 ui->addr_idx = 0;
784 ui->sflit = flits;
785 }
786
787 if (likely(ndesc == 1)) {
788 skb->priority = pidx;
789 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
790 V_WR_SGLSFLT(flits)) | wr_hi;
791 wmb();
792 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
793 V_WR_GEN(gen)) | wr_lo;
794 wr_gen2(d, gen);
795 } else {
796 unsigned int ogen = gen;
797 const u64 *fp = (const u64 *)sgl;
798 struct work_request_hdr *wp = wrp;
799
800 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
801 V_WR_SGLSFLT(flits)) | wr_hi;
802
803 while (sgl_flits) {
804 unsigned int avail = WR_FLITS - flits;
805
806 if (avail > sgl_flits)
807 avail = sgl_flits;
808 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
809 sgl_flits -= avail;
810 ndesc--;
811 if (!sgl_flits)
812 break;
813
814 fp += avail;
815 d++;
816 sd++;
817 if (++pidx == q->size) {
818 pidx = 0;
819 gen ^= 1;
820 d = q->desc;
821 sd = q->sdesc;
822 }
823
824 sd->skb = skb;
825 wrp = (struct work_request_hdr *)d;
826 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
827 V_WR_SGLSFLT(1)) | wr_hi;
828 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
829 sgl_flits + 1)) |
830 V_WR_GEN(gen)) | wr_lo;
831 wr_gen2(d, gen);
832 flits = 1;
833 }
834 skb->priority = pidx;
835 wrp->wr_hi |= htonl(F_WR_EOP);
836 wmb();
837 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
838 wr_gen2((struct tx_desc *)wp, ogen);
839 WARN_ON(ndesc != 0);
840 }
841}
842
843/**
844 * write_tx_pkt_wr - write a TX_PKT work request
845 * @adap: the adapter
846 * @skb: the packet to send
847 * @pi: the egress interface
848 * @pidx: index of the first Tx descriptor to write
849 * @gen: the generation value to use
850 * @q: the Tx queue
851 * @ndesc: number of descriptors the packet will occupy
852 * @compl: the value of the COMPL bit to use
853 *
854 * Generate a TX_PKT work request to send the supplied packet.
855 */
856static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
857 const struct port_info *pi,
858 unsigned int pidx, unsigned int gen,
859 struct sge_txq *q, unsigned int ndesc,
860 unsigned int compl)
861{
862 unsigned int flits, sgl_flits, cntrl, tso_info;
863 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
864 struct tx_desc *d = &q->desc[pidx];
865 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
866
867 cpl->len = htonl(skb->len | 0x80000000);
868 cntrl = V_TXPKT_INTF(pi->port_id);
869
870 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
871 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
872
873 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
874 if (tso_info) {
875 int eth_type;
876 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
877
878 d->flit[2] = 0;
879 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
880 hdr->cntrl = htonl(cntrl);
881 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
882 CPL_ETH_II : CPL_ETH_II_VLAN;
883 tso_info |= V_LSO_ETH_TYPE(eth_type) |
884 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
885 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
886 hdr->lso_info = htonl(tso_info);
887 flits = 3;
888 } else {
889 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
890 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
891 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
892 cpl->cntrl = htonl(cntrl);
893
894 if (skb->len <= WR_LEN - sizeof(*cpl)) {
895 q->sdesc[pidx].skb = NULL;
896 if (!skb->data_len)
897 memcpy(&d->flit[2], skb->data, skb->len);
898 else
899 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
900
901 flits = (skb->len + 7) / 8 + 2;
902 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
903 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
904 | F_WR_SOP | F_WR_EOP | compl);
905 wmb();
906 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
907 V_WR_TID(q->token));
908 wr_gen2(d, gen);
909 kfree_skb(skb);
910 return;
911 }
912
913 flits = 2;
914 }
915
916 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
917 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
918 if (need_skb_unmap())
919 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
920
921 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
922 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
923 htonl(V_WR_TID(q->token)));
924}
925
926/**
927 * eth_xmit - add a packet to the Ethernet Tx queue
928 * @skb: the packet
929 * @dev: the egress net device
930 *
931 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
932 */
933int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
934{
935 unsigned int ndesc, pidx, credits, gen, compl;
936 const struct port_info *pi = netdev_priv(dev);
937 struct adapter *adap = dev->priv;
938 struct sge_qset *qs = dev2qset(dev);
939 struct sge_txq *q = &qs->txq[TXQ_ETH];
940
941 /*
942 * The chip's minimum packet length is 9 octets, but play it safe and reject
943 * anything shorter than an Ethernet header.
944 */
945 if (unlikely(skb->len < ETH_HLEN)) {
946 dev_kfree_skb(skb);
947 return NETDEV_TX_OK;
948 }
949
950 spin_lock(&q->lock);
951 reclaim_completed_tx(adap, q);
952
953 credits = q->size - q->in_use;
954 ndesc = calc_tx_descs(skb);
955
956 if (unlikely(credits < ndesc)) {
957 if (!netif_queue_stopped(dev)) {
958 netif_stop_queue(dev);
959 set_bit(TXQ_ETH, &qs->txq_stopped);
960 q->stops++;
961 dev_err(&adap->pdev->dev,
962 "%s: Tx ring %u full while queue awake!\n",
963 dev->name, q->cntxt_id & 7);
964 }
965 spin_unlock(&q->lock);
966 return NETDEV_TX_BUSY;
967 }
968
969 q->in_use += ndesc;
970 if (unlikely(credits - ndesc < q->stop_thres)) {
971 q->stops++;
972 netif_stop_queue(dev);
973 set_bit(TXQ_ETH, &qs->txq_stopped);
974#if !USE_GTS
975 if (should_restart_tx(q) &&
976 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
977 q->restarts++;
978 netif_wake_queue(dev);
979 }
980#endif
981 }
982
983 gen = q->gen;
984 q->unacked += ndesc;
985 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
986 q->unacked &= 7;
987 pidx = q->pidx;
988 q->pidx += ndesc;
989 if (q->pidx >= q->size) {
990 q->pidx -= q->size;
991 q->gen ^= 1;
992 }
993
994 /* update port statistics */
995 if (skb->ip_summed == CHECKSUM_COMPLETE)
996 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
997 if (skb_shinfo(skb)->gso_size)
998 qs->port_stats[SGE_PSTAT_TSO]++;
999 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1000 qs->port_stats[SGE_PSTAT_VLANINS]++;
1001
1002 dev->trans_start = jiffies;
1003 spin_unlock(&q->lock);
1004
1005 /*
1006 * We do not use Tx completion interrupts to free DMAd Tx packets.
1007 * This is good for performance but means that we rely on new Tx
1008 * packets arriving to run the destructors of completed packets,
1009 * which open up space in their sockets' send queues. Sometimes
1010 * we do not get such new packets causing Tx to stall. A single
1011 * UDP transmitter is a good example of this situation. We have
1012 * a clean up timer that periodically reclaims completed packets
1013 * but it doesn't run often enough (nor do we want it to) to prevent
1014 * lengthy stalls. A solution to this problem is to run the
1015 * destructor early, after the packet is queued but before it's DMAd.
1016 * A downside is that we lie to socket memory accounting, but the amount
1017 * of extra memory is reasonable (limited by the number of Tx
1018 * descriptors), the packets do actually get freed quickly by new
1019 * packets almost always, and for protocols like TCP that wait for
1020 * acks to really free up the data the extra memory is even less.
1021 * On the positive side we run the destructors on the sending CPU
1022 * rather than on a potentially different completing CPU, usually a
1023 * good thing. We also run them without holding our Tx queue lock,
1024 * unlike what reclaim_completed_tx() would otherwise do.
1025 *
1026 * Run the destructor before telling the DMA engine about the packet
1027 * to make sure it doesn't complete and get freed prematurely.
1028 */
1029 if (likely(!skb_shared(skb)))
1030 skb_orphan(skb);
1031
1032 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1033 check_ring_tx_db(adap, q);
1034 return NETDEV_TX_OK;
1035}
1036
1037/**
1038 * write_imm - write a packet into a Tx descriptor as immediate data
1039 * @d: the Tx descriptor to write
1040 * @skb: the packet
1041 * @len: the length of packet data to write as immediate data
1042 * @gen: the generation bit value to write
1043 *
1044 * Writes a packet as immediate data into a Tx descriptor. The packet
1045 * contains a work request at its beginning. We must write the packet
1046 * carefully so the SGE doesn't accidentally read it before it's written in
1047 * its entirety.
1048 */
1049static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1050 unsigned int len, unsigned int gen)
1051{
1052 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1053 struct work_request_hdr *to = (struct work_request_hdr *)d;
1054
1055 memcpy(&to[1], &from[1], len - sizeof(*from));
1056 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1057 V_WR_BCNTLFLT(len & 7));
1058 wmb();
1059 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1060 V_WR_LEN((len + 7) / 8));
1061 wr_gen2(d, gen);
1062 kfree_skb(skb);
1063}
1064
1065/**
1066 * check_desc_avail - check descriptor availability on a send queue
1067 * @adap: the adapter
1068 * @q: the send queue
1069 * @skb: the packet needing the descriptors
1070 * @ndesc: the number of Tx descriptors needed
1071 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1072 *
1073 * Checks if the requested number of Tx descriptors is available on an
1074 * SGE send queue. If the queue is already suspended or not enough
1075 * descriptors are available the packet is queued for later transmission.
1076 * Must be called with the Tx queue locked.
1077 *
1078 * Returns 0 if enough descriptors are available, 1 if there aren't
1079 * enough descriptors and the packet has been queued, and 2 if the caller
1080 * needs to retry because there weren't enough descriptors at the
1081 * beginning of the call but some freed up in the meantime.
1082 */
1083static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1084 struct sk_buff *skb, unsigned int ndesc,
1085 unsigned int qid)
1086{
1087 if (unlikely(!skb_queue_empty(&q->sendq))) {
1088 addq_exit:__skb_queue_tail(&q->sendq, skb);
1089 return 1;
1090 }
1091 if (unlikely(q->size - q->in_use < ndesc)) {
1092 struct sge_qset *qs = txq_to_qset(q, qid);
1093
1094 set_bit(qid, &qs->txq_stopped);
1095 smp_mb__after_clear_bit();
1096
1097 if (should_restart_tx(q) &&
1098 test_and_clear_bit(qid, &qs->txq_stopped))
1099 return 2;
1100
1101 q->stops++;
1102 goto addq_exit;
1103 }
1104 return 0;
1105}
1106
1107/**
1108 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1109 * @q: the SGE control Tx queue
1110 *
1111 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1112 * that send only immediate data (presently just the control queues) and
1113 * thus do not have any sk_buffs to release.
1114 */
1115static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1116{
1117 unsigned int reclaim = q->processed - q->cleaned;
1118
1119 q->in_use -= reclaim;
1120 q->cleaned += reclaim;
1121}
1122
1123static inline int immediate(const struct sk_buff *skb)
1124{
1125 return skb->len <= WR_LEN && !skb->data_len;
1126}
1127
1128/**
1129 * ctrl_xmit - send a packet through an SGE control Tx queue
1130 * @adap: the adapter
1131 * @q: the control queue
1132 * @skb: the packet
1133 *
1134 * Send a packet through an SGE control Tx queue. Packets sent through
1135 * a control queue must fit entirely as immediate data in a single Tx
1136 * descriptor and have no page fragments.
1137 */
1138static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1139 struct sk_buff *skb)
1140{
1141 int ret;
1142 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1143
1144 if (unlikely(!immediate(skb))) {
1145 WARN_ON(1);
1146 dev_kfree_skb(skb);
1147 return NET_XMIT_SUCCESS;
1148 }
1149
1150 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1151 wrp->wr_lo = htonl(V_WR_TID(q->token));
1152
1153 spin_lock(&q->lock);
1154 again:reclaim_completed_tx_imm(q);
1155
1156 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1157 if (unlikely(ret)) {
1158 if (ret == 1) {
1159 spin_unlock(&q->lock);
1160 return NET_XMIT_CN;
1161 }
1162 goto again;
1163 }
1164
1165 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1166
1167 q->in_use++;
1168 if (++q->pidx >= q->size) {
1169 q->pidx = 0;
1170 q->gen ^= 1;
1171 }
1172 spin_unlock(&q->lock);
1173 wmb();
1174 t3_write_reg(adap, A_SG_KDOORBELL,
1175 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1176 return NET_XMIT_SUCCESS;
1177}
1178
1179/**
1180 * restart_ctrlq - restart a suspended control queue
1181 * @qs: the queue set containing the control queue
1182 *
1183 * Resumes transmission on a suspended Tx control queue.
1184 */
1185static void restart_ctrlq(unsigned long data)
1186{
1187 struct sk_buff *skb;
1188 struct sge_qset *qs = (struct sge_qset *)data;
1189 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1190 struct adapter *adap = qs->netdev->priv;
1191
1192 spin_lock(&q->lock);
1193 again:reclaim_completed_tx_imm(q);
1194
1195 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1196
1197 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1198
1199 if (++q->pidx >= q->size) {
1200 q->pidx = 0;
1201 q->gen ^= 1;
1202 }
1203 q->in_use++;
1204 }
1205
1206 if (!skb_queue_empty(&q->sendq)) {
1207 set_bit(TXQ_CTRL, &qs->txq_stopped);
1208 smp_mb__after_clear_bit();
1209
1210 if (should_restart_tx(q) &&
1211 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1212 goto again;
1213 q->stops++;
1214 }
1215
1216 spin_unlock(&q->lock);
1217 t3_write_reg(adap, A_SG_KDOORBELL,
1218 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1219}
1220
1221/*
1222 * Send a management message through control queue 0
1223 */
1224int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1225{
1226 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1227}
1228
1229/**
1230 * write_ofld_wr - write an offload work request
1231 * @adap: the adapter
1232 * @skb: the packet to send
1233 * @q: the Tx queue
1234 * @pidx: index of the first Tx descriptor to write
1235 * @gen: the generation value to use
1236 * @ndesc: number of descriptors the packet will occupy
1237 *
1238 * Write an offload work request to send the supplied packet. The packet
1239 * data already carry the work request with most fields populated.
1240 */
1241static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1242 struct sge_txq *q, unsigned int pidx,
1243 unsigned int gen, unsigned int ndesc)
1244{
1245 unsigned int sgl_flits, flits;
1246 struct work_request_hdr *from;
1247 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1248 struct tx_desc *d = &q->desc[pidx];
1249
1250 if (immediate(skb)) {
1251 q->sdesc[pidx].skb = NULL;
1252 write_imm(d, skb, skb->len, gen);
1253 return;
1254 }
1255
1256 /* Only TX_DATA builds SGLs */
1257
1258 from = (struct work_request_hdr *)skb->data;
1259 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1260
1261 flits = (skb->h.raw - skb->data) / 8;
1262 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1263 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1264 adap->pdev);
1265 if (need_skb_unmap())
1266 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1267
1268 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1269 gen, from->wr_hi, from->wr_lo);
1270}
1271
1272/**
1273 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1274 * @skb: the packet
1275 *
1276 * Returns the number of Tx descriptors needed for the given offload
1277 * packet. These packets are already fully constructed.
1278 */
1279static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1280{
1281 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1282
1283 if (skb->len <= WR_LEN && cnt == 0)
1284 return 1; /* packet fits as immediate data */
1285
1286 flits = (skb->h.raw - skb->data) / 8; /* headers */
1287 if (skb->tail != skb->h.raw)
1288 cnt++;
1289 return flits_to_desc(flits + sgl_len(cnt));
1290}
1291
1292/**
1293 * ofld_xmit - send a packet through an offload queue
1294 * @adap: the adapter
1295 * @q: the Tx offload queue
1296 * @skb: the packet
1297 *
1298 * Send an offload packet through an SGE offload queue.
1299 */
1300static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1301 struct sk_buff *skb)
1302{
1303 int ret;
1304 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1305
1306 spin_lock(&q->lock);
1307 again: reclaim_completed_tx(adap, q);
1308
1309 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1310 if (unlikely(ret)) {
1311 if (ret == 1) {
1312 skb->priority = ndesc; /* save for restart */
1313 spin_unlock(&q->lock);
1314 return NET_XMIT_CN;
1315 }
1316 goto again;
1317 }
1318
1319 gen = q->gen;
1320 q->in_use += ndesc;
1321 pidx = q->pidx;
1322 q->pidx += ndesc;
1323 if (q->pidx >= q->size) {
1324 q->pidx -= q->size;
1325 q->gen ^= 1;
1326 }
1327 spin_unlock(&q->lock);
1328
1329 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1330 check_ring_tx_db(adap, q);
1331 return NET_XMIT_SUCCESS;
1332}
1333
1334/**
1335 * restart_offloadq - restart a suspended offload queue
1336 * @qs: the queue set containing the offload queue
1337 *
1338 * Resumes transmission on a suspended Tx offload queue.
1339 */
1340static void restart_offloadq(unsigned long data)
1341{
1342 struct sk_buff *skb;
1343 struct sge_qset *qs = (struct sge_qset *)data;
1344 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1345 struct adapter *adap = qs->netdev->priv;
1346
1347 spin_lock(&q->lock);
1348 again: reclaim_completed_tx(adap, q);
1349
1350 while ((skb = skb_peek(&q->sendq)) != NULL) {
1351 unsigned int gen, pidx;
1352 unsigned int ndesc = skb->priority;
1353
1354 if (unlikely(q->size - q->in_use < ndesc)) {
1355 set_bit(TXQ_OFLD, &qs->txq_stopped);
1356 smp_mb__after_clear_bit();
1357
1358 if (should_restart_tx(q) &&
1359 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1360 goto again;
1361 q->stops++;
1362 break;
1363 }
1364
1365 gen = q->gen;
1366 q->in_use += ndesc;
1367 pidx = q->pidx;
1368 q->pidx += ndesc;
1369 if (q->pidx >= q->size) {
1370 q->pidx -= q->size;
1371 q->gen ^= 1;
1372 }
1373 __skb_unlink(skb, &q->sendq);
1374 spin_unlock(&q->lock);
1375
1376 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1377 spin_lock(&q->lock);
1378 }
1379 spin_unlock(&q->lock);
1380
1381#if USE_GTS
1382 set_bit(TXQ_RUNNING, &q->flags);
1383 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1384#endif
1385 t3_write_reg(adap, A_SG_KDOORBELL,
1386 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1387}
1388
1389/**
1390 * queue_set - return the queue set a packet should use
1391 * @skb: the packet
1392 *
1393 * Maps a packet to the SGE queue set it should use. The desired queue
1394 * set is carried in bits 1-3 in the packet's priority.
1395 */
1396static inline int queue_set(const struct sk_buff *skb)
1397{
1398 return skb->priority >> 1;
1399}
1400
1401/**
1402 * is_ctrl_pkt - return whether an offload packet is a control packet
1403 * @skb: the packet
1404 *
1405 * Determines whether an offload packet should use an OFLD or a CTRL
1406 * Tx queue. This is indicated by bit 0 in the packet's priority.
1407 */
1408static inline int is_ctrl_pkt(const struct sk_buff *skb)
1409{
1410 return skb->priority & 1;
1411}
1412
1413/**
1414 * t3_offload_tx - send an offload packet
1415 * @tdev: the offload device to send to
1416 * @skb: the packet
1417 *
1418 * Sends an offload packet. We use the packet priority to select the
1419 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1420 * should be sent as regular or control, bits 1-3 select the queue set.
1421 */
1422int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1423{
1424 struct adapter *adap = tdev2adap(tdev);
1425 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1426
1427 if (unlikely(is_ctrl_pkt(skb)))
1428 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1429
1430 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1431}
1432
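A minimal editorial sketch of the priority encoding documented above, assuming a hypothetical offload caller that has already picked its queue set; the helper name is illustrative and does not exist in the driver.

static inline void example_set_ofld_priority(struct sk_buff *skb,
					     unsigned int qset, int ctrl)
{
	/* bit 0: CTRL vs. OFLD queue, bits 1-3: queue set index */
	skb->priority = (qset << 1) | (ctrl & 1);
}

/* e.g.: example_set_ofld_priority(skb, 2, 0); t3_offload_tx(tdev, skb); */
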
1433/**
1434 * offload_enqueue - add an offload packet to an SGE offload receive queue
1435 * @q: the SGE response queue
1436 * @skb: the packet
1437 *
1438 * Add a new offload packet to an SGE response queue's offload packet
1439 * queue. If the packet is the first on the queue it schedules the RX
1440 * softirq to process the queue.
1441 */
1442static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1443{
1444 skb->next = skb->prev = NULL;
1445 if (q->rx_tail)
1446 q->rx_tail->next = skb;
1447 else {
1448 struct sge_qset *qs = rspq_to_qset(q);
1449
1450 if (__netif_rx_schedule_prep(qs->netdev))
1451 __netif_rx_schedule(qs->netdev);
1452 q->rx_head = skb;
1453 }
1454 q->rx_tail = skb;
1455}
1456
1457/**
1458 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1459 * @tdev: the offload device that will be receiving the packets
1460 * @q: the SGE response queue that assembled the bundle
1461 * @skbs: the partial bundle
1462 * @n: the number of packets in the bundle
1463 *
1464 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1465 */
1466static inline void deliver_partial_bundle(struct t3cdev *tdev,
1467 struct sge_rspq *q,
1468 struct sk_buff *skbs[], int n)
1469{
1470 if (n) {
1471 q->offload_bundles++;
1472 tdev->recv(tdev, skbs, n);
1473 }
1474}
1475
1476/**
1477 * ofld_poll - NAPI handler for offload packets in interrupt mode
1478 * @dev: the network device doing the polling
1479 * @budget: polling budget
1480 *
1481 * The NAPI handler for offload packets when a response queue is serviced
1482 * by the hard interrupt handler, i.e., when it's operating in non-polling
1483 * mode. Creates small packet batches and sends them through the offload
1484 * receive handler. Batches need to be of modest size as we do prefetches
1485 * on the packets in each.
1486 */
1487static int ofld_poll(struct net_device *dev, int *budget)
1488{
1489 struct adapter *adapter = dev->priv;
1490 struct sge_qset *qs = dev2qset(dev);
1491 struct sge_rspq *q = &qs->rspq;
1492 int work_done, limit = min(*budget, dev->quota), avail = limit;
1493
1494 while (avail) {
1495 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1496 int ngathered;
1497
1498 spin_lock_irq(&q->lock);
1499 head = q->rx_head;
1500 if (!head) {
1501 work_done = limit - avail;
1502 *budget -= work_done;
1503 dev->quota -= work_done;
1504 __netif_rx_complete(dev);
1505 spin_unlock_irq(&q->lock);
1506 return 0;
1507 }
1508
1509 tail = q->rx_tail;
1510 q->rx_head = q->rx_tail = NULL;
1511 spin_unlock_irq(&q->lock);
1512
1513 for (ngathered = 0; avail && head; avail--) {
1514 prefetch(head->data);
1515 skbs[ngathered] = head;
1516 head = head->next;
1517 skbs[ngathered]->next = NULL;
1518 if (++ngathered == RX_BUNDLE_SIZE) {
1519 q->offload_bundles++;
1520 adapter->tdev.recv(&adapter->tdev, skbs,
1521 ngathered);
1522 ngathered = 0;
1523 }
1524 }
1525 if (head) { /* splice remaining packets back onto Rx queue */
1526 spin_lock_irq(&q->lock);
1527 tail->next = q->rx_head;
1528 if (!q->rx_head)
1529 q->rx_tail = tail;
1530 q->rx_head = head;
1531 spin_unlock_irq(&q->lock);
1532 }
1533 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1534 }
1535 work_done = limit - avail;
1536 *budget -= work_done;
1537 dev->quota -= work_done;
1538 return 1;
1539}
1540
1541/**
1542 * rx_offload - process a received offload packet
1543 * @tdev: the offload device receiving the packet
1544 * @rq: the response queue that received the packet
1545 * @skb: the packet
1546 * @rx_gather: a gather list of packets if we are building a bundle
1547 * @gather_idx: index of the next available slot in the bundle
1548 *
1549 * Process an ingress offload packet and add it to the offload ingress
1550 * queue. Returns the index of the next available slot in the bundle.
1551 */
1552static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1553 struct sk_buff *skb, struct sk_buff *rx_gather[],
1554 unsigned int gather_idx)
1555{
1556 rq->offload_pkts++;
1557 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
1558
1559 if (rq->polling) {
1560 rx_gather[gather_idx++] = skb;
1561 if (gather_idx == RX_BUNDLE_SIZE) {
1562 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1563 gather_idx = 0;
1564 rq->offload_bundles++;
1565 }
1566 } else
1567 offload_enqueue(rq, skb);
1568
1569 return gather_idx;
1570}
1571
1572/**
1573 * restart_tx - check whether to restart suspended Tx queues
1574 * @qs: the queue set to resume
1575 *
1576 * Restarts suspended Tx queues of an SGE queue set if they have enough
1577 * free resources to resume operation.
1578 */
1579static void restart_tx(struct sge_qset *qs)
1580{
1581 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1582 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1583 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1584 qs->txq[TXQ_ETH].restarts++;
1585 if (netif_running(qs->netdev))
1586 netif_wake_queue(qs->netdev);
1587 }
1588
1589 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1590 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1591 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1592 qs->txq[TXQ_OFLD].restarts++;
1593 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1594 }
1595 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1596 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1597 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1598 qs->txq[TXQ_CTRL].restarts++;
1599 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1600 }
1601}
1602
1603/**
1604 * rx_eth - process an ingress ethernet packet
1605 * @adap: the adapter
1606 * @rq: the response queue that received the packet
1607 * @skb: the packet
1608 * @pad: amount of padding at the start of the buffer
1609 *
1610 * Process an ingress ethernet packet and deliver it to the stack.
1611 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1612 * if it was immediate data in a response.
1613 */
1614static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1615 struct sk_buff *skb, int pad)
1616{
1617 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1618 struct port_info *pi;
1619
1620 rq->eth_pkts++;
1621 skb_pull(skb, sizeof(*p) + pad);
1622 skb->dev = adap->port[p->iff];
1623 skb->dev->last_rx = jiffies;
1624 skb->protocol = eth_type_trans(skb, skb->dev);
1625 pi = netdev_priv(skb->dev);
1626 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1627 !p->fragment) {
1628 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1629 skb->ip_summed = CHECKSUM_UNNECESSARY;
1630 } else
1631 skb->ip_summed = CHECKSUM_NONE;
1632
1633 if (unlikely(p->vlan_valid)) {
1634 struct vlan_group *grp = pi->vlan_grp;
1635
1636 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1637 if (likely(grp))
1638 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1639 rq->polling);
1640 else
1641 dev_kfree_skb_any(skb);
1642 } else if (rq->polling)
1643 netif_receive_skb(skb);
1644 else
1645 netif_rx(skb);
1646}
1647
1648/**
1649 * handle_rsp_cntrl_info - handles control information in a response
1650 * @qs: the queue set corresponding to the response
1651 * @flags: the response control flags
1652 *
1653 * Handles the control information of an SGE response, such as GTS
1654 * indications and completion credits for the queue set's Tx queues.
1655 * HW coalesces credits; we don't do any extra SW coalescing.
1656 */
1657static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1658{
1659 unsigned int credits;
1660
1661#if USE_GTS
1662 if (flags & F_RSPD_TXQ0_GTS)
1663 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1664#endif
1665
1666 credits = G_RSPD_TXQ0_CR(flags);
1667 if (credits)
1668 qs->txq[TXQ_ETH].processed += credits;
1669
1670 credits = G_RSPD_TXQ2_CR(flags);
1671 if (credits)
1672 qs->txq[TXQ_CTRL].processed += credits;
1673
1674# if USE_GTS
1675 if (flags & F_RSPD_TXQ1_GTS)
1676 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1677# endif
1678 credits = G_RSPD_TXQ1_CR(flags);
1679 if (credits)
1680 qs->txq[TXQ_OFLD].processed += credits;
1681}
1682
1683/**
1684 * check_ring_db - check if we need to ring any doorbells
1685 * @adapter: the adapter
1686 * @qs: the queue set whose Tx queues are to be examined
1687 * @sleeping: indicates which Tx queue sent GTS
1688 *
1689 * Checks if some of a queue set's Tx queues need to ring their doorbells
1690 * to resume transmission after idling while they still have unprocessed
1691 * descriptors.
1692 */
1693static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1694 unsigned int sleeping)
1695{
1696 if (sleeping & F_RSPD_TXQ0_GTS) {
1697 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1698
1699 if (txq->cleaned + txq->in_use != txq->processed &&
1700 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1701 set_bit(TXQ_RUNNING, &txq->flags);
1702 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1703 V_EGRCNTX(txq->cntxt_id));
1704 }
1705 }
1706
1707 if (sleeping & F_RSPD_TXQ1_GTS) {
1708 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1709
1710 if (txq->cleaned + txq->in_use != txq->processed &&
1711 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1712 set_bit(TXQ_RUNNING, &txq->flags);
1713 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1714 V_EGRCNTX(txq->cntxt_id));
1715 }
1716 }
1717}
1718
1719/**
1720 * is_new_response - check if a response is newly written
1721 * @r: the response descriptor
1722 * @q: the response queue
1723 *
1724 * Returns true if a response descriptor contains a yet unprocessed
1725 * response.
1726 */
1727static inline int is_new_response(const struct rsp_desc *r,
1728 const struct sge_rspq *q)
1729{
1730 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1731}
1732
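The generation-bit convention is easiest to see in a consumer loop. The editorial sketch below mirrors, in simplified form, what process_responses() does further down (no credit refill or GTS handling), assuming the queue's gen field starts at 1 and is flipped on every index wrap.

static void example_drain_rspq(struct sge_rspq *q)
{
	struct rsp_desc *r = &q->desc[q->cidx];

	while (is_new_response(r, q)) {
		/* ... consume *r here ... */
		r++;
		if (++q->cidx == q->size) {	/* ring wrapped around */
			q->cidx = 0;
			q->gen ^= 1;		/* expected generation flips */
			r = q->desc;
		}
	}
}
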
1733#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1734#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1735 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1736 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1737 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1738
1739/* How long to delay the next interrupt in case of memory shortage, in units of 0.1 us (2500 = 250 us). */
1740#define NOMEM_INTR_DELAY 2500
1741
1742/**
1743 * process_responses - process responses from an SGE response queue
1744 * @adap: the adapter
1745 * @qs: the queue set to which the response queue belongs
1746 * @budget: how many responses can be processed in this round
1747 *
1748 * Process responses from an SGE response queue up to the supplied budget.
1749 * Responses include received packets as well as credits and other events
1750 * for the queues that belong to the response queue's queue set.
1751 * A negative budget is effectively unlimited.
1752 *
1753 * Additionally choose the interrupt holdoff time for the next interrupt
1754 * on this queue. If the system is under memory shortage use a fairly
1755 * long delay to help recovery.
1756 */
1757static int process_responses(struct adapter *adap, struct sge_qset *qs,
1758 int budget)
1759{
1760 struct sge_rspq *q = &qs->rspq;
1761 struct rsp_desc *r = &q->desc[q->cidx];
1762 int budget_left = budget;
1763 unsigned int sleeping = 0;
1764 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1765 int ngathered = 0;
1766
1767 q->next_holdoff = q->holdoff_tmr;
1768
1769 while (likely(budget_left && is_new_response(r, q))) {
1770 int eth, ethpad = 0;
1771 struct sk_buff *skb = NULL;
1772 u32 len, flags = ntohl(r->flags);
1773 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1774
1775 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1776
1777 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1778 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1779 if (!skb)
1780 goto no_mem;
1781
1782 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1783 skb->data[0] = CPL_ASYNC_NOTIF;
1784 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1785 q->async_notif++;
1786 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1787 skb = get_imm_packet(r);
1788 if (unlikely(!skb)) {
1789 no_mem:
1790 q->next_holdoff = NOMEM_INTR_DELAY;
1791 q->nomem++;
1792 /* consume one credit since we tried */
1793 budget_left--;
1794 break;
1795 }
1796 q->imm_data++;
1797 } else if ((len = ntohl(r->len_cq)) != 0) {
1798 struct sge_fl *fl;
1799
1800 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1801 fl->credits--;
1802 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1803 eth ? SGE_RX_DROP_THRES : 0);
1804 if (!skb)
1805 q->rx_drops++;
1806 else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
1807 __skb_pull(skb, 2);
1808 ethpad = 2;
1809 if (++fl->cidx == fl->size)
1810 fl->cidx = 0;
1811 } else
1812 q->pure_rsps++;
1813
1814 if (flags & RSPD_CTRL_MASK) {
1815 sleeping |= flags & RSPD_GTS_MASK;
1816 handle_rsp_cntrl_info(qs, flags);
1817 }
1818
1819 r++;
1820 if (unlikely(++q->cidx == q->size)) {
1821 q->cidx = 0;
1822 q->gen ^= 1;
1823 r = q->desc;
1824 }
1825 prefetch(r);
1826
1827 if (++q->credits >= (q->size / 4)) {
1828 refill_rspq(adap, q, q->credits);
1829 q->credits = 0;
1830 }
1831
1832 if (likely(skb != NULL)) {
1833 if (eth)
1834 rx_eth(adap, q, skb, ethpad);
1835 else {
1836 /* Preserve the RSS info in csum & priority */
1837 skb->csum = rss_hi;
1838 skb->priority = rss_lo;
1839 ngathered = rx_offload(&adap->tdev, q, skb,
1840 offload_skbs, ngathered);
1841 }
1842 }
1843
1844 --budget_left;
1845 }
1846
1847 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
1848 if (sleeping)
1849 check_ring_db(adap, qs, sleeping);
1850
1851 smp_mb(); /* commit Tx queue .processed updates */
1852 if (unlikely(qs->txq_stopped != 0))
1853 restart_tx(qs);
1854
1855 budget -= budget_left;
1856 return budget;
1857}
1858
1859static inline int is_pure_response(const struct rsp_desc *r)
1860{
1861 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1862
1863 return (n | r->len_cq) == 0;
1864}
1865
1866/**
1867 * napi_rx_handler - the NAPI handler for Rx processing
1868 * @dev: the net device
1869 * @budget: how many packets we can process in this round
1870 *
1871 * Handler for new data events when using NAPI.
1872 */
1873static int napi_rx_handler(struct net_device *dev, int *budget)
1874{
1875 struct adapter *adap = dev->priv;
1876 struct sge_qset *qs = dev2qset(dev);
1877 int effective_budget = min(*budget, dev->quota);
1878
1879 int work_done = process_responses(adap, qs, effective_budget);
1880 *budget -= work_done;
1881 dev->quota -= work_done;
1882
1883 if (work_done >= effective_budget)
1884 return 1;
1885
1886 netif_rx_complete(dev);
1887
1888 /*
1889 * Because we don't atomically flush the following write it is
1890 * possible that in very rare cases it can reach the device in a way
1891 * that races with a new response being written plus an error interrupt
1892 * causing the NAPI interrupt handler below to return unhandled status
1893 * to the OS. To protect against this would require flushing the write
1894 * and doing both the write and the flush with interrupts off. Way too
1895 * expensive and unjustifiable given the rarity of the race.
1896 *
1897 * The race cannot happen at all with MSI-X.
1898 */
1899 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
1900 V_NEWTIMER(qs->rspq.next_holdoff) |
1901 V_NEWINDEX(qs->rspq.cidx));
1902 return 0;
1903}
1904
1905/*
1906 * Returns true if the device is already scheduled for polling.
1907 */
1908static inline int napi_is_scheduled(struct net_device *dev)
1909{
1910 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1911}
1912
1913/**
1914 * process_pure_responses - process pure responses from a response queue
1915 * @adap: the adapter
1916 * @qs: the queue set owning the response queue
1917 * @r: the first pure response to process
1918 *
1919 * A simpler version of process_responses() that handles only pure (i.e.,
1920 * non data-carrying) responses. Such responses are too lightweight to
1921 * justify calling a softirq under NAPI, so we handle them specially in
1922 * the interrupt handler. The function is called with a pointer to a
1923 * response, which the caller must ensure is a valid pure response.
1924 *
1925 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
1926 */
1927static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
1928 struct rsp_desc *r)
1929{
1930 struct sge_rspq *q = &qs->rspq;
1931 unsigned int sleeping = 0;
1932
1933 do {
1934 u32 flags = ntohl(r->flags);
1935
1936 r++;
1937 if (unlikely(++q->cidx == q->size)) {
1938 q->cidx = 0;
1939 q->gen ^= 1;
1940 r = q->desc;
1941 }
1942 prefetch(r);
1943
1944 if (flags & RSPD_CTRL_MASK) {
1945 sleeping |= flags & RSPD_GTS_MASK;
1946 handle_rsp_cntrl_info(qs, flags);
1947 }
1948
1949 q->pure_rsps++;
1950 if (++q->credits >= (q->size / 4)) {
1951 refill_rspq(adap, q, q->credits);
1952 q->credits = 0;
1953 }
1954 } while (is_new_response(r, q) && is_pure_response(r));
1955
1956 if (sleeping)
1957 check_ring_db(adap, qs, sleeping);
1958
1959 smp_mb(); /* commit Tx queue .processed updates */
1960 if (unlikely(qs->txq_stopped != 0))
1961 restart_tx(qs);
1962
1963 return is_new_response(r, q);
1964}
1965
1966/**
1967 * handle_responses - decide what to do with new responses in NAPI mode
1968 * @adap: the adapter
1969 * @q: the response queue
1970 *
1971 * This is used by the NAPI interrupt handlers to decide what to do with
1972 * new SGE responses. If there are no new responses it returns -1. If
1973 * there are new responses and they are pure (i.e., non-data carrying)
1974 * it handles them straight in hard interrupt context as they are very
1975 * cheap and don't deliver any packets. Finally, if there are any data
1976 * signaling responses it schedules the NAPI handler. Returns 1 if it
1977 * schedules NAPI, 0 if all new responses were pure.
1978 *
1979 * The caller must ascertain NAPI is not already running.
1980 */
1981static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
1982{
1983 struct sge_qset *qs = rspq_to_qset(q);
1984 struct rsp_desc *r = &q->desc[q->cidx];
1985
1986 if (!is_new_response(r, q))
1987 return -1;
1988 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
1989 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
1990 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
1991 return 0;
1992 }
1993 if (likely(__netif_rx_schedule_prep(qs->netdev)))
1994 __netif_rx_schedule(qs->netdev);
1995 return 1;
1996}
1997
1998/*
1999 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2000 * (i.e., response queue serviced in hard interrupt).
2001 */
2002irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2003{
2004 struct sge_qset *qs = cookie;
2005 struct adapter *adap = qs->netdev->priv;
2006 struct sge_rspq *q = &qs->rspq;
2007
2008 spin_lock(&q->lock);
2009 if (process_responses(adap, qs, -1) == 0)
2010 q->unhandled_irqs++;
2011 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2012 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2013 spin_unlock(&q->lock);
2014 return IRQ_HANDLED;
2015}
2016
2017/*
2018 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2019 * (i.e., response queue serviced by NAPI polling).
2020 */
2021irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2022{
2023 struct sge_qset *qs = cookie;
2024 struct adapter *adap = qs->netdev->priv;
2025 struct sge_rspq *q = &qs->rspq;
2026
2027 spin_lock(&q->lock);
2028 BUG_ON(napi_is_scheduled(qs->netdev));
2029
2030 if (handle_responses(adap, q) < 0)
2031 q->unhandled_irqs++;
2032 spin_unlock(&q->lock);
2033 return IRQ_HANDLED;
2034}
2035
2036/*
2037 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2038 * SGE response queues as well as error and other async events as they all use
2039 * the same MSI vector. We use one SGE response queue per port in this mode
2040 * and protect all response queues with queue 0's lock.
2041 */
2042static irqreturn_t t3_intr_msi(int irq, void *cookie)
2043{
2044 int new_packets = 0;
2045 struct adapter *adap = cookie;
2046 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2047
2048 spin_lock(&q->lock);
2049
2050 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2051 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2052 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2053 new_packets = 1;
2054 }
2055
2056 if (adap->params.nports == 2 &&
2057 process_responses(adap, &adap->sge.qs[1], -1)) {
2058 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2059
2060 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2061 V_NEWTIMER(q1->next_holdoff) |
2062 V_NEWINDEX(q1->cidx));
2063 new_packets = 1;
2064 }
2065
2066 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2067 q->unhandled_irqs++;
2068
2069 spin_unlock(&q->lock);
2070 return IRQ_HANDLED;
2071}
2072
2073static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2074{
2075 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2076 if (likely(__netif_rx_schedule_prep(dev)))
2077 __netif_rx_schedule(dev);
2078 return 1;
2079 }
2080 return 0;
2081}
2082
2083/*
2084 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2085 * by NAPI polling). Handles data events from SGE response queues as well as
2086 * error and other async events as they all use the same MSI vector. We use
2087 * one SGE response queue per port in this mode and protect all response
2088 * queues with queue 0's lock.
2089 */
2090irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2091{
2092 int new_packets;
2093 struct adapter *adap = cookie;
2094 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2095
2096 spin_lock(&q->lock);
2097
2098 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2099 if (adap->params.nports == 2)
2100 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2101 &adap->sge.qs[1].rspq);
2102 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2103 q->unhandled_irqs++;
2104
2105 spin_unlock(&q->lock);
2106 return IRQ_HANDLED;
2107}
2108
2109/*
2110 * A helper function that processes responses and issues GTS.
2111 */
2112static inline int process_responses_gts(struct adapter *adap,
2113 struct sge_rspq *rq)
2114{
2115 int work;
2116
2117 work = process_responses(adap, rspq_to_qset(rq), -1);
2118 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2119 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2120 return work;
2121}
2122
2123/*
2124 * The legacy INTx interrupt handler. This needs to handle data events from
2125 * SGE response queues as well as error and other async events as they all use
2126 * the same interrupt pin. We use one SGE response queue per port in this mode
2127 * and protect all response queues with queue 0's lock.
2128 */
2129static irqreturn_t t3_intr(int irq, void *cookie)
2130{
2131 int work_done, w0, w1;
2132 struct adapter *adap = cookie;
2133 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2134 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2135
2136 spin_lock(&q0->lock);
2137
2138 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2139 w1 = adap->params.nports == 2 &&
2140 is_new_response(&q1->desc[q1->cidx], q1);
2141
2142 if (likely(w0 | w1)) {
2143 t3_write_reg(adap, A_PL_CLI, 0);
2144 t3_read_reg(adap, A_PL_CLI); /* flush */
2145
2146 if (likely(w0))
2147 process_responses_gts(adap, q0);
2148
2149 if (w1)
2150 process_responses_gts(adap, q1);
2151
2152 work_done = w0 | w1;
2153 } else
2154 work_done = t3_slow_intr_handler(adap);
2155
2156 spin_unlock(&q0->lock);
2157 return IRQ_RETVAL(work_done != 0);
2158}
2159
2160/*
2161 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2162 * Handles data events from SGE response queues as well as error and other
2163 * async events as they all use the same interrupt pin. We use one SGE
2164 * response queue per port in this mode and protect all response queues with
2165 * queue 0's lock.
2166 */
2167static irqreturn_t t3b_intr(int irq, void *cookie)
2168{
2169 u32 map;
2170 struct adapter *adap = cookie;
2171 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2172
2173 t3_write_reg(adap, A_PL_CLI, 0);
2174 map = t3_read_reg(adap, A_SG_DATA_INTR);
2175
2176 if (unlikely(!map)) /* shared interrupt, most likely */
2177 return IRQ_NONE;
2178
2179 spin_lock(&q0->lock);
2180
2181 if (unlikely(map & F_ERRINTR))
2182 t3_slow_intr_handler(adap);
2183
2184 if (likely(map & 1))
2185 process_responses_gts(adap, q0);
2186
2187 if (map & 2)
2188 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2189
2190 spin_unlock(&q0->lock);
2191 return IRQ_HANDLED;
2192}
2193
2194/*
2195 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2196 * Handles data events from SGE response queues as well as error and other
2197 * async events as they all use the same interrupt pin. We use one SGE
2198 * response queue per port in this mode and protect all response queues with
2199 * queue 0's lock.
2200 */
2201static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2202{
2203 u32 map;
2204 struct net_device *dev;
2205 struct adapter *adap = cookie;
2206 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2207
2208 t3_write_reg(adap, A_PL_CLI, 0);
2209 map = t3_read_reg(adap, A_SG_DATA_INTR);
2210
2211 if (unlikely(!map)) /* shared interrupt, most likely */
2212 return IRQ_NONE;
2213
2214 spin_lock(&q0->lock);
2215
2216 if (unlikely(map & F_ERRINTR))
2217 t3_slow_intr_handler(adap);
2218
2219 if (likely(map & 1)) {
2220 dev = adap->sge.qs[0].netdev;
2221
2222 if (likely(__netif_rx_schedule_prep(dev)))
2223 __netif_rx_schedule(dev);
2224 }
2225 if (map & 2) {
2226 dev = adap->sge.qs[1].netdev;
2227
2228 if (likely(__netif_rx_schedule_prep(dev)))
2229 __netif_rx_schedule(dev);
2230 }
2231
2232 spin_unlock(&q0->lock);
2233 return IRQ_HANDLED;
2234}
2235
2236/**
2237 * t3_intr_handler - select the top-level interrupt handler
2238 * @adap: the adapter
2239 * @polling: whether using NAPI to service response queues
2240 *
2241 * Selects the top-level interrupt handler based on the type of interrupts
2242 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2243 * response queues.
2244 */
2245intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2246{
2247 if (adap->flags & USING_MSIX)
2248 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2249 if (adap->flags & USING_MSI)
2250 return polling ? t3_intr_msi_napi : t3_intr_msi;
2251 if (adap->params.rev > 0)
2252 return polling ? t3b_intr_napi : t3b_intr;
2253 return t3_intr;
2254}
2255
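A hedged editorial sketch of how a probe path could wire up the handler selected above for the non-MSI-X cases, where the interrupt cookie is the adapter itself. The IRQF_SHARED flag and the "cxgb3" name string are illustrative assumptions, not taken from this patch, and <linux/interrupt.h> is assumed to be included.

static int example_setup_irq(struct adapter *adap, int polling)
{
	return request_irq(adap->pdev->irq, t3_intr_handler(adap, polling),
			   IRQF_SHARED, "cxgb3", adap);
}
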
2256/**
2257 * t3_sge_err_intr_handler - SGE async event interrupt handler
2258 * @adapter: the adapter
2259 *
2260 * Interrupt handler for SGE asynchronous (non-data) events.
2261 */
2262void t3_sge_err_intr_handler(struct adapter *adapter)
2263{
2264 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2265
2266 if (status & F_RSPQCREDITOVERFOW)
2267 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2268
2269 if (status & F_RSPQDISABLED) {
2270 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2271
2272 CH_ALERT(adapter,
2273 "packet delivered to disabled response queue "
2274 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2275 }
2276
2277 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2278 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2279 t3_fatal_err(adapter);
2280}
2281
2282/**
2283 * sge_timer_cb - perform periodic maintenance of an SGE qset
2284 * @data: the SGE queue set to maintain
2285 *
2286 * Runs periodically from a timer to perform maintenance of an SGE queue
2287 * set. It performs two tasks:
2288 *
2289 * a) Cleans up any completed Tx descriptors that may still be pending.
2290 * Normal descriptor cleanup happens when new packets are added to a Tx
2291 * queue so this timer is relatively infrequent and does any cleanup only
2292 * if the Tx queue has not seen any new packets in a while. We make a
2293 * best effort attempt to reclaim descriptors, in that we don't wait
2294 * around if we cannot get a queue's lock (which most likely is because
2295 * someone else is queueing new packets and so will also handle the clean
2296 * up). Since control queues use immediate data exclusively we don't
2297 * bother cleaning them up here.
2298 *
2299 * b) Replenishes Rx queues that have run out due to memory shortage.
2300 * Normally new Rx buffers are added when existing ones are consumed but
2301 * when out of memory a queue can become empty. We try to add only a few
2302 * buffers here; the queue will be replenished fully as these new buffers
2303 * are used up if memory shortage has subsided.
2304 */
2305static void sge_timer_cb(unsigned long data)
2306{
2307 spinlock_t *lock;
2308 struct sge_qset *qs = (struct sge_qset *)data;
2309 struct adapter *adap = qs->netdev->priv;
2310
2311 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2312 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2313 spin_unlock(&qs->txq[TXQ_ETH].lock);
2314 }
2315 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2316 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2317 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2318 }
2319 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2320 &adap->sge.qs[0].rspq.lock;
2321 if (spin_trylock_irq(lock)) {
2322 if (!napi_is_scheduled(qs->netdev)) {
2323 if (qs->fl[0].credits < qs->fl[0].size)
2324 __refill_fl(adap, &qs->fl[0]);
2325 if (qs->fl[1].credits < qs->fl[1].size)
2326 __refill_fl(adap, &qs->fl[1]);
2327 }
2328 spin_unlock_irq(lock);
2329 }
2330 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2331}
2332
2333/**
2334 * t3_update_qset_coalesce - update coalescing settings for a queue set
2335 * @qs: the SGE queue set
2336 * @p: new queue set parameters
2337 *
2338 * Update the coalescing settings for an SGE queue set. Nothing is done
2339 * if the queue set is not initialized yet.
2340 */
2341void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2342{
2343 if (!qs->netdev)
2344 return;
2345
2346 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2347 qs->rspq.polling = p->polling;
2348 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2349}
2350
2351/**
2352 * t3_sge_alloc_qset - initialize an SGE queue set
2353 * @adapter: the adapter
2354 * @id: the queue set id
2355 * @nports: how many Ethernet ports will be using this queue set
2356 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2357 * @p: configuration parameters for this queue set
2358 * @ntxq: number of Tx queues for the queue set
2359 * @netdev: net device associated with this queue set
2360 *
2361 * Allocate resources and initialize an SGE queue set. A queue set
2362 * comprises a response queue, two Rx free-buffer queues, and up to 3
2363 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2364 * queue, offload queue, and control queue.
2365 */
2366int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2367 int irq_vec_idx, const struct qset_params *p,
2368 int ntxq, struct net_device *netdev)
2369{
2370 int i, ret = -ENOMEM;
2371 struct sge_qset *q = &adapter->sge.qs[id];
2372
2373 init_qset_cntxt(q, id);
2374 init_timer(&q->tx_reclaim_timer);
2375 q->tx_reclaim_timer.data = (unsigned long)q;
2376 q->tx_reclaim_timer.function = sge_timer_cb;
2377
2378 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2379 sizeof(struct rx_desc),
2380 sizeof(struct rx_sw_desc),
2381 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2382 if (!q->fl[0].desc)
2383 goto err;
2384
2385 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2386 sizeof(struct rx_desc),
2387 sizeof(struct rx_sw_desc),
2388 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2389 if (!q->fl[1].desc)
2390 goto err;
2391
2392 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2393 sizeof(struct rsp_desc), 0,
2394 &q->rspq.phys_addr, NULL);
2395 if (!q->rspq.desc)
2396 goto err;
2397
2398 for (i = 0; i < ntxq; ++i) {
2399 /*
2400 * The control queue always uses immediate data so does not
2401 * need to keep track of any sk_buffs.
2402 */
2403 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2404
2405 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2406 sizeof(struct tx_desc), sz,
2407 &q->txq[i].phys_addr,
2408 &q->txq[i].sdesc);
2409 if (!q->txq[i].desc)
2410 goto err;
2411
2412 q->txq[i].gen = 1;
2413 q->txq[i].size = p->txq_size[i];
2414 spin_lock_init(&q->txq[i].lock);
2415 skb_queue_head_init(&q->txq[i].sendq);
2416 }
2417
2418 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2419 (unsigned long)q);
2420 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2421 (unsigned long)q);
2422
2423 q->fl[0].gen = q->fl[1].gen = 1;
2424 q->fl[0].size = p->fl_size;
2425 q->fl[1].size = p->jumbo_size;
2426
2427 q->rspq.gen = 1;
2428 q->rspq.size = p->rspq_size;
2429 spin_lock_init(&q->rspq.lock);
2430
2431 q->txq[TXQ_ETH].stop_thres = nports *
2432 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2433
2434 if (ntxq == 1) {
2435 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2436 sizeof(struct cpl_rx_pkt);
2437 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2438 sizeof(struct cpl_rx_pkt);
2439 } else {
2440 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2441 sizeof(struct cpl_rx_data);
2442 q->fl[1].buf_size = (16 * 1024) -
2443 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2444 }
2445
2446 spin_lock(&adapter->sge.reg_lock);
2447
2448 /* FL threshold comparison uses < */
2449 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2450 q->rspq.phys_addr, q->rspq.size,
2451 q->fl[0].buf_size, 1, 0);
2452 if (ret)
2453 goto err_unlock;
2454
2455 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2456 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2457 q->fl[i].phys_addr, q->fl[i].size,
2458 q->fl[i].buf_size, p->cong_thres, 1,
2459 0);
2460 if (ret)
2461 goto err_unlock;
2462 }
2463
2464 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2465 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2466 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2467 1, 0);
2468 if (ret)
2469 goto err_unlock;
2470
2471 if (ntxq > 1) {
2472 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2473 USE_GTS, SGE_CNTXT_OFLD, id,
2474 q->txq[TXQ_OFLD].phys_addr,
2475 q->txq[TXQ_OFLD].size, 0, 1, 0);
2476 if (ret)
2477 goto err_unlock;
2478 }
2479
2480 if (ntxq > 2) {
2481 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2482 SGE_CNTXT_CTRL, id,
2483 q->txq[TXQ_CTRL].phys_addr,
2484 q->txq[TXQ_CTRL].size,
2485 q->txq[TXQ_CTRL].token, 1, 0);
2486 if (ret)
2487 goto err_unlock;
2488 }
2489
2490 spin_unlock(&adapter->sge.reg_lock);
2491 q->netdev = netdev;
2492 t3_update_qset_coalesce(q, p);
2493
2494 /*
2495 * We use atalk_ptr as a backpointer to a qset. In case a device is
2496 * associated with multiple queue sets only the first one sets
2497 * atalk_ptr.
2498 */
2499 if (netdev->atalk_ptr == NULL)
2500 netdev->atalk_ptr = q;
2501
2502 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2503 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2504 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2505
2506 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2507 V_NEWTIMER(q->rspq.holdoff_tmr));
2508
2509 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2510 return 0;
2511
2512 err_unlock:
2513 spin_unlock(&adapter->sge.reg_lock);
2514 err:
2515 t3_free_qset(adapter, q);
2516 return ret;
2517}
2518
2519/**
2520 * t3_free_sge_resources - free SGE resources
2521 * @adap: the adapter
2522 *
2523 * Frees resources used by the SGE queue sets.
2524 */
2525void t3_free_sge_resources(struct adapter *adap)
2526{
2527 int i;
2528
2529 for (i = 0; i < SGE_QSETS; ++i)
2530 t3_free_qset(adap, &adap->sge.qs[i]);
2531}
2532
2533/**
2534 * t3_sge_start - enable SGE
2535 * @adap: the adapter
2536 *
2537 * Enables the SGE for DMAs. This is the last step in starting packet
2538 * transfers.
2539 */
2540void t3_sge_start(struct adapter *adap)
2541{
2542 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2543}
2544
2545/**
2546 * t3_sge_stop - disable SGE operation
2547 * @adap: the adapter
2548 *
2549 * Disables the DMA engine. This can be called in emergencies (e.g.,
2550 * from error interrupts) or from normal process context. In the latter
2551 * case it also disables any pending queue restart tasklets. Note that
2552 * if it is called in interrupt context it cannot disable the restart
2553 * tasklets as it cannot wait; however, the tasklets will have no effect
2554 * since the doorbells are disabled and the driver will call this again
2555 * later from process context, at which time the tasklets will be stopped
2556 * if they are still running.
2557 */
2558void t3_sge_stop(struct adapter *adap)
2559{
2560 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2561 if (!in_interrupt()) {
2562 int i;
2563
2564 for (i = 0; i < SGE_QSETS; ++i) {
2565 struct sge_qset *qs = &adap->sge.qs[i];
2566
2567 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2568 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2569 }
2570 }
2571}
2572
2573/**
2574 * t3_sge_init - initialize SGE
2575 * @adap: the adapter
2576 * @p: the SGE parameters
2577 *
2578 * Performs SGE initialization needed every time after a chip reset.
2579 * We do not initialize any of the queue sets here; instead the driver
2580 * top-level must request those individually. We also do not enable DMA
2581 * here, that should be done after the queues have been set up.
2582 */
2583void t3_sge_init(struct adapter *adap, struct sge_params *p)
2584{
2585 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2586
2587 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2588 F_CQCRDTCTRL |
2589 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2590 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2591#if SGE_NUM_GENBITS == 1
2592 ctrl |= F_EGRGENCTRL;
2593#endif
2594 if (adap->params.rev > 0) {
2595 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2596 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2597 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2598 }
2599 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2600 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2601 V_LORCQDRBTHRSH(512));
2602 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2603 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2604 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2605 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2606 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2607 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2608 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2609 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2610 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2611}
2612
2613/**
2614 * t3_sge_prep - one-time SGE initialization
2615 * @adap: the associated adapter
2616 * @p: SGE parameters
2617 *
2618 * Performs one-time initialization of SGE SW state. Includes determining
2619 * defaults for the assorted SGE parameters, which admins can change until
2620 * they are used to initialize the SGE.
2621 */
2622void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2623{
2624 int i;
2625
2626 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2627 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2628
2629 for (i = 0; i < SGE_QSETS; ++i) {
2630 struct qset_params *q = p->qset + i;
2631
2632 q->polling = adap->params.rev > 0;
2633 q->coalesce_usecs = 5;
2634 q->rspq_size = 1024;
2635 q->fl_size = 4096;
2636 q->jumbo_size = 512;
2637 q->txq_size[TXQ_ETH] = 1024;
2638 q->txq_size[TXQ_OFLD] = 1024;
2639 q->txq_size[TXQ_CTRL] = 256;
2640 q->cong_thres = 0;
2641 }
2642
2643 spin_lock_init(&adap->sge.reg_lock);
2644}
2645
2646/**
2647 * t3_get_desc - dump an SGE descriptor for debugging purposes
2648 * @qs: the queue set
2649 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2650 * @idx: the descriptor index in the queue
2651 * @data: where to dump the descriptor contents
2652 *
2653 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2654 * size of the descriptor.
2655 */
2656int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2657 unsigned char *data)
2658{
2659 if (qnum >= 6)
2660 return -EINVAL;
2661
2662 if (qnum < 3) {
2663 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2664 return -EINVAL;
2665 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2666 return sizeof(struct tx_desc);
2667 }
2668
2669 if (qnum == 3) {
2670 if (!qs->rspq.desc || idx >= qs->rspq.size)
2671 return -EINVAL;
2672 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2673 return sizeof(struct rsp_desc);
2674 }
2675
2676 qnum -= 4;
2677 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2678 return -EINVAL;
2679 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2680 return sizeof(struct rx_desc);
2681}
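
As a usage illustration only, the sketch below dumps one Ethernet Tx descriptor via t3_get_desc(); the function name and the hex-printing loop are editorial, and the buffer is sized for a Tx descriptor since qnum TXQ_ETH (0) is requested.

static void example_dump_eth_txd(const struct sge_qset *qs, unsigned int idx)
{
	unsigned char buf[sizeof(struct tx_desc)];
	int i, len = t3_get_desc(qs, TXQ_ETH, idx, buf);

	if (len < 0)
		return;		/* bad queue number or index */
	printk(KERN_DEBUG "ETH txd %u:", idx);
	for (i = 0; i < len; i++)
		printk(" %02x", buf[i]);
	printk("\n");
}
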
diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h
new file mode 100644
index 000000000000..514869e26a76
--- /dev/null
+++ b/drivers/net/cxgb3/sge_defs.h
@@ -0,0 +1,251 @@
1/*
2 * This file is automatically generated --- any changes will be lost.
3 */
4
5#ifndef _SGE_DEFS_H
6#define _SGE_DEFS_H
7
8#define S_EC_CREDITS 0
9#define M_EC_CREDITS 0x7FFF
10#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
11#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
12
13#define S_EC_GTS 15
14#define V_EC_GTS(x) ((x) << S_EC_GTS)
15#define F_EC_GTS V_EC_GTS(1U)
16
17#define S_EC_INDEX 16
18#define M_EC_INDEX 0xFFFF
19#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
20#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
21
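The generated macros in this header follow one pattern throughout: S_x is a field's bit offset, M_x its width mask, V_x(v) shifts a value into position, G_x(w) extracts it, and F_x is the set form of a single-bit field. A small editorial round-trip sketch, using only the EC_* macros defined just above (the u32 type is assumed from kernel headers):

static inline unsigned int example_ec_roundtrip(void)
{
	u32 word = V_EC_CREDITS(100) | F_EC_GTS | V_EC_INDEX(7);

	return G_EC_CREDITS(word);	/* yields 100 */
}
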
22#define S_EC_SIZE 0
23#define M_EC_SIZE 0xFFFF
24#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
25#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
26
27#define S_EC_BASE_LO 16
28#define M_EC_BASE_LO 0xFFFF
29#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
30#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
31
32#define S_EC_BASE_HI 0
33#define M_EC_BASE_HI 0xF
34#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
35#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
36
37#define S_EC_RESPQ 4
38#define M_EC_RESPQ 0x7
39#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
40#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
41
42#define S_EC_TYPE 7
43#define M_EC_TYPE 0x7
44#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
45#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
46
47#define S_EC_GEN 10
48#define V_EC_GEN(x) ((x) << S_EC_GEN)
49#define F_EC_GEN V_EC_GEN(1U)
50
51#define S_EC_UP_TOKEN 11
52#define M_EC_UP_TOKEN 0xFFFFF
53#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
54#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
55
56#define S_EC_VALID 31
57#define V_EC_VALID(x) ((x) << S_EC_VALID)
58#define F_EC_VALID V_EC_VALID(1U)
59
60#define S_RQ_MSI_VEC 20
61#define M_RQ_MSI_VEC 0x3F
62#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
63#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
64
65#define S_RQ_INTR_EN 26
66#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
67#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
68
69#define S_RQ_GEN 28
70#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
71#define F_RQ_GEN V_RQ_GEN(1U)
72
73#define S_CQ_INDEX 0
74#define M_CQ_INDEX 0xFFFF
75#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
76#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
77
78#define S_CQ_SIZE 16
79#define M_CQ_SIZE 0xFFFF
80#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
81#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
82
83#define S_CQ_BASE_HI 0
84#define M_CQ_BASE_HI 0xFFFFF
85#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
86#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
87
88#define S_CQ_RSPQ 20
89#define M_CQ_RSPQ 0x3F
90#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
91#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
92
93#define S_CQ_ASYNC_NOTIF 26
94#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
95#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
96
97#define S_CQ_ARMED 27
98#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
99#define F_CQ_ARMED V_CQ_ARMED(1U)
100
101#define S_CQ_ASYNC_NOTIF_SOL 28
102#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
103#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
104
105#define S_CQ_GEN 29
106#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
107#define F_CQ_GEN V_CQ_GEN(1U)
108
109#define S_CQ_OVERFLOW_MODE 31
110#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
111#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
112
113#define S_CQ_CREDITS 0
114#define M_CQ_CREDITS 0xFFFF
115#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
116#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
117
118#define S_CQ_CREDIT_THRES 16
119#define M_CQ_CREDIT_THRES 0x1FFF
120#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
121#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
122
123#define S_FL_BASE_HI 0
124#define M_FL_BASE_HI 0xFFFFF
125#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
126#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
127
128#define S_FL_INDEX_LO 20
129#define M_FL_INDEX_LO 0xFFF
130#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
131#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
132
133#define S_FL_INDEX_HI 0
134#define M_FL_INDEX_HI 0xF
135#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
136#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
137
138#define S_FL_SIZE 4
139#define M_FL_SIZE 0xFFFF
140#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
141#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
142
143#define S_FL_GEN 20
144#define V_FL_GEN(x) ((x) << S_FL_GEN)
145#define F_FL_GEN V_FL_GEN(1U)
146
147#define S_FL_ENTRY_SIZE_LO 21
148#define M_FL_ENTRY_SIZE_LO 0x7FF
149#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
150#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
151
152#define S_FL_ENTRY_SIZE_HI 0
153#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
154#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
155#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
156
157#define S_FL_CONG_THRES 21
158#define M_FL_CONG_THRES 0x3FF
159#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
160#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
161
162#define S_FL_GTS 31
163#define V_FL_GTS(x) ((x) << S_FL_GTS)
164#define F_FL_GTS V_FL_GTS(1U)
165
166#define S_FLD_GEN1 31
167#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
168#define F_FLD_GEN1 V_FLD_GEN1(1U)
169
170#define S_FLD_GEN2 0
171#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
172#define F_FLD_GEN2 V_FLD_GEN2(1U)
173
174#define S_RSPD_TXQ1_CR 0
175#define M_RSPD_TXQ1_CR 0x7F
176#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
177#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
178
179#define S_RSPD_TXQ1_GTS 7
180#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
181#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
182
183#define S_RSPD_TXQ2_CR 8
184#define M_RSPD_TXQ2_CR 0x7F
185#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
186#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
187
188#define S_RSPD_TXQ2_GTS 15
189#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
190#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
191
192#define S_RSPD_TXQ0_CR 16
193#define M_RSPD_TXQ0_CR 0x7F
194#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
195#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
196
197#define S_RSPD_TXQ0_GTS 23
198#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
199#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
200
201#define S_RSPD_EOP 24
202#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
203#define F_RSPD_EOP V_RSPD_EOP(1U)
204
205#define S_RSPD_SOP 25
206#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
207#define F_RSPD_SOP V_RSPD_SOP(1U)
208
209#define S_RSPD_ASYNC_NOTIF 26
210#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
211#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
212
213#define S_RSPD_FL0_GTS 27
214#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
215#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
216
217#define S_RSPD_FL1_GTS 28
218#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
219#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
220
221#define S_RSPD_IMM_DATA_VALID 29
222#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
223#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
224
225#define S_RSPD_OFFLOAD 30
226#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
227#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
228
229#define S_RSPD_GEN1 31
230#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
231#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
232
233#define S_RSPD_LEN 0
234#define M_RSPD_LEN 0x7FFFFFFF
235#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
236#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
237
238#define S_RSPD_FLQ 31
239#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
240#define F_RSPD_FLQ V_RSPD_FLQ(1U)
241
242#define S_RSPD_GEN2 0
243#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
244#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
245
246#define S_RSPD_INR_VEC 1
247#define M_RSPD_INR_VEC 0x7F
248#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
249#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
250
251#endif /* _SGE_DEFS_H */
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
new file mode 100644
index 000000000000..b7a1a310dfd4
--- /dev/null
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -0,0 +1,1444 @@
1/*
2 * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef T3_CPL_H
33#define T3_CPL_H
34
35#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
36# include <asm/byteorder.h>
37#endif
38
39enum CPL_opcode {
40 CPL_PASS_OPEN_REQ = 0x1,
41 CPL_PASS_ACCEPT_RPL = 0x2,
42 CPL_ACT_OPEN_REQ = 0x3,
43 CPL_SET_TCB = 0x4,
44 CPL_SET_TCB_FIELD = 0x5,
45 CPL_GET_TCB = 0x6,
46 CPL_PCMD = 0x7,
47 CPL_CLOSE_CON_REQ = 0x8,
48 CPL_CLOSE_LISTSRV_REQ = 0x9,
49 CPL_ABORT_REQ = 0xA,
50 CPL_ABORT_RPL = 0xB,
51 CPL_TX_DATA = 0xC,
52 CPL_RX_DATA_ACK = 0xD,
53 CPL_TX_PKT = 0xE,
54 CPL_RTE_DELETE_REQ = 0xF,
55 CPL_RTE_WRITE_REQ = 0x10,
56 CPL_RTE_READ_REQ = 0x11,
57 CPL_L2T_WRITE_REQ = 0x12,
58 CPL_L2T_READ_REQ = 0x13,
59 CPL_SMT_WRITE_REQ = 0x14,
60 CPL_SMT_READ_REQ = 0x15,
61 CPL_TX_PKT_LSO = 0x16,
62 CPL_PCMD_READ = 0x17,
63 CPL_BARRIER = 0x18,
64 CPL_TID_RELEASE = 0x1A,
65
66 CPL_CLOSE_LISTSRV_RPL = 0x20,
67 CPL_ERROR = 0x21,
68 CPL_GET_TCB_RPL = 0x22,
69 CPL_L2T_WRITE_RPL = 0x23,
70 CPL_PCMD_READ_RPL = 0x24,
71 CPL_PCMD_RPL = 0x25,
72 CPL_PEER_CLOSE = 0x26,
73 CPL_RTE_DELETE_RPL = 0x27,
74 CPL_RTE_WRITE_RPL = 0x28,
75 CPL_RX_DDP_COMPLETE = 0x29,
76 CPL_RX_PHYS_ADDR = 0x2A,
77 CPL_RX_PKT = 0x2B,
78 CPL_RX_URG_NOTIFY = 0x2C,
79 CPL_SET_TCB_RPL = 0x2D,
80 CPL_SMT_WRITE_RPL = 0x2E,
81 CPL_TX_DATA_ACK = 0x2F,
82
83 CPL_ABORT_REQ_RSS = 0x30,
84 CPL_ABORT_RPL_RSS = 0x31,
85 CPL_CLOSE_CON_RPL = 0x32,
86 CPL_ISCSI_HDR = 0x33,
87 CPL_L2T_READ_RPL = 0x34,
88 CPL_RDMA_CQE = 0x35,
89 CPL_RDMA_CQE_READ_RSP = 0x36,
90 CPL_RDMA_CQE_ERR = 0x37,
91 CPL_RTE_READ_RPL = 0x38,
92 CPL_RX_DATA = 0x39,
93
94 CPL_ACT_OPEN_RPL = 0x40,
95 CPL_PASS_OPEN_RPL = 0x41,
96 CPL_RX_DATA_DDP = 0x42,
97 CPL_SMT_READ_RPL = 0x43,
98
99 CPL_ACT_ESTABLISH = 0x50,
100 CPL_PASS_ESTABLISH = 0x51,
101
102 CPL_PASS_ACCEPT_REQ = 0x70,
103
104 CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
105
106 CPL_TX_DMA_ACK = 0xA0,
107 CPL_RDMA_READ_REQ = 0xA1,
108 CPL_RDMA_TERMINATE = 0xA2,
109 CPL_TRACE_PKT = 0xA3,
110 CPL_RDMA_EC_STATUS = 0xA5,
111
112 NUM_CPL_CMDS /* must be last and previous entries must be sorted */
113};
114
115enum CPL_error {
116 CPL_ERR_NONE = 0,
117 CPL_ERR_TCAM_PARITY = 1,
118 CPL_ERR_TCAM_FULL = 3,
119 CPL_ERR_CONN_RESET = 20,
120 CPL_ERR_CONN_EXIST = 22,
121 CPL_ERR_ARP_MISS = 23,
122 CPL_ERR_BAD_SYN = 24,
123 CPL_ERR_CONN_TIMEDOUT = 30,
124 CPL_ERR_XMIT_TIMEDOUT = 31,
125 CPL_ERR_PERSIST_TIMEDOUT = 32,
126 CPL_ERR_FINWAIT2_TIMEDOUT = 33,
127 CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
128 CPL_ERR_RTX_NEG_ADVICE = 35,
129 CPL_ERR_PERSIST_NEG_ADVICE = 36,
130 CPL_ERR_ABORT_FAILED = 42,
131 CPL_ERR_GENERAL = 99
132};
133
134enum {
135 CPL_CONN_POLICY_AUTO = 0,
136 CPL_CONN_POLICY_ASK = 1,
137 CPL_CONN_POLICY_DENY = 3
138};
139
140enum {
141 ULP_MODE_NONE = 0,
142 ULP_MODE_ISCSI = 2,
143 ULP_MODE_RDMA = 4,
144 ULP_MODE_TCPDDP = 5
145};
146
147enum {
148 ULP_CRC_HEADER = 1 << 0,
149 ULP_CRC_DATA = 1 << 1
150};
151
152enum {
153 CPL_PASS_OPEN_ACCEPT,
154 CPL_PASS_OPEN_REJECT
155};
156
157enum {
158 CPL_ABORT_SEND_RST = 0,
159 CPL_ABORT_NO_RST,
160 CPL_ABORT_POST_CLOSE_REQ = 2
161};
162
163enum { /* TX_PKT_LSO ethernet types */
164 CPL_ETH_II,
165 CPL_ETH_II_VLAN,
166 CPL_ETH_802_3,
167 CPL_ETH_802_3_VLAN
168};
169
170enum { /* TCP congestion control algorithms */
171 CONG_ALG_RENO,
172 CONG_ALG_TAHOE,
173 CONG_ALG_NEWRENO,
174 CONG_ALG_HIGHSPEED
175};
176
177union opcode_tid {
178 __be32 opcode_tid;
179 __u8 opcode;
180};
181
182#define S_OPCODE 24
183#define V_OPCODE(x) ((x) << S_OPCODE)
184#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
185#define G_TID(x) ((x) & 0xFFFFFF)
186
187/* tid is assumed to be 24 bits */
188#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
189
190#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
191
192/* extract the TID from a CPL command */
193#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
194
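The S_/M_/V_/F_/G_ prefixes above follow the field-macro convention used throughout this header: S_ is the bit offset of a field, M_ its unshifted mask, V_(x) places a value into the field, F_ is the one-bit flag form, and G_(x) extracts the field from a word. As an illustrative sketch only (the opcode and tid values are hypothetical), an opcode_tid word could be composed and decomposed like this:

	/* illustrative only: pack a hypothetical 24-bit tid with an opcode */
	__be32 ot = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 0x1234));

	unsigned int opcode = G_OPCODE(ntohl(ot));	/* 0x3 */
	unsigned int tid = G_TID(ntohl(ot));		/* 0x1234 */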
195struct tcp_options {
196 __be16 mss;
197 __u8 wsf;
198#if defined(__LITTLE_ENDIAN_BITFIELD)
199 __u8:5;
200 __u8 ecn:1;
201 __u8 sack:1;
202 __u8 tstamp:1;
203#else
204 __u8 tstamp:1;
205 __u8 sack:1;
206 __u8 ecn:1;
207 __u8:5;
208#endif
209};
210
211struct rss_header {
212 __u8 opcode;
213#if defined(__LITTLE_ENDIAN_BITFIELD)
214 __u8 cpu_idx:6;
215 __u8 hash_type:2;
216#else
217 __u8 hash_type:2;
218 __u8 cpu_idx:6;
219#endif
220 __be16 cq_idx;
221 __be32 rss_hash_val;
222};
223
224#ifndef CHELSIO_FW
225struct work_request_hdr {
226 __be32 wr_hi;
227 __be32 wr_lo;
228};
229
230/* wr_hi fields */
231#define S_WR_SGE_CREDITS 0
232#define M_WR_SGE_CREDITS 0xFF
233#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
234#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
235
236#define S_WR_SGLSFLT 8
237#define M_WR_SGLSFLT 0xFF
238#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
239#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
240
241#define S_WR_BCNTLFLT 16
242#define M_WR_BCNTLFLT 0xF
243#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
244#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
245
246#define S_WR_DATATYPE 20
247#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
248#define F_WR_DATATYPE V_WR_DATATYPE(1U)
249
250#define S_WR_COMPL 21
251#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
252#define F_WR_COMPL V_WR_COMPL(1U)
253
254#define S_WR_EOP 22
255#define V_WR_EOP(x) ((x) << S_WR_EOP)
256#define F_WR_EOP V_WR_EOP(1U)
257
258#define S_WR_SOP 23
259#define V_WR_SOP(x) ((x) << S_WR_SOP)
260#define F_WR_SOP V_WR_SOP(1U)
261
262#define S_WR_OP 24
263#define M_WR_OP 0xFF
264#define V_WR_OP(x) ((x) << S_WR_OP)
265#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
266
267/* wr_lo fields */
268#define S_WR_LEN 0
269#define M_WR_LEN 0xFF
270#define V_WR_LEN(x) ((x) << S_WR_LEN)
271#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
272
273#define S_WR_TID 8
274#define M_WR_TID 0xFFFFF
275#define V_WR_TID(x) ((x) << S_WR_TID)
276#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
277
278#define S_WR_CR_FLUSH 30
279#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
280#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)
281
282#define S_WR_GEN 31
283#define V_WR_GEN(x) ((x) << S_WR_GEN)
284#define F_WR_GEN V_WR_GEN(1U)
285
286# define WR_HDR struct work_request_hdr wr
287# define RSS_HDR
288#else
289# define WR_HDR
290# define RSS_HDR struct rss_header rss_hdr;
291#endif
292
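When CHELSIO_FW is not defined (presumably the normal host-driver build), WR_HDR prepends a struct work_request_hdr to each host-to-chip CPL message while RSS_HDR is empty; in the firmware build the roles are reversed and replies carry a struct rss_header. A minimal, non-authoritative sketch of filling wr_hi/wr_lo with the field macros above (the opcode, tid, length and credit values are placeholders):

	unsigned int op = 0;	/* hypothetical work-request opcode */
	struct work_request_hdr wr;

	/* single-fragment request: start- and end-of-packet set, one SGE credit */
	wr.wr_hi = htonl(V_WR_OP(op) | F_WR_SOP | F_WR_EOP | V_WR_SGE_CREDITS(1));
	wr.wr_lo = htonl(V_WR_TID(0x100) | V_WR_LEN(2));	/* 2 flits, tid 0x100 */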
293/* option 0 lower-half fields */
294#define S_CPL_STATUS 0
295#define M_CPL_STATUS 0xFF
296#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
297#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
298
299#define S_INJECT_TIMER 6
300#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
301#define F_INJECT_TIMER V_INJECT_TIMER(1U)
302
303#define S_NO_OFFLOAD 7
304#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
305#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)
306
307#define S_ULP_MODE 8
308#define M_ULP_MODE 0xF
309#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
310#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
311
312#define S_RCV_BUFSIZ 12
313#define M_RCV_BUFSIZ 0x3FFF
314#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
315#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
316
317#define S_TOS 26
318#define M_TOS 0x3F
319#define V_TOS(x) ((x) << S_TOS)
320#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
321
322/* option 0 upper-half fields */
323#define S_DELACK 0
324#define V_DELACK(x) ((x) << S_DELACK)
325#define F_DELACK V_DELACK(1U)
326
327#define S_NO_CONG 1
328#define V_NO_CONG(x) ((x) << S_NO_CONG)
329#define F_NO_CONG V_NO_CONG(1U)
330
331#define S_SRC_MAC_SEL 2
332#define M_SRC_MAC_SEL 0x3
333#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
334#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
335
336#define S_L2T_IDX 4
337#define M_L2T_IDX 0x7FF
338#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
339#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
340
341#define S_TX_CHANNEL 15
342#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
343#define F_TX_CHANNEL V_TX_CHANNEL(1U)
344
345#define S_TCAM_BYPASS 16
346#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
347#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)
348
349#define S_NAGLE 17
350#define V_NAGLE(x) ((x) << S_NAGLE)
351#define F_NAGLE V_NAGLE(1U)
352
353#define S_WND_SCALE 18
354#define M_WND_SCALE 0xF
355#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
356#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
357
358#define S_KEEP_ALIVE 22
359#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
360#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)
361
362#define S_MAX_RETRANS 23
363#define M_MAX_RETRANS 0xF
364#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
365#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
366
367#define S_MAX_RETRANS_OVERRIDE 27
368#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
369#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)
370
371#define S_MSS_IDX 28
372#define M_MSS_IDX 0xF
373#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
374#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
375
376/* option 1 fields */
377#define S_RSS_ENABLE 0
378#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
379#define F_RSS_ENABLE V_RSS_ENABLE(1U)
380
381#define S_RSS_MASK_LEN 1
382#define M_RSS_MASK_LEN 0x7
383#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
384#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
385
386#define S_CPU_IDX 4
387#define M_CPU_IDX 0x3F
388#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
389#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
390
391#define S_MAC_MATCH_VALID 18
392#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
393#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
394
395#define S_CONN_POLICY 19
396#define M_CONN_POLICY 0x3
397#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
398#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
399
400#define S_SYN_DEFENSE 21
401#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
402#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
403
404#define S_VLAN_PRI 22
405#define M_VLAN_PRI 0x3
406#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
407#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
408
409#define S_VLAN_PRI_VALID 24
410#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
411#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)
412
413#define S_PKT_TYPE 25
414#define M_PKT_TYPE 0x3
415#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
416#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
417
418#define S_MAC_MATCH 27
419#define M_MAC_MATCH 0x1F
420#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
421#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
422
423/* option 2 fields */
424#define S_CPU_INDEX 0
425#define M_CPU_INDEX 0x7F
426#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
427#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
428
429#define S_CPU_INDEX_VALID 7
430#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
431#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)
432
433#define S_RX_COALESCE 8
434#define M_RX_COALESCE 0x3
435#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
436#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
437
438#define S_RX_COALESCE_VALID 10
439#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
440#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
441
442#define S_CONG_CONTROL_FLAVOR 11
443#define M_CONG_CONTROL_FLAVOR 0x3
444#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
445#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
446
447#define S_PACING_FLAVOR 13
448#define M_PACING_FLAVOR 0x3
449#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
450#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
451
452#define S_FLAVORS_VALID 15
453#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
454#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)
455
456#define S_RX_FC_DISABLE 16
457#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
458#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
459
460#define S_RX_FC_VALID 17
461#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
462#define F_RX_FC_VALID V_RX_FC_VALID(1U)
463
464struct cpl_pass_open_req {
465 WR_HDR;
466 union opcode_tid ot;
467 __be16 local_port;
468 __be16 peer_port;
469 __be32 local_ip;
470 __be32 peer_ip;
471 __be32 opt0h;
472 __be32 opt0l;
473 __be32 peer_netmask;
474 __be32 opt1;
475};
476
477struct cpl_pass_open_rpl {
478 RSS_HDR union opcode_tid ot;
479 __be16 local_port;
480 __be16 peer_port;
481 __be32 local_ip;
482 __be32 peer_ip;
483 __u8 resvd[7];
484 __u8 status;
485};
486
487struct cpl_pass_establish {
488 RSS_HDR union opcode_tid ot;
489 __be16 local_port;
490 __be16 peer_port;
491 __be32 local_ip;
492 __be32 peer_ip;
493 __be32 tos_tid;
494 __be16 l2t_idx;
495 __be16 tcp_opt;
496 __be32 snd_isn;
497 __be32 rcv_isn;
498};
499
500/* cpl_pass_establish.tos_tid fields */
501#define S_PASS_OPEN_TID 0
502#define M_PASS_OPEN_TID 0xFFFFFF
503#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
504#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
505
506#define S_PASS_OPEN_TOS 24
507#define M_PASS_OPEN_TOS 0xFF
508#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
509#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
510
511/* cpl_pass_establish.l2t_idx fields */
512#define S_L2T_IDX16 5
513#define M_L2T_IDX16 0x7FF
514#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
515#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
516
517/* cpl_pass_establish.tcp_opt fields (also applies to cpl_act_establish) */
518#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
519#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
520#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
521#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
522#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
523
524struct cpl_pass_accept_req {
525 RSS_HDR union opcode_tid ot;
526 __be16 local_port;
527 __be16 peer_port;
528 __be32 local_ip;
529 __be32 peer_ip;
530 __be32 tos_tid;
531 struct tcp_options tcp_options;
532 __u8 dst_mac[6];
533 __be16 vlan_tag;
534 __u8 src_mac[6];
535#if defined(__LITTLE_ENDIAN_BITFIELD)
536 __u8:3;
537 __u8 addr_idx:3;
538 __u8 port_idx:1;
539 __u8 exact_match:1;
540#else
541 __u8 exact_match:1;
542 __u8 port_idx:1;
543 __u8 addr_idx:3;
544 __u8:3;
545#endif
546 __u8 rsvd;
547 __be32 rcv_isn;
548 __be32 rsvd2;
549};
550
551struct cpl_pass_accept_rpl {
552 WR_HDR;
553 union opcode_tid ot;
554 __be32 opt2;
555 __be32 rsvd;
556 __be32 peer_ip;
557 __be32 opt0h;
558 __be32 opt0l_status;
559};
560
561struct cpl_act_open_req {
562 WR_HDR;
563 union opcode_tid ot;
564 __be16 local_port;
565 __be16 peer_port;
566 __be32 local_ip;
567 __be32 peer_ip;
568 __be32 opt0h;
569 __be32 opt0l;
570 __be32 params;
571 __be32 opt2;
572};
573
574/* cpl_act_open_req.params fields */
575#define S_AOPEN_VLAN_PRI 9
576#define M_AOPEN_VLAN_PRI 0x3
577#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
578#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
579
580#define S_AOPEN_VLAN_PRI_VALID 11
581#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
582#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
583
584#define S_AOPEN_PKT_TYPE 12
585#define M_AOPEN_PKT_TYPE 0x3
586#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
587#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
588
589#define S_AOPEN_MAC_MATCH 14
590#define M_AOPEN_MAC_MATCH 0x1F
591#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
592#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
593
594#define S_AOPEN_MAC_MATCH_VALID 19
595#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
596#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
597
598#define S_AOPEN_IFF_VLAN 20
599#define M_AOPEN_IFF_VLAN 0xFFF
600#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
601#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
602
603struct cpl_act_open_rpl {
604 RSS_HDR union opcode_tid ot;
605 __be16 local_port;
606 __be16 peer_port;
607 __be32 local_ip;
608 __be32 peer_ip;
609 __be32 atid;
610 __u8 rsvd[3];
611 __u8 status;
612};
613
614struct cpl_act_establish {
615 RSS_HDR union opcode_tid ot;
616 __be16 local_port;
617 __be16 peer_port;
618 __be32 local_ip;
619 __be32 peer_ip;
620 __be32 tos_tid;
621 __be16 l2t_idx;
622 __be16 tcp_opt;
623 __be32 snd_isn;
624 __be32 rcv_isn;
625};
626
627struct cpl_get_tcb {
628 WR_HDR;
629 union opcode_tid ot;
630 __be16 cpuno;
631 __be16 rsvd;
632};
633
634struct cpl_get_tcb_rpl {
635 RSS_HDR union opcode_tid ot;
636 __u8 rsvd;
637 __u8 status;
638 __be16 len;
639};
640
641struct cpl_set_tcb {
642 WR_HDR;
643 union opcode_tid ot;
644 __u8 reply;
645 __u8 cpu_idx;
646 __be16 len;
647};
648
649/* cpl_set_tcb.reply fields */
650#define S_NO_REPLY 7
651#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
652#define F_NO_REPLY V_NO_REPLY(1U)
653
654struct cpl_set_tcb_field {
655 WR_HDR;
656 union opcode_tid ot;
657 __u8 reply;
658 __u8 cpu_idx;
659 __be16 word;
660 __be64 mask;
661 __be64 val;
662};
663
664struct cpl_set_tcb_rpl {
665 RSS_HDR union opcode_tid ot;
666 __u8 rsvd[3];
667 __u8 status;
668};
669
670struct cpl_pcmd {
671 WR_HDR;
672 union opcode_tid ot;
673 __u8 rsvd[3];
674#if defined(__LITTLE_ENDIAN_BITFIELD)
675 __u8 src:1;
676 __u8 bundle:1;
677 __u8 channel:1;
678 __u8:5;
679#else
680 __u8:5;
681 __u8 channel:1;
682 __u8 bundle:1;
683 __u8 src:1;
684#endif
685 __be32 pcmd_parm[2];
686};
687
688struct cpl_pcmd_reply {
689 RSS_HDR union opcode_tid ot;
690 __u8 status;
691 __u8 rsvd;
692 __be16 len;
693};
694
695struct cpl_close_con_req {
696 WR_HDR;
697 union opcode_tid ot;
698 __be32 rsvd;
699};
700
701struct cpl_close_con_rpl {
702 RSS_HDR union opcode_tid ot;
703 __u8 rsvd[3];
704 __u8 status;
705 __be32 snd_nxt;
706 __be32 rcv_nxt;
707};
708
709struct cpl_close_listserv_req {
710 WR_HDR;
711 union opcode_tid ot;
712 __u8 rsvd0;
713 __u8 cpu_idx;
714 __be16 rsvd1;
715};
716
717struct cpl_close_listserv_rpl {
718 RSS_HDR union opcode_tid ot;
719 __u8 rsvd[3];
720 __u8 status;
721};
722
723struct cpl_abort_req_rss {
724 RSS_HDR union opcode_tid ot;
725 __be32 rsvd0;
726 __u8 rsvd1;
727 __u8 status;
728 __u8 rsvd2[6];
729};
730
731struct cpl_abort_req {
732 WR_HDR;
733 union opcode_tid ot;
734 __be32 rsvd0;
735 __u8 rsvd1;
736 __u8 cmd;
737 __u8 rsvd2[6];
738};
739
740struct cpl_abort_rpl_rss {
741 RSS_HDR union opcode_tid ot;
742 __be32 rsvd0;
743 __u8 rsvd1;
744 __u8 status;
745 __u8 rsvd2[6];
746};
747
748struct cpl_abort_rpl {
749 WR_HDR;
750 union opcode_tid ot;
751 __be32 rsvd0;
752 __u8 rsvd1;
753 __u8 cmd;
754 __u8 rsvd2[6];
755};
756
757struct cpl_peer_close {
758 RSS_HDR union opcode_tid ot;
759 __be32 rcv_nxt;
760};
761
762struct tx_data_wr {
763 __be32 wr_hi;
764 __be32 wr_lo;
765 __be32 len;
766 __be32 flags;
767 __be32 sndseq;
768 __be32 param;
769};
770
771/* tx_data_wr.param fields */
772#define S_TX_PORT 0
773#define M_TX_PORT 0x7
774#define V_TX_PORT(x) ((x) << S_TX_PORT)
775#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
776
777#define S_TX_MSS 4
778#define M_TX_MSS 0xF
779#define V_TX_MSS(x) ((x) << S_TX_MSS)
780#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
781
782#define S_TX_QOS 8
783#define M_TX_QOS 0xFF
784#define V_TX_QOS(x) ((x) << S_TX_QOS)
785#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
786
787#define S_TX_SNDBUF 16
788#define M_TX_SNDBUF 0xFFFF
789#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
790#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
791
792struct cpl_tx_data {
793 union opcode_tid ot;
794 __be32 len;
795 __be32 rsvd;
796 __be16 urg;
797 __be16 flags;
798};
799
800/* cpl_tx_data.flags fields */
801#define S_TX_ULP_SUBMODE 6
802#define M_TX_ULP_SUBMODE 0xF
803#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
804#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
805
806#define S_TX_ULP_MODE 10
807#define M_TX_ULP_MODE 0xF
808#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
809#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
810
811#define S_TX_SHOVE 14
812#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
813#define F_TX_SHOVE V_TX_SHOVE(1U)
814
815#define S_TX_MORE 15
816#define V_TX_MORE(x) ((x) << S_TX_MORE)
817#define F_TX_MORE V_TX_MORE(1U)
818
819/* additional tx_data_wr.flags fields */
820#define S_TX_CPU_IDX 0
821#define M_TX_CPU_IDX 0x3F
822#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
823#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
824
825#define S_TX_URG 16
826#define V_TX_URG(x) ((x) << S_TX_URG)
827#define F_TX_URG V_TX_URG(1U)
828
829#define S_TX_CLOSE 17
830#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
831#define F_TX_CLOSE V_TX_CLOSE(1U)
832
833#define S_TX_INIT 18
834#define V_TX_INIT(x) ((x) << S_TX_INIT)
835#define F_TX_INIT V_TX_INIT(1U)
836
837#define S_TX_IMM_ACK 19
838#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
839#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
840
841#define S_TX_IMM_DMA 20
842#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
843#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
844
845struct cpl_tx_data_ack {
846 RSS_HDR union opcode_tid ot;
847 __be32 ack_seq;
848};
849
850struct cpl_wr_ack {
851 RSS_HDR union opcode_tid ot;
852 __be16 credits;
853 __be16 rsvd;
854 __be32 snd_nxt;
855 __be32 snd_una;
856};
857
858struct cpl_rdma_ec_status {
859 RSS_HDR union opcode_tid ot;
860 __u8 rsvd[3];
861 __u8 status;
862};
863
864struct mngt_pktsched_wr {
865 __be32 wr_hi;
866 __be32 wr_lo;
867 __u8 mngt_opcode;
868 __u8 rsvd[7];
869 __u8 sched;
870 __u8 idx;
871 __u8 min;
872 __u8 max;
873 __u8 binding;
874 __u8 rsvd1[3];
875};
876
877struct cpl_iscsi_hdr {
878 RSS_HDR union opcode_tid ot;
879 __be16 pdu_len_ddp;
880 __be16 len;
881 __be32 seq;
882 __be16 urg;
883 __u8 rsvd;
884 __u8 status;
885};
886
887/* cpl_iscsi_hdr.pdu_len_ddp fields */
888#define S_ISCSI_PDU_LEN 0
889#define M_ISCSI_PDU_LEN 0x7FFF
890#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
891#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
892
893#define S_ISCSI_DDP 15
894#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
895#define F_ISCSI_DDP V_ISCSI_DDP(1U)
896
897struct cpl_rx_data {
898 RSS_HDR union opcode_tid ot;
899 __be16 rsvd;
900 __be16 len;
901 __be32 seq;
902 __be16 urg;
903#if defined(__LITTLE_ENDIAN_BITFIELD)
904 __u8 dack_mode:2;
905 __u8 psh:1;
906 __u8 heartbeat:1;
907 __u8:4;
908#else
909 __u8:4;
910 __u8 heartbeat:1;
911 __u8 psh:1;
912 __u8 dack_mode:2;
913#endif
914 __u8 status;
915};
916
917struct cpl_rx_data_ack {
918 WR_HDR;
919 union opcode_tid ot;
920 __be32 credit_dack;
921};
922
923/* cpl_rx_data_ack.credit_dack fields */
924#define S_RX_CREDITS 0
925#define M_RX_CREDITS 0x7FFFFFF
926#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
927#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
928
929#define S_RX_MODULATE 27
930#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
931#define F_RX_MODULATE V_RX_MODULATE(1U)
932
933#define S_RX_FORCE_ACK 28
934#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
935#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
936
937#define S_RX_DACK_MODE 29
938#define M_RX_DACK_MODE 0x3
939#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
940#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
941
942#define S_RX_DACK_CHANGE 31
943#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
944#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
945
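A hedged sketch of composing the credit_dack word of a cpl_rx_data_ack with the macros above (the credit count and delayed-ACK mode are arbitrary illustration values):

	/* return 16KB of receive credits and switch to delayed-ACK mode 1 */
	__be32 credit_dack = htonl(V_RX_CREDITS(16 * 1024) |
				   F_RX_DACK_CHANGE | V_RX_DACK_MODE(1));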
946struct cpl_rx_urg_notify {
947 RSS_HDR union opcode_tid ot;
948 __be32 seq;
949};
950
951struct cpl_rx_ddp_complete {
952 RSS_HDR union opcode_tid ot;
953 __be32 ddp_report;
954};
955
956struct cpl_rx_data_ddp {
957 RSS_HDR union opcode_tid ot;
958 __be16 urg;
959 __be16 len;
960 __be32 seq;
961 union {
962 __be32 nxt_seq;
963 __be32 ddp_report;
964 };
965 __be32 ulp_crc;
966 __be32 ddpvld_status;
967};
968
969/* cpl_rx_data_ddp.ddpvld_status fields */
970#define S_DDP_STATUS 0
971#define M_DDP_STATUS 0xFF
972#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
973#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
974
975#define S_DDP_VALID 15
976#define M_DDP_VALID 0x1FFFF
977#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
978#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
979
980#define S_DDP_PPOD_MISMATCH 15
981#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
982#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
983
984#define S_DDP_PDU 16
985#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
986#define F_DDP_PDU V_DDP_PDU(1U)
987
988#define S_DDP_LLIMIT_ERR 17
989#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
990#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
991
992#define S_DDP_PPOD_PARITY_ERR 18
993#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
994#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
995
996#define S_DDP_PADDING_ERR 19
997#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
998#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
999
1000#define S_DDP_HDRCRC_ERR 20
1001#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
1002#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
1003
1004#define S_DDP_DATACRC_ERR 21
1005#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
1006#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
1007
1008#define S_DDP_INVALID_TAG 22
1009#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
1010#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
1011
1012#define S_DDP_ULIMIT_ERR 23
1013#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
1014#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
1015
1016#define S_DDP_OFFSET_ERR 24
1017#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
1018#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
1019
1020#define S_DDP_COLOR_ERR 25
1021#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
1022#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
1023
1024#define S_DDP_TID_MISMATCH 26
1025#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
1026#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
1027
1028#define S_DDP_INVALID_PPOD 27
1029#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
1030#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
1031
1032#define S_DDP_ULP_MODE 28
1033#define M_DDP_ULP_MODE 0xF
1034#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
1035#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
1036
1037/* cpl_rx_data_ddp.ddp_report fields */
1038#define S_DDP_OFFSET 0
1039#define M_DDP_OFFSET 0x3FFFFF
1040#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
1041#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
1042
1043#define S_DDP_URG 24
1044#define V_DDP_URG(x) ((x) << S_DDP_URG)
1045#define F_DDP_URG V_DDP_URG(1U)
1046
1047#define S_DDP_PSH 25
1048#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
1049#define F_DDP_PSH V_DDP_PSH(1U)
1050
1051#define S_DDP_BUF_COMPLETE 26
1052#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
1053#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
1054
1055#define S_DDP_BUF_TIMED_OUT 27
1056#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
1057#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
1058
1059#define S_DDP_BUF_IDX 28
1060#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
1061#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
1062
1063struct cpl_tx_pkt {
1064 WR_HDR;
1065 __be32 cntrl;
1066 __be32 len;
1067};
1068
1069struct cpl_tx_pkt_lso {
1070 WR_HDR;
1071 __be32 cntrl;
1072 __be32 len;
1073
1074 __be32 rsvd;
1075 __be32 lso_info;
1076};
1077
1078/* cpl_tx_pkt*.cntrl fields */
1079#define S_TXPKT_VLAN 0
1080#define M_TXPKT_VLAN 0xFFFF
1081#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
1082#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
1083
1084#define S_TXPKT_INTF 16
1085#define M_TXPKT_INTF 0xF
1086#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
1087#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
1088
1089#define S_TXPKT_IPCSUM_DIS 20
1090#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
1091#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)
1092
1093#define S_TXPKT_L4CSUM_DIS 21
1094#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
1095#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)
1096
1097#define S_TXPKT_VLAN_VLD 22
1098#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
1099#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)
1100
1101#define S_TXPKT_LOOPBACK 23
1102#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
1103#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
1104
1105#define S_TXPKT_OPCODE 24
1106#define M_TXPKT_OPCODE 0xFF
1107#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
1108#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
1109
1110/* cpl_tx_pkt_lso.lso_info fields */
1111#define S_LSO_MSS 0
1112#define M_LSO_MSS 0x3FFF
1113#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
1114#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
1115
1116#define S_LSO_ETH_TYPE 14
1117#define M_LSO_ETH_TYPE 0x3
1118#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
1119#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
1120
1121#define S_LSO_TCPHDR_WORDS 16
1122#define M_LSO_TCPHDR_WORDS 0xF
1123#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
1124#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
1125
1126#define S_LSO_IPHDR_WORDS 20
1127#define M_LSO_IPHDR_WORDS 0xF
1128#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
1129#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
1130
1131#define S_LSO_IPV6 24
1132#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
1133#define F_LSO_IPV6 V_LSO_IPV6(1U)
1134
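As an illustrative, non-authoritative sketch, a TSO transmit might fill cntrl and lso_info along these lines (the interface number, header word counts and MSS are placeholders, not values mandated by this header):

	static void example_fill_lso(struct cpl_tx_pkt_lso *cpl)
	{
		/* placeholders: port 0, 20-byte IP and TCP headers, MSS 1460 */
		cpl->cntrl = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_LSO) | V_TXPKT_INTF(0));
		cpl->lso_info = htonl(V_LSO_MSS(1460) | V_LSO_ETH_TYPE(CPL_ETH_II) |
				      V_LSO_IPHDR_WORDS(5) | V_LSO_TCPHDR_WORDS(5));
	}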
1135struct cpl_trace_pkt {
1136#ifdef CHELSIO_FW
1137 __u8 rss_opcode;
1138#if defined(__LITTLE_ENDIAN_BITFIELD)
1139 __u8 err:1;
1140 __u8:7;
1141#else
1142 __u8:7;
1143 __u8 err:1;
1144#endif
1145 __u8 rsvd0;
1146#if defined(__LITTLE_ENDIAN_BITFIELD)
1147 __u8 qid:4;
1148 __u8:4;
1149#else
1150 __u8:4;
1151 __u8 qid:4;
1152#endif
1153 __be32 tstamp;
1154#endif /* CHELSIO_FW */
1155
1156 __u8 opcode;
1157#if defined(__LITTLE_ENDIAN_BITFIELD)
1158 __u8 iff:4;
1159 __u8:4;
1160#else
1161 __u8:4;
1162 __u8 iff:4;
1163#endif
1164 __u8 rsvd[4];
1165 __be16 len;
1166};
1167
1168struct cpl_rx_pkt {
1169 RSS_HDR __u8 opcode;
1170#if defined(__LITTLE_ENDIAN_BITFIELD)
1171 __u8 iff:4;
1172 __u8 csum_valid:1;
1173 __u8 ipmi_pkt:1;
1174 __u8 vlan_valid:1;
1175 __u8 fragment:1;
1176#else
1177 __u8 fragment:1;
1178 __u8 vlan_valid:1;
1179 __u8 ipmi_pkt:1;
1180 __u8 csum_valid:1;
1181 __u8 iff:4;
1182#endif
1183 __be16 csum;
1184 __be16 vlan;
1185 __be16 len;
1186};
1187
1188struct cpl_l2t_write_req {
1189 WR_HDR;
1190 union opcode_tid ot;
1191 __be32 params;
1192 __u8 rsvd[2];
1193 __u8 dst_mac[6];
1194};
1195
1196/* cpl_l2t_write_req.params fields */
1197#define S_L2T_W_IDX 0
1198#define M_L2T_W_IDX 0x7FF
1199#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
1200#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
1201
1202#define S_L2T_W_VLAN 11
1203#define M_L2T_W_VLAN 0xFFF
1204#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
1205#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
1206
1207#define S_L2T_W_IFF 23
1208#define M_L2T_W_IFF 0xF
1209#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
1210#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
1211
1212#define S_L2T_W_PRIO 27
1213#define M_L2T_W_PRIO 0x7
1214#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
1215#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
1216
1217struct cpl_l2t_write_rpl {
1218 RSS_HDR union opcode_tid ot;
1219 __u8 status;
1220 __u8 rsvd[3];
1221};
1222
1223struct cpl_l2t_read_req {
1224 WR_HDR;
1225 union opcode_tid ot;
1226 __be16 rsvd;
1227 __be16 l2t_idx;
1228};
1229
1230struct cpl_l2t_read_rpl {
1231 RSS_HDR union opcode_tid ot;
1232 __be32 params;
1233 __u8 rsvd[2];
1234 __u8 dst_mac[6];
1235};
1236
1237/* cpl_l2t_read_rpl.params fields */
1238#define S_L2T_R_PRIO 0
1239#define M_L2T_R_PRIO 0x7
1240#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
1241#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
1242
1243#define S_L2T_R_VLAN 8
1244#define M_L2T_R_VLAN 0xFFF
1245#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
1246#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
1247
1248#define S_L2T_R_IFF 20
1249#define M_L2T_R_IFF 0xF
1250#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
1251#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
1252
1253#define S_L2T_STATUS 24
1254#define M_L2T_STATUS 0xFF
1255#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
1256#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
1257
1258struct cpl_smt_write_req {
1259 WR_HDR;
1260 union opcode_tid ot;
1261 __u8 rsvd0;
1262#if defined(__LITTLE_ENDIAN_BITFIELD)
1263 __u8 mtu_idx:4;
1264 __u8 iff:4;
1265#else
1266 __u8 iff:4;
1267 __u8 mtu_idx:4;
1268#endif
1269 __be16 rsvd2;
1270 __be16 rsvd3;
1271 __u8 src_mac1[6];
1272 __be16 rsvd4;
1273 __u8 src_mac0[6];
1274};
1275
1276struct cpl_smt_write_rpl {
1277 RSS_HDR union opcode_tid ot;
1278 __u8 status;
1279 __u8 rsvd[3];
1280};
1281
1282struct cpl_smt_read_req {
1283 WR_HDR;
1284 union opcode_tid ot;
1285 __u8 rsvd0;
1286#if defined(__LITTLE_ENDIAN_BITFIELD)
1287 __u8:4;
1288 __u8 iff:4;
1289#else
1290 __u8 iff:4;
1291 __u8:4;
1292#endif
1293 __be16 rsvd2;
1294};
1295
1296struct cpl_smt_read_rpl {
1297 RSS_HDR union opcode_tid ot;
1298 __u8 status;
1299#if defined(__LITTLE_ENDIAN_BITFIELD)
1300 __u8 mtu_idx:4;
1301 __u8:4;
1302#else
1303 __u8:4;
1304 __u8 mtu_idx:4;
1305#endif
1306 __be16 rsvd2;
1307 __be16 rsvd3;
1308 __u8 src_mac1[6];
1309 __be16 rsvd4;
1310 __u8 src_mac0[6];
1311};
1312
1313struct cpl_rte_delete_req {
1314 WR_HDR;
1315 union opcode_tid ot;
1316 __be32 params;
1317};
1318
1319/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
1320#define S_RTE_REQ_LUT_IX 8
1321#define M_RTE_REQ_LUT_IX 0x7FF
1322#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
1323#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
1324
1325#define S_RTE_REQ_LUT_BASE 19
1326#define M_RTE_REQ_LUT_BASE 0x7FF
1327#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
1328#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
1329
1330#define S_RTE_READ_REQ_SELECT 31
1331#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
1332#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
1333
1334struct cpl_rte_delete_rpl {
1335 RSS_HDR union opcode_tid ot;
1336 __u8 status;
1337 __u8 rsvd[3];
1338};
1339
1340struct cpl_rte_write_req {
1341 WR_HDR;
1342 union opcode_tid ot;
1343#if defined(__LITTLE_ENDIAN_BITFIELD)
1344 __u8:6;
1345 __u8 write_tcam:1;
1346 __u8 write_l2t_lut:1;
1347#else
1348 __u8 write_l2t_lut:1;
1349 __u8 write_tcam:1;
1350 __u8:6;
1351#endif
1352 __u8 rsvd[3];
1353 __be32 lut_params;
1354 __be16 rsvd2;
1355 __be16 l2t_idx;
1356 __be32 netmask;
1357 __be32 faddr;
1358};
1359
1360/* cpl_rte_write_req.lut_params fields */
1361#define S_RTE_WRITE_REQ_LUT_IX 10
1362#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
1363#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
1364#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
1365
1366#define S_RTE_WRITE_REQ_LUT_BASE 21
1367#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
1368#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
1369#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
1370
1371struct cpl_rte_write_rpl {
1372 RSS_HDR union opcode_tid ot;
1373 __u8 status;
1374 __u8 rsvd[3];
1375};
1376
1377struct cpl_rte_read_req {
1378 WR_HDR;
1379 union opcode_tid ot;
1380 __be32 params;
1381};
1382
1383struct cpl_rte_read_rpl {
1384 RSS_HDR union opcode_tid ot;
1385 __u8 status;
1386 __u8 rsvd0;
1387 __be16 l2t_idx;
1388#if defined(__LITTLE_ENDIAN_BITFIELD)
1389 __u8:7;
1390 __u8 select:1;
1391#else
1392 __u8 select:1;
1393 __u8:7;
1394#endif
1395 __u8 rsvd2[3];
1396 __be32 addr;
1397};
1398
1399struct cpl_tid_release {
1400 WR_HDR;
1401 union opcode_tid ot;
1402 __be32 rsvd;
1403};
1404
1405struct cpl_barrier {
1406 WR_HDR;
1407 __u8 opcode;
1408 __u8 rsvd[7];
1409};
1410
1411struct cpl_rdma_read_req {
1412 __u8 opcode;
1413 __u8 rsvd[15];
1414};
1415
1416struct cpl_rdma_terminate {
1417#ifdef CHELSIO_FW
1418 __u8 opcode;
1419 __u8 rsvd[2];
1420#if defined(__LITTLE_ENDIAN_BITFIELD)
1421 __u8 rspq:3;
1422 __u8:5;
1423#else
1424 __u8:5;
1425 __u8 rspq:3;
1426#endif
1427 __be32 tid_len;
1428#endif
1429 __be32 msn;
1430 __be32 mo;
1431 __u8 data[0];
1432};
1433
1434/* cpl_rdma_terminate.tid_len fields */
1435#define S_FLIT_CNT 0
1436#define M_FLIT_CNT 0xFF
1437#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
1438#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
1439
1440#define S_TERM_TID 8
1441#define M_TERM_TID 0xFFFFF
1442#define V_TERM_TID(x) ((x) << S_TERM_TID)
1443#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
1444#endif /* T3_CPL_H */
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
new file mode 100644
index 000000000000..365a7f5b1f94
--- /dev/null
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -0,0 +1,3375 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
37/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
52
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
65 return -EAGAIN;
66 if (delay)
67 udelay(delay);
68 }
69}
70
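The callers later in this file use t3_wait_op_done() rather than the _val variant; presumably that is a thin wrapper (likely defined in common.h) that simply discards the completion-time register value. A sketch of such a wrapper, under that assumption:

	static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
					  int polarity, int attempts, int delay)
	{
		/* assumed wrapper: same polling loop, value at completion not needed */
		return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
					   delay, NULL);
	}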
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @nregs: how many indirect registers to read
117 * @start_idx: index of first indirect register to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
122void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals, unsigned int nregs,
124 unsigned int start_idx)
125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
133/**
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
139 *
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
141 * accesses.
142 */
143int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
144 u64 *buf)
145{
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
148
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
151
152 if (start >= size64 || start + n > size64)
153 return -EINVAL;
154
155 start *= (8 << mc7->width);
156 while (n--) {
157 int i;
158 u64 val64 = 0;
159
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
161 int attempts = 10;
162 u32 val;
163
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
170 if (val & F_BUSY)
171 return -EIO;
172
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
176 mc7->offset +
177 A_MC7_BD_DATA0);
178 val64 |= (u64) val << 32;
179 } else {
180 if (mc7->width > 1)
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
183 }
184 start += 8;
185 }
186 *buf++ = val64;
187 }
188 return 0;
189}
190
191/*
192 * Initialize MI1.
193 */
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
198 V_CLKDIV(clkdiv);
199
200 if (!(ai->caps & SUPPORTED_10000baseT_Full))
201 val |= V_ST(1);
202 t3_write_reg(adap, A_MI1_CFG, val);
203}
204
205#define MDIO_ATTEMPTS 10
206
207/*
208 * MI1 read/write operations for direct-addressed PHYs.
209 */
210static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
211 int reg_addr, unsigned int *valp)
212{
213 int ret;
214 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
215
216 if (mmd_addr)
217 return -EINVAL;
218
219 mutex_lock(&adapter->mdio_lock);
220 t3_write_reg(adapter, A_MI1_ADDR, addr);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
223 if (!ret)
224 *valp = t3_read_reg(adapter, A_MI1_DATA);
225 mutex_unlock(&adapter->mdio_lock);
226 return ret;
227}
228
229static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
230 int reg_addr, unsigned int val)
231{
232 int ret;
233 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
234
235 if (mmd_addr)
236 return -EINVAL;
237
238 mutex_lock(&adapter->mdio_lock);
239 t3_write_reg(adapter, A_MI1_ADDR, addr);
240 t3_write_reg(adapter, A_MI1_DATA, val);
241 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
243 mutex_unlock(&adapter->mdio_lock);
244 return ret;
245}
246
247static const struct mdio_ops mi1_mdio_ops = {
248 mi1_read,
249 mi1_write
250};
251
252/*
253 * MI1 read/write operations for indirect-addressed PHYs.
254 */
255static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr, unsigned int *valp)
257{
258 int ret;
259 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260
261 mutex_lock(&adapter->mdio_lock);
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
266 if (!ret) {
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
269 MDIO_ATTEMPTS, 20);
270 if (!ret)
271 *valp = t3_read_reg(adapter, A_MI1_DATA);
272 }
273 mutex_unlock(&adapter->mdio_lock);
274 return ret;
275}
276
277static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
278 int reg_addr, unsigned int val)
279{
280 int ret;
281 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
282
283 mutex_lock(&adapter->mdio_lock);
284 t3_write_reg(adapter, A_MI1_ADDR, addr);
285 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
286 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
287 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
288 if (!ret) {
289 t3_write_reg(adapter, A_MI1_DATA, val);
290 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
291 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
292 MDIO_ATTEMPTS, 20);
293 }
294 mutex_unlock(&adapter->mdio_lock);
295 return ret;
296}
297
298static const struct mdio_ops mi1_mdio_ext_ops = {
299 mi1_ext_read,
300 mi1_ext_write
301};
302
303/**
304 * t3_mdio_change_bits - modify the value of a PHY register
305 * @phy: the PHY to operate on
306 * @mmd: the device address
307 * @reg: the register address
308 * @clear: what part of the register value to mask off
309 * @set: what part of the register value to set
310 *
311 * Changes the value of a PHY register by applying a mask to its current
312 * value and ORing the result with a new value.
313 */
314int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
315 unsigned int set)
316{
317 int ret;
318 unsigned int val;
319
320 ret = mdio_read(phy, mmd, reg, &val);
321 if (!ret) {
322 val &= ~clear;
323 ret = mdio_write(phy, mmd, reg, val | set);
324 }
325 return ret;
326}
327
328/**
329 * t3_phy_reset - reset a PHY block
330 * @phy: the PHY to operate on
331 * @mmd: the device address of the PHY block to reset
332 * @wait: how long to wait for the reset to complete in 1ms increments
333 *
334 * Resets a PHY block and optionally waits for the reset to complete.
335 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
336 * for 10G PHYs.
337 */
338int t3_phy_reset(struct cphy *phy, int mmd, int wait)
339{
340 int err;
341 unsigned int ctl;
342
343 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
344 if (err || !wait)
345 return err;
346
347 do {
348 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
349 if (err)
350 return err;
351 ctl &= BMCR_RESET;
352 if (ctl)
353 msleep(1);
354 } while (ctl && --wait);
355
356 return ctl ? -1 : 0;
357}
358
359/**
360 * t3_phy_advertise - set the PHY advertisement registers for autoneg
361 * @phy: the PHY to operate on
362 * @advert: bitmap of capabilities the PHY should advertise
363 *
364 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
365 * requested capabilities.
366 */
367int t3_phy_advertise(struct cphy *phy, unsigned int advert)
368{
369 int err;
370 unsigned int val = 0;
371
372 err = mdio_read(phy, 0, MII_CTRL1000, &val);
373 if (err)
374 return err;
375
376 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 if (advert & ADVERTISED_1000baseT_Half)
378 val |= ADVERTISE_1000HALF;
379 if (advert & ADVERTISED_1000baseT_Full)
380 val |= ADVERTISE_1000FULL;
381
382 err = mdio_write(phy, 0, MII_CTRL1000, val);
383 if (err)
384 return err;
385
386 val = 1;
387 if (advert & ADVERTISED_10baseT_Half)
388 val |= ADVERTISE_10HALF;
389 if (advert & ADVERTISED_10baseT_Full)
390 val |= ADVERTISE_10FULL;
391 if (advert & ADVERTISED_100baseT_Half)
392 val |= ADVERTISE_100HALF;
393 if (advert & ADVERTISED_100baseT_Full)
394 val |= ADVERTISE_100FULL;
395 if (advert & ADVERTISED_Pause)
396 val |= ADVERTISE_PAUSE_CAP;
397 if (advert & ADVERTISED_Asym_Pause)
398 val |= ADVERTISE_PAUSE_ASYM;
399 return mdio_write(phy, 0, MII_ADVERTISE, val);
400}
401
402/**
403 * t3_set_phy_speed_duplex - force PHY speed and duplex
404 * @phy: the PHY to operate on
405 * @speed: requested PHY speed
406 * @duplex: requested PHY duplex
407 *
408 * Force a 10/100/1000 PHY's speed and duplex. This also disables
409 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
410 */
411int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
412{
413 int err;
414 unsigned int ctl;
415
416 err = mdio_read(phy, 0, MII_BMCR, &ctl);
417 if (err)
418 return err;
419
420 if (speed >= 0) {
421 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
422 if (speed == SPEED_100)
423 ctl |= BMCR_SPEED100;
424 else if (speed == SPEED_1000)
425 ctl |= BMCR_SPEED1000;
426 }
427 if (duplex >= 0) {
428 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
429 if (duplex == DUPLEX_FULL)
430 ctl |= BMCR_FULLDPLX;
431 }
432 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
433 ctl |= BMCR_ANENABLE;
434 return mdio_write(phy, 0, MII_BMCR, ctl);
435}
436
437static const struct adapter_info t3_adap_info[] = {
438 {2, 0, 0, 0,
439 F_GPIO2_OEN | F_GPIO4_OEN |
440 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
441 SUPPORTED_OFFLOAD,
442 &mi1_mdio_ops, "Chelsio PE9000"},
443 {2, 0, 0, 0,
444 F_GPIO2_OEN | F_GPIO4_OEN |
445 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
446 SUPPORTED_OFFLOAD,
447 &mi1_mdio_ops, "Chelsio T302"},
448 {1, 0, 0, 0,
449 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
450 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
451 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
452 &mi1_mdio_ext_ops, "Chelsio T310"},
453 {2, 0, 0, 0,
454 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
455 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
456 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
457 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
458 &mi1_mdio_ext_ops, "Chelsio T320"},
459};
460
461/*
462 * Return the adapter_info structure with a given index. Out-of-range indices
463 * return NULL.
464 */
465const struct adapter_info *t3_get_adapter_info(unsigned int id)
466{
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
468}
469
470#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
471 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
472#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
473
474static const struct port_type_info port_types[] = {
475 {NULL},
476 {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
477 "10GBASE-XR"},
478 {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
479 "10/100/1000BASE-T"},
480 {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
481 "10/100/1000BASE-T"},
482 {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
483 {NULL, CAPS_10G, "10GBASE-KX4"},
484 {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
485 {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
486 "10GBASE-SR"},
487 {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
488};
489
490#undef CAPS_1G
491#undef CAPS_10G
492
493#define VPD_ENTRY(name, len) \
494 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
495
496/*
497 * Partial EEPROM Vital Product Data structure. Includes only the ID and
498 * VPD-R sections.
499 */
500struct t3_vpd {
501 u8 id_tag;
502 u8 id_len[2];
503 u8 id_data[16];
504 u8 vpdr_tag;
505 u8 vpdr_len[2];
506 VPD_ENTRY(pn, 16); /* part number */
507 VPD_ENTRY(ec, 16); /* EC level */
508 VPD_ENTRY(sn, 16); /* serial number */
509 VPD_ENTRY(na, 12); /* MAC address base */
510 VPD_ENTRY(cclk, 6); /* core clock */
511 VPD_ENTRY(mclk, 6); /* mem clock */
512 VPD_ENTRY(uclk, 6); /* uP clk */
513 VPD_ENTRY(mdc, 6); /* MDIO clk */
514 VPD_ENTRY(mt, 2); /* mem timing */
515 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
516 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
517 VPD_ENTRY(port0, 2); /* PHY0 complex */
518 VPD_ENTRY(port1, 2); /* PHY1 complex */
519 VPD_ENTRY(port2, 2); /* PHY2 complex */
520 VPD_ENTRY(port3, 2); /* PHY3 complex */
521 VPD_ENTRY(rv, 1); /* csum */
522 u32 pad; /* for multiple-of-4 sizing and alignment */
523};
524
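For reference, each VPD_ENTRY(name, len) above expands to the keyword's two-byte key, a length byte and the data bytes, so for example VPD_ENTRY(cclk, 6) yields:

	u8 cclk_kword[2]; u8 cclk_len; u8 cclk_data[6];

The get_vpd_params() routine below then parses the *_data fields with simple_strtoul()/hex2int().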
525#define EEPROM_MAX_POLL 4
526#define EEPROM_STAT_ADDR 0x4000
527#define VPD_BASE 0xc00
528
529/**
530 * t3_seeprom_read - read a VPD EEPROM location
531 * @adapter: adapter to read
532 * @addr: EEPROM address
533 * @data: where to store the read data
534 *
535 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
536 * VPD ROM capability. A zero is written to the flag bit when the
537 * address is written to the control register. The hardware device will
538 * set the flag to 1 when 4 bytes have been read into the data register.
539 */
540int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
541{
542 u16 val;
543 int attempts = EEPROM_MAX_POLL;
544 unsigned int base = adapter->params.pci.vpd_cap_addr;
545
546 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
547 return -EINVAL;
548
549 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
550 do {
551 udelay(10);
552 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
553 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
554
555 if (!(val & PCI_VPD_ADDR_F)) {
556 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
557 return -EIO;
558 }
559 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
560 *data = le32_to_cpu(*data);
561 return 0;
562}
563
564/**
565 * t3_seeprom_write - write a VPD EEPROM location
566 * @adapter: adapter to write
567 * @addr: EEPROM address
568 * @data: value to write
569 *
570 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
571 * VPD ROM capability.
572 */
573int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
574{
575 u16 val;
576 int attempts = EEPROM_MAX_POLL;
577 unsigned int base = adapter->params.pci.vpd_cap_addr;
578
579 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
580 return -EINVAL;
581
582 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
583 cpu_to_le32(data));
584	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
585 addr | PCI_VPD_ADDR_F);
586 do {
587 msleep(1);
588 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
589 } while ((val & PCI_VPD_ADDR_F) && --attempts);
590
591 if (val & PCI_VPD_ADDR_F) {
592 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
593 return -EIO;
594 }
595 return 0;
596}
597
598/**
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
602 *
603 * Enables or disables write protection on the serial EEPROM.
604 */
605int t3_seeprom_wp(struct adapter *adapter, int enable)
606{
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
608}
609
610/*
611 * Convert a character holding a hex digit to a number.
612 */
613static unsigned int hex2int(unsigned char c)
614{
615 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
616}
617
618/**
619 * get_vpd_params - read VPD parameters from VPD EEPROM
620 * @adapter: adapter to read
621 * @p: where to store the parameters
622 *
623 * Reads card parameters stored in VPD EEPROM.
624 */
625static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
626{
627 int i, addr, ret;
628 struct t3_vpd vpd;
629
630 /*
631 * Card information is normally at VPD_BASE but some early cards had
632 * it at 0.
633 */
634 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
635 if (ret)
636 return ret;
637 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
638
639 for (i = 0; i < sizeof(vpd); i += 4) {
640 ret = t3_seeprom_read(adapter, addr + i,
641 (u32 *)((u8 *)&vpd + i));
642 if (ret)
643 return ret;
644 }
645
646 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
647 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
648 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
649 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
650 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
651
652 /* Old eeproms didn't have port information */
653 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
654 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
655 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
656 } else {
657 p->port_type[0] = hex2int(vpd.port0_data[0]);
658 p->port_type[1] = hex2int(vpd.port1_data[0]);
659 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
660 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
661 }
662
663 for (i = 0; i < 6; i++)
664 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
665 hex2int(vpd.na_data[2 * i + 1]);
666 return 0;
667}
668
669/* serial flash and firmware constants */
670enum {
671 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
672 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
673 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
674
675 /* flash command opcodes */
676 SF_PROG_PAGE = 2, /* program page */
677 SF_WR_DISABLE = 4, /* disable writes */
678 SF_RD_STATUS = 5, /* read status register */
679 SF_WR_ENABLE = 6, /* enable writes */
680 SF_RD_DATA_FAST = 0xb, /* read flash */
681 SF_ERASE_SECTOR = 0xd8, /* erase sector */
682
683 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
684 FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */
685};
686
687/**
688 * sf1_read - read data from the serial flash
689 * @adapter: the adapter
690 * @byte_cnt: number of bytes to read
691 * @cont: whether another operation will be chained
692 * @valp: where to store the read data
693 *
694 * Reads up to 4 bytes of data from the serial flash. The location of
695 * the read needs to be specified prior to calling this by issuing the
696 * appropriate commands to the serial flash.
697 */
698static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
699 u32 *valp)
700{
701 int ret;
702
703 if (!byte_cnt || byte_cnt > 4)
704 return -EINVAL;
705 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
706 return -EBUSY;
707 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
708 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
709 if (!ret)
710 *valp = t3_read_reg(adapter, A_SF_DATA);
711 return ret;
712}
713
714/**
715 * sf1_write - write data to the serial flash
716 * @adapter: the adapter
717 * @byte_cnt: number of bytes to write
718 * @cont: whether another operation will be chained
719 * @val: value to write
720 *
721 * Writes up to 4 bytes of data to the serial flash. The location of
722 * the write needs to be specified prior to calling this by issuing the
723 * appropriate commands to the serial flash.
724 */
725static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
726 u32 val)
727{
728 if (!byte_cnt || byte_cnt > 4)
729 return -EINVAL;
730 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
731 return -EBUSY;
732 t3_write_reg(adapter, A_SF_DATA, val);
733 t3_write_reg(adapter, A_SF_OP,
734 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
735 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
736}
737
738/**
739 * flash_wait_op - wait for a flash operation to complete
740 * @adapter: the adapter
741 * @attempts: max number of polls of the status register
742 * @delay: delay between polls in ms
743 *
744 * Wait for a flash operation to complete by polling the status register.
745 */
746static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
747{
748 int ret;
749 u32 status;
750
751 while (1) {
752 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
753 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
754 return ret;
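		/* bit 0 of the flash status register is the write-in-progress flag */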
755 if (!(status & 1))
756 return 0;
757 if (--attempts == 0)
758 return -EAGAIN;
759 if (delay)
760 msleep(delay);
761 }
762}
763
764/**
765 * t3_read_flash - read words from serial flash
766 * @adapter: the adapter
767 * @addr: the start address for the read
768 * @nwords: how many 32-bit words to read
769 * @data: where to store the read data
770 * @byte_oriented: whether to store data as bytes or as words
771 *
772 * Read the specified number of 32-bit words from the serial flash.
773 * If @byte_oriented is set the read data is stored as a byte array
774 * (i.e., big-endian), otherwise as 32-bit words in the platform's
775 * natural endianness.
776 */
777int t3_read_flash(struct adapter *adapter, unsigned int addr,
778 unsigned int nwords, u32 *data, int byte_oriented)
779{
780 int ret;
781
782 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
783 return -EINVAL;
784
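	/*
	 * Command word for a fast read: the opcode occupies the low byte and
	 * the 24-bit flash address is byte-swapped so the flash receives the
	 * opcode first, followed by the address MSB-first.
	 */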
785 addr = swab32(addr) | SF_RD_DATA_FAST;
786
787 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
788 (ret = sf1_read(adapter, 1, 1, data)) != 0)
789 return ret;
790
791 for (; nwords; nwords--, data++) {
792 ret = sf1_read(adapter, 4, nwords > 1, data);
793 if (ret)
794 return ret;
795 if (byte_oriented)
796 *data = htonl(*data);
797 }
798 return 0;
799}
800
801/**
802 * t3_write_flash - write up to a page of data to the serial flash
803 * @adapter: the adapter
804 * @addr: the start address to write
805 * @n: length of data to write
806 * @data: the data to write
807 *
808 * Writes up to a page of data (256 bytes) to the serial flash starting
809 * at the given address.
810 */
811static int t3_write_flash(struct adapter *adapter, unsigned int addr,
812 unsigned int n, const u8 *data)
813{
814 int ret;
815 u32 buf[64];
816 unsigned int i, c, left, val, offset = addr & 0xff;
817
818 if (addr + n > SF_SIZE || offset + n > 256)
819 return -EINVAL;
820
821 val = swab32(addr) | SF_PROG_PAGE;
822
823 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
824 (ret = sf1_write(adapter, 4, 1, val)) != 0)
825 return ret;
826
827 for (left = n; left; left -= c) {
828 c = min(left, 4U);
829 for (val = 0, i = 0; i < c; ++i)
830 val = (val << 8) + *data++;
831
832 ret = sf1_write(adapter, c, c != left, val);
833 if (ret)
834 return ret;
835 }
836 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
837 return ret;
838
839 /* Read the page to verify the write succeeded */
840 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
841 if (ret)
842 return ret;
843
844 if (memcmp(data - n, (u8 *) buf + offset, n))
845 return -EIO;
846 return 0;
847}
848
849enum fw_version_type {
850 FW_VERSION_N3,
851 FW_VERSION_T3
852};
853
854/**
855 * t3_get_fw_version - read the firmware version
856 * @adapter: the adapter
857 * @vers: where to place the version
858 *
859 * Reads the FW version from flash.
860 */
861int t3_get_fw_version(struct adapter *adapter, u32 *vers)
862{
863 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
864}
865
866/**
867 * t3_check_fw_version - check if the FW is compatible with this driver
868 * @adapter: the adapter
869 *
870 * Checks if an adapter's FW is compatible with the driver. Returns 0
871 * if the versions are compatible, a negative error otherwise.
872 */
873int t3_check_fw_version(struct adapter *adapter)
874{
875 int ret;
876 u32 vers;
877 unsigned int type, major, minor;
878
879 ret = t3_get_fw_version(adapter, &vers);
880 if (ret)
881 return ret;
882
883 type = G_FW_VERSION_TYPE(vers);
884 major = G_FW_VERSION_MAJOR(vers);
885 minor = G_FW_VERSION_MINOR(vers);
886
887 if (type == FW_VERSION_T3 && major == 3 && minor == 1)
888 return 0;
889
890	CH_ERR(adapter, "found wrong FW version (%u.%u), "
891 "driver needs version 3.1\n", major, minor);
892 return -EINVAL;
893}
894
895/**
896 * t3_flash_erase_sectors - erase a range of flash sectors
897 * @adapter: the adapter
898 * @start: the first sector to erase
899 * @end: the last sector to erase
900 *
901 * Erases the sectors in the given range.
902 */
903static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
904{
905 while (start <= end) {
906 int ret;
907
908 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
909 (ret = sf1_write(adapter, 4, 0,
910 SF_ERASE_SECTOR | (start << 8))) != 0 ||
911 (ret = flash_wait_op(adapter, 5, 500)) != 0)
912 return ret;
913 start++;
914 }
915 return 0;
916}
917
918/**
919 * t3_load_fw - download firmware
920 * @adapter: the adapter
921 * @fw_data: the firmware image to write
922 * @size: image size
923 *
924 * Write the supplied firmware image to the card's serial flash.
925 * The FW image has the following sections: @size - 8 bytes of code and
926 * data, followed by 4 bytes of FW version, followed by the 32-bit
927 * 1's complement checksum of the whole image.
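 *
 * Layout of the image:
 *   bytes 0 .. @size-9        code and data
 *   bytes @size-8 .. @size-5  FW version word
 *   bytes @size-4 .. @size-1  1's complement checksum of the whole image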
928 */
929int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
930{
931 u32 csum;
932 unsigned int i;
933 const u32 *p = (const u32 *)fw_data;
934	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;	/* 64KB (SF_SEC_SIZE) sectors */
935
936 if (size & 3)
937 return -EINVAL;
938 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
939 return -EFBIG;
940
941 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
942 csum += ntohl(p[i]);
943 if (csum != 0xffffffff) {
944 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
945 csum);
946 return -EINVAL;
947 }
948
949 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
950 if (ret)
951 goto out;
952
953 size -= 8; /* trim off version and checksum */
954 for (addr = FW_FLASH_BOOT_ADDR; size;) {
955 unsigned int chunk_size = min(size, 256U);
956
957 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
958 if (ret)
959 goto out;
960
961 addr += chunk_size;
962 fw_data += chunk_size;
963 size -= chunk_size;
964 }
965
966 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
967out:
968 if (ret)
969 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
970 return ret;
971}
972
973#define CIM_CTL_BASE 0x2000
974
975/**
976 * t3_cim_ctl_blk_read - read a block from CIM control region
977 *
978 * @adap: the adapter
979 * @addr: the start address within the CIM control region
980 * @n: number of words to read
981 * @valp: where to store the result
982 *
983 * Reads a block of 4-byte words from the CIM control region.
984 */
985int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
986 unsigned int n, unsigned int *valp)
987{
988 int ret = 0;
989
990 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
991 return -EBUSY;
992
993 for ( ; !ret && n--; addr += 4) {
994 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
995 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
996 0, 5, 2);
997 if (!ret)
998 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
999 }
1000 return ret;
1001}
1002
1003
1004/**
1005 * t3_link_changed - handle interface link changes
1006 * @adapter: the adapter
1007 * @port_id: the port index that changed link state
1008 *
1009 * Called when a port's link settings change to propagate the new values
1010 * to the associated PHY and MAC. After performing the common tasks it
1011 * invokes an OS-specific handler.
1012 */
1013void t3_link_changed(struct adapter *adapter, int port_id)
1014{
1015 int link_ok, speed, duplex, fc;
1016 struct port_info *pi = adap2pinfo(adapter, port_id);
1017 struct cphy *phy = &pi->phy;
1018 struct cmac *mac = &pi->mac;
1019 struct link_config *lc = &pi->link_config;
1020
1021 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1022
1023 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1024 uses_xaui(adapter)) {
1025 if (link_ok)
1026 t3b_pcs_reset(mac);
1027 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1028 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1029 }
1030 lc->link_ok = link_ok;
1031 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1032 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1033 if (lc->requested_fc & PAUSE_AUTONEG)
1034 fc &= lc->requested_fc;
1035 else
1036 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1037
1038 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1039 /* Set MAC speed, duplex, and flow control to match PHY. */
1040 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1041 lc->fc = fc;
1042 }
1043
1044 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1045}
1046
1047/**
1048 * t3_link_start - apply link configuration to MAC/PHY
1049 * @phy: the PHY to setup
1050 * @mac: the MAC to setup
1051 * @lc: the requested link configuration
1052 *
1053 * Set up a port's MAC and PHY according to a desired link configuration.
1054 * - If the PHY can auto-negotiate, first decide what to advertise, then
1055 *   enable/disable auto-negotiation as desired, and reset.
1056 * - If the PHY does not auto-negotiate, just reset it.
1057 * - If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
1058 * otherwise do it later based on the outcome of auto-negotiation.
1059 */
1060int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1061{
1062 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1063
1064 lc->link_ok = 0;
1065 if (lc->supported & SUPPORTED_Autoneg) {
1066 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1067 if (fc) {
1068 lc->advertising |= ADVERTISED_Asym_Pause;
1069 if (fc & PAUSE_RX)
1070 lc->advertising |= ADVERTISED_Pause;
1071 }
1072 phy->ops->advertise(phy, lc->advertising);
1073
1074 if (lc->autoneg == AUTONEG_DISABLE) {
1075 lc->speed = lc->requested_speed;
1076 lc->duplex = lc->requested_duplex;
1077 lc->fc = (unsigned char)fc;
1078 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1079 fc);
1080 /* Also disables autoneg */
1081 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1082 phy->ops->reset(phy, 0);
1083 } else
1084 phy->ops->autoneg_enable(phy);
1085 } else {
1086 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1087 lc->fc = (unsigned char)fc;
1088 phy->ops->reset(phy, 0);
1089 }
1090 return 0;
1091}
1092
1093/**
1094 * t3_set_vlan_accel - control HW VLAN extraction
1095 * @adapter: the adapter
1096 * @ports: bitmap of adapter ports to operate on
1097 * @on: enable (1) or disable (0) HW VLAN extraction
1098 *
1099 * Enables or disables HW extraction of VLAN tags for the given ports.
1100 */
1101void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1102{
1103 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1104 ports << S_VLANEXTRACTIONENABLE,
1105 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1106}
1107
1108struct intr_info {
1109 unsigned int mask; /* bits to check in interrupt status */
1110 const char *msg; /* message to print or NULL */
1111 short stat_idx; /* stat counter to increment or -1 */
1112 unsigned short fatal:1; /* whether the condition reported is fatal */
1113};
1114
1115/**
1116 * t3_handle_intr_status - table-driven interrupt handler
1117 * @adapter: the adapter that generated the interrupt
1118 * @reg: the interrupt status register to process
1119 * @mask: a mask to apply to the interrupt status
1120 * @acts: table of interrupt actions
1121 * @stats: statistics counters tracking interrupt occurrences
1122 *
1123 * A table-driven interrupt handler that applies a set of masks to an
1124 * interrupt status word and performs the corresponding actions if the
1125 * interrupts described by the mask have occurred. The actions include
1126 * optionally printing a warning or alert message, and optionally
1127 * incrementing a stat counter. The table is terminated by an entry
1128 * specifying mask 0. Returns the number of fatal interrupt conditions.
1129 */
1130static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1131 unsigned int mask,
1132 const struct intr_info *acts,
1133 unsigned long *stats)
1134{
1135 int fatal = 0;
1136 unsigned int status = t3_read_reg(adapter, reg) & mask;
1137
1138 for (; acts->mask; ++acts) {
1139 if (!(status & acts->mask))
1140 continue;
1141 if (acts->fatal) {
1142 fatal++;
1143 CH_ALERT(adapter, "%s (0x%x)\n",
1144 acts->msg, status & acts->mask);
1145 } else if (acts->msg)
1146 CH_WARN(adapter, "%s (0x%x)\n",
1147 acts->msg, status & acts->mask);
1148 if (acts->stat_idx >= 0)
1149 stats[acts->stat_idx]++;
1150 }
1151 if (status) /* clear processed interrupts */
1152 t3_write_reg(adapter, reg, status);
1153 return fatal;
1154}
1155
1156#define SGE_INTR_MASK (F_RSPQDISABLED)
1157#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1158 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1159 F_NFASRCHFAIL)
1160#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1161#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1162 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1163 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1164#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1165 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1166 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1167 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1168 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1169 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1170#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1171 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1172 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1173 V_BISTERR(M_BISTERR) | F_PEXERR)
1174#define ULPRX_INTR_MASK F_PARERR
1175#define ULPTX_INTR_MASK 0
1176#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1177 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1178 F_ZERO_SWITCH_ERROR)
1179#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1180 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1181 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1182 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1183#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1184 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1185 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1186#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1187 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1188 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1189#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1190 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1191 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1192 V_MCAPARERRENB(M_MCAPARERRENB))
1193#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1194 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1195 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1196 F_MPS0 | F_CPL_SWITCH)
1197
1198/*
1199 * Interrupt handler for the PCIX1 module.
1200 */
1201static void pci_intr_handler(struct adapter *adapter)
1202{
1203 static const struct intr_info pcix1_intr_info[] = {
1204 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1205 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1206 {F_RCVTARABT, "PCI received target abort", -1, 1},
1207 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1208 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1209 {F_DETPARERR, "PCI detected parity error", -1, 1},
1210 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1211 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1212 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1213 1},
1214 {F_DETCORECCERR, "PCI correctable ECC error",
1215 STAT_PCI_CORR_ECC, 0},
1216 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1217 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1218 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1219 1},
1220 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1221 1},
1222 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1223 1},
1224 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1225 "error", -1, 1},
1226 {0}
1227 };
1228
1229 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1230 pcix1_intr_info, adapter->irq_stats))
1231 t3_fatal_err(adapter);
1232}
1233
1234/*
1235 * Interrupt handler for the PCIE module.
1236 */
1237static void pcie_intr_handler(struct adapter *adapter)
1238{
1239 static const struct intr_info pcie_intr_info[] = {
1240 {F_PEXERR, "PCI PEX error", -1, 1},
1241 {F_UNXSPLCPLERRR,
1242 "PCI unexpected split completion DMA read error", -1, 1},
1243 {F_UNXSPLCPLERRC,
1244 "PCI unexpected split completion DMA command error", -1, 1},
1245 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1246 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1247 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1248 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1249 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1250 "PCI MSI-X table/PBA parity error", -1, 1},
1251 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1252 {0}
1253 };
1254
1255 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1256 pcie_intr_info, adapter->irq_stats))
1257 t3_fatal_err(adapter);
1258}
1259
1260/*
1261 * TP interrupt handler.
1262 */
1263static void tp_intr_handler(struct adapter *adapter)
1264{
1265 static const struct intr_info tp_intr_info[] = {
1266 {0xffffff, "TP parity error", -1, 1},
1267 {0x1000000, "TP out of Rx pages", -1, 1},
1268 {0x2000000, "TP out of Tx pages", -1, 1},
1269 {0}
1270 };
1271
1272 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1273 tp_intr_info, NULL))
1274 t3_fatal_err(adapter);
1275}
1276
1277/*
1278 * CIM interrupt handler.
1279 */
1280static void cim_intr_handler(struct adapter *adapter)
1281{
1282 static const struct intr_info cim_intr_info[] = {
1283 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1284 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1285 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1286 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1287 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1288 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1289 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1290 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1291 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1292 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1293 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1294 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1295 {0}
1296 };
1297
1298 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1299 cim_intr_info, NULL))
1300 t3_fatal_err(adapter);
1301}
1302
1303/*
1304 * ULP RX interrupt handler.
1305 */
1306static void ulprx_intr_handler(struct adapter *adapter)
1307{
1308 static const struct intr_info ulprx_intr_info[] = {
1309 {F_PARERR, "ULP RX parity error", -1, 1},
1310 {0}
1311 };
1312
1313 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1314 ulprx_intr_info, NULL))
1315 t3_fatal_err(adapter);
1316}
1317
1318/*
1319 * ULP TX interrupt handler.
1320 */
1321static void ulptx_intr_handler(struct adapter *adapter)
1322{
1323 static const struct intr_info ulptx_intr_info[] = {
1324 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1325 STAT_ULP_CH0_PBL_OOB, 0},
1326 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1327 STAT_ULP_CH1_PBL_OOB, 0},
1328 {0}
1329 };
1330
1331 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1332 ulptx_intr_info, adapter->irq_stats))
1333 t3_fatal_err(adapter);
1334}
1335
1336#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1337 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1338 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1339 F_ICSPI1_TX_FRAMING_ERROR)
1340#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1341 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1342 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1343 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1344
1345/*
1346 * PM TX interrupt handler.
1347 */
1348static void pmtx_intr_handler(struct adapter *adapter)
1349{
1350 static const struct intr_info pmtx_intr_info[] = {
1351 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1352 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1353 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1354 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1355 "PMTX ispi parity error", -1, 1},
1356 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1357 "PMTX ospi parity error", -1, 1},
1358 {0}
1359 };
1360
1361 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1362 pmtx_intr_info, NULL))
1363 t3_fatal_err(adapter);
1364}
1365
1366#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1367 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1368 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1369 F_IESPI1_TX_FRAMING_ERROR)
1370#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1371 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1372 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1373 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1374
1375/*
1376 * PM RX interrupt handler.
1377 */
1378static void pmrx_intr_handler(struct adapter *adapter)
1379{
1380 static const struct intr_info pmrx_intr_info[] = {
1381 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1382 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1383 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1384 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1385 "PMRX ispi parity error", -1, 1},
1386 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1387 "PMRX ospi parity error", -1, 1},
1388 {0}
1389 };
1390
1391 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1392 pmrx_intr_info, NULL))
1393 t3_fatal_err(adapter);
1394}
1395
1396/*
1397 * CPL switch interrupt handler.
1398 */
1399static void cplsw_intr_handler(struct adapter *adapter)
1400{
1401 static const struct intr_info cplsw_intr_info[] = {
1402/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1403 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1404 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1405 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1406 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1407 {0}
1408 };
1409
1410 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1411 cplsw_intr_info, NULL))
1412 t3_fatal_err(adapter);
1413}
1414
1415/*
1416 * MPS interrupt handler.
1417 */
1418static void mps_intr_handler(struct adapter *adapter)
1419{
1420 static const struct intr_info mps_intr_info[] = {
1421 {0x1ff, "MPS parity error", -1, 1},
1422 {0}
1423 };
1424
1425 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1426 mps_intr_info, NULL))
1427 t3_fatal_err(adapter);
1428}
1429
1430#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1431
1432/*
1433 * MC7 interrupt handler.
1434 */
1435static void mc7_intr_handler(struct mc7 *mc7)
1436{
1437 struct adapter *adapter = mc7->adapter;
1438 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1439
1440 if (cause & F_CE) {
1441 mc7->stats.corr_err++;
1442 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1443 "data 0x%x 0x%x 0x%x\n", mc7->name,
1444 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1445 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1446 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1447 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1448 }
1449
1450 if (cause & F_UE) {
1451 mc7->stats.uncorr_err++;
1452 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1453 "data 0x%x 0x%x 0x%x\n", mc7->name,
1454 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1455 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1456 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1457 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1458 }
1459
1460 if (G_PE(cause)) {
1461 mc7->stats.parity_err++;
1462 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1463 mc7->name, G_PE(cause));
1464 }
1465
1466 if (cause & F_AE) {
1467 u32 addr = 0;
1468
1469 if (adapter->params.rev > 0)
1470 addr = t3_read_reg(adapter,
1471 mc7->offset + A_MC7_ERR_ADDR);
1472 mc7->stats.addr_err++;
1473 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1474 mc7->name, addr);
1475 }
1476
1477 if (cause & MC7_INTR_FATAL)
1478 t3_fatal_err(adapter);
1479
1480 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1481}
1482
1483#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1484 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1485/*
1486 * XGMAC interrupt handler.
1487 */
1488static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1489{
1490 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1491 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1492
1493 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1494 mac->stats.tx_fifo_parity_err++;
1495 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1496 }
1497 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1498 mac->stats.rx_fifo_parity_err++;
1499 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1500 }
1501 if (cause & F_TXFIFO_UNDERRUN)
1502 mac->stats.tx_fifo_urun++;
1503 if (cause & F_RXFIFO_OVERFLOW)
1504 mac->stats.rx_fifo_ovfl++;
1505 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1506 mac->stats.serdes_signal_loss++;
1507 if (cause & F_XAUIPCSCTCERR)
1508 mac->stats.xaui_pcs_ctc_err++;
1509 if (cause & F_XAUIPCSALIGNCHANGE)
1510 mac->stats.xaui_pcs_align_change++;
1511
1512 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1513 if (cause & XGM_INTR_FATAL)
1514 t3_fatal_err(adap);
1515 return cause != 0;
1516}
1517
1518/*
1519 * Interrupt handler for PHY events.
1520 */
1521int t3_phy_intr_handler(struct adapter *adapter)
1522{
1523 static const int intr_gpio_bits[] = { 8, 0x20 };
1524
1525 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1526
1527 for_each_port(adapter, i) {
1528 if (cause & intr_gpio_bits[i]) {
1529 struct cphy *phy = &adap2pinfo(adapter, i)->phy;
1530 int phy_cause = phy->ops->intr_handler(phy);
1531
1532 if (phy_cause & cphy_cause_link_change)
1533 t3_link_changed(adapter, i);
1534 if (phy_cause & cphy_cause_fifo_error)
1535 phy->fifo_errors++;
1536 }
1537 }
1538
1539 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1540 return 0;
1541}
1542
1543/*
1544 * T3 slow path (non-data) interrupt handler.
1545 */
1546int t3_slow_intr_handler(struct adapter *adapter)
1547{
1548 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1549
1550 cause &= adapter->slow_intr_mask;
1551 if (!cause)
1552 return 0;
1553 if (cause & F_PCIM0) {
1554 if (is_pcie(adapter))
1555 pcie_intr_handler(adapter);
1556 else
1557 pci_intr_handler(adapter);
1558 }
1559 if (cause & F_SGE3)
1560 t3_sge_err_intr_handler(adapter);
1561 if (cause & F_MC7_PMRX)
1562 mc7_intr_handler(&adapter->pmrx);
1563 if (cause & F_MC7_PMTX)
1564 mc7_intr_handler(&adapter->pmtx);
1565 if (cause & F_MC7_CM)
1566 mc7_intr_handler(&adapter->cm);
1567 if (cause & F_CIM)
1568 cim_intr_handler(adapter);
1569 if (cause & F_TP1)
1570 tp_intr_handler(adapter);
1571 if (cause & F_ULP2_RX)
1572 ulprx_intr_handler(adapter);
1573 if (cause & F_ULP2_TX)
1574 ulptx_intr_handler(adapter);
1575 if (cause & F_PM1_RX)
1576 pmrx_intr_handler(adapter);
1577 if (cause & F_PM1_TX)
1578 pmtx_intr_handler(adapter);
1579 if (cause & F_CPL_SWITCH)
1580 cplsw_intr_handler(adapter);
1581 if (cause & F_MPS0)
1582 mps_intr_handler(adapter);
1583 if (cause & F_MC5A)
1584 t3_mc5_intr_handler(&adapter->mc5);
1585 if (cause & F_XGMAC0_0)
1586 mac_intr_handler(adapter, 0);
1587 if (cause & F_XGMAC0_1)
1588 mac_intr_handler(adapter, 1);
1589 if (cause & F_T3DBG)
1590 t3_os_ext_intr_handler(adapter);
1591
1592 /* Clear the interrupts just processed. */
1593 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1594 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1595 return 1;
1596}
1597
1598/**
1599 * t3_intr_enable - enable interrupts
1600 * @adapter: the adapter whose interrupts should be enabled
1601 *
1602 * Enable interrupts by setting the interrupt enable registers of the
1603 * various HW modules and then enabling the top-level interrupt
1604 * concentrator.
1605 */
1606void t3_intr_enable(struct adapter *adapter)
1607{
1608 static const struct addr_val_pair intr_en_avp[] = {
1609 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1610 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1611 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1612 MC7_INTR_MASK},
1613 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1614 MC7_INTR_MASK},
1615 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1616 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1617 {A_TP_INT_ENABLE, 0x3bfffff},
1618 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1619 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1620 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1621 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1622 };
1623
1624 adapter->slow_intr_mask = PL_INTR_MASK;
1625
1626 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1627
1628 if (adapter->params.rev > 0) {
1629 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1630 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1631 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1632 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1633 F_PBL_BOUND_ERR_CH1);
1634 } else {
1635 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1636 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1637 }
1638
1639 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1640 adapter_info(adapter)->gpio_intr);
1641 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1642 adapter_info(adapter)->gpio_intr);
1643 if (is_pcie(adapter))
1644 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1645 else
1646 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1647 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1648 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1649}
1650
1651/**
1652 * t3_intr_disable - disable a card's interrupts
1653 * @adapter: the adapter whose interrupts should be disabled
1654 *
1655 * Disable interrupts. We only disable the top-level interrupt
1656 * concentrator and the SGE data interrupts.
1657 */
1658void t3_intr_disable(struct adapter *adapter)
1659{
1660 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1661 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1662 adapter->slow_intr_mask = 0;
1663}
1664
1665/**
1666 * t3_intr_clear - clear all interrupts
1667 * @adapter: the adapter whose interrupts should be cleared
1668 *
1669 * Clears all interrupts.
1670 */
1671void t3_intr_clear(struct adapter *adapter)
1672{
1673 static const unsigned int cause_reg_addr[] = {
1674 A_SG_INT_CAUSE,
1675 A_SG_RSPQ_FL_STATUS,
1676 A_PCIX_INT_CAUSE,
1677 A_MC7_INT_CAUSE,
1678 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1679 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1680 A_CIM_HOST_INT_CAUSE,
1681 A_TP_INT_CAUSE,
1682 A_MC5_DB_INT_CAUSE,
1683 A_ULPRX_INT_CAUSE,
1684 A_ULPTX_INT_CAUSE,
1685 A_CPL_INTR_CAUSE,
1686 A_PM1_TX_INT_CAUSE,
1687 A_PM1_RX_INT_CAUSE,
1688 A_MPS_INT_CAUSE,
1689 A_T3DBG_INT_CAUSE,
1690 };
1691 unsigned int i;
1692
1693 /* Clear PHY and MAC interrupts for each port. */
1694 for_each_port(adapter, i)
1695 t3_port_intr_clear(adapter, i);
1696
1697 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1698 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1699
1700 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1701 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1702}
1703
1704/**
1705 * t3_port_intr_enable - enable port-specific interrupts
1706 * @adapter: associated adapter
1707 * @idx: index of port whose interrupts should be enabled
1708 *
1709 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1710 * adapter port.
1711 */
1712void t3_port_intr_enable(struct adapter *adapter, int idx)
1713{
1714 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1715
1716 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1717 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1718 phy->ops->intr_enable(phy);
1719}
1720
1721/**
1722 * t3_port_intr_disable - disable port-specific interrupts
1723 * @adapter: associated adapter
1724 * @idx: index of port whose interrupts should be disabled
1725 *
1726 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1727 * adapter port.
1728 */
1729void t3_port_intr_disable(struct adapter *adapter, int idx)
1730{
1731 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1732
1733 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1734 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1735 phy->ops->intr_disable(phy);
1736}
1737
1738/**
1739 * t3_port_intr_clear - clear port-specific interrupts
1740 * @adapter: associated adapter
1741 * @idx: index of port whose interrupts to clear
1742 *
1743 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1744 * adapter port.
1745 */
1746void t3_port_intr_clear(struct adapter *adapter, int idx)
1747{
1748 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1749
1750 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1751 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1752 phy->ops->intr_clear(phy);
1753}
1754
1755/**
1756 * t3_sge_write_context - write an SGE context
1757 * @adapter: the adapter
1758 * @id: the context id
1759 * @type: the context type
1760 *
1761 * Program an SGE context with the values already loaded in the
1762 * CONTEXT_DATA? registers.
1763 */
1764static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1765 unsigned int type)
1766{
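	/* unmask all bits so the full contents of the CONTEXT_DATA registers
	 * are written into the context */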
1767 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1768 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1769 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1770 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1771 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1772 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1773 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1774 0, 5, 1);
1775}
1776
1777/**
1778 * t3_sge_init_ecntxt - initialize an SGE egress context
1779 * @adapter: the adapter to configure
1780 * @id: the context id
1781 * @gts_enable: whether to enable GTS for the context
1782 * @type: the egress context type
1783 * @respq: associated response queue
1784 * @base_addr: base address of queue
1785 * @size: number of queue entries
1786 * @token: uP token
1787 * @gen: initial generation value for the context
1788 * @cidx: consumer pointer
1789 *
1790 * Initialize an SGE egress context and make it ready for use. If the
1791 * platform allows concurrent context operations, the caller is
1792 * responsible for appropriate locking.
1793 */
1794int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1795 enum sge_context_type type, int respq, u64 base_addr,
1796 unsigned int size, unsigned int token, int gen,
1797 unsigned int cidx)
1798{
1799 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1800
1801 if (base_addr & 0xfff) /* must be 4K aligned */
1802 return -EINVAL;
1803 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1804 return -EBUSY;
1805
1806 base_addr >>= 12;
1807 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1808 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1809 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1810 V_EC_BASE_LO(base_addr & 0xffff));
1811 base_addr >>= 16;
1812 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1813 base_addr >>= 32;
1814 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1815 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1816 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1817 F_EC_VALID);
1818 return t3_sge_write_context(adapter, id, F_EGRESS);
1819}
1820
1821/**
1822 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1823 * @adapter: the adapter to configure
1824 * @id: the context id
1825 * @gts_enable: whether to enable GTS for the context
1826 * @base_addr: base address of queue
1827 * @size: number of queue entries
1828 * @bsize: size of each buffer for this queue
1829 * @cong_thres: threshold to signal congestion to upstream producers
1830 * @gen: initial generation value for the context
1831 * @cidx: consumer pointer
1832 *
1833 * Initialize an SGE free list context and make it ready for use. The
1834 * caller is responsible for ensuring only one context operation occurs
1835 * at a time.
1836 */
1837int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1838 int gts_enable, u64 base_addr, unsigned int size,
1839 unsigned int bsize, unsigned int cong_thres, int gen,
1840 unsigned int cidx)
1841{
1842 if (base_addr & 0xfff) /* must be 4K aligned */
1843 return -EINVAL;
1844 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1845 return -EBUSY;
1846
1847 base_addr >>= 12;
1848 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1849 base_addr >>= 32;
1850 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1851 V_FL_BASE_HI((u32) base_addr) |
1852 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1853 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1854 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1855 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1856 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1857 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1858 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1859 return t3_sge_write_context(adapter, id, F_FREELIST);
1860}
1861
1862/**
1863 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1864 * @adapter: the adapter to configure
1865 * @id: the context id
1866 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1867 * @base_addr: base address of queue
1868 * @size: number of queue entries
1869 * @fl_thres: threshold for selecting the normal or jumbo free list
1870 * @gen: initial generation value for the context
1871 * @cidx: consumer pointer
1872 *
1873 * Initialize an SGE response queue context and make it ready for use.
1874 * The caller is responsible for ensuring only one context operation
1875 * occurs at a time.
1876 */
1877int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1878 int irq_vec_idx, u64 base_addr, unsigned int size,
1879 unsigned int fl_thres, int gen, unsigned int cidx)
1880{
1881 unsigned int intr = 0;
1882
1883 if (base_addr & 0xfff) /* must be 4K aligned */
1884 return -EINVAL;
1885 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1886 return -EBUSY;
1887
1888 base_addr >>= 12;
1889 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1890 V_CQ_INDEX(cidx));
1891 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1892 base_addr >>= 32;
1893 if (irq_vec_idx >= 0)
1894 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1895 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1896 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1897 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1898 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1899}
1900
1901/**
1902 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1903 * @adapter: the adapter to configure
1904 * @id: the context id
1905 * @base_addr: base address of queue
1906 * @size: number of queue entries
1907 * @rspq: response queue for async notifications
1908 * @ovfl_mode: CQ overflow mode
1909 * @credits: completion queue credits
1910 * @credit_thres: the credit threshold
1911 *
1912 * Initialize an SGE completion queue context and make it ready for use.
1913 * The caller is responsible for ensuring only one context operation
1914 * occurs at a time.
1915 */
1916int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1917 unsigned int size, int rspq, int ovfl_mode,
1918 unsigned int credits, unsigned int credit_thres)
1919{
1920 if (base_addr & 0xfff) /* must be 4K aligned */
1921 return -EINVAL;
1922 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1923 return -EBUSY;
1924
1925 base_addr >>= 12;
1926 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1927 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1928 base_addr >>= 32;
1929 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1930 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1931 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1932 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1933 V_CQ_CREDIT_THRES(credit_thres));
1934 return t3_sge_write_context(adapter, id, F_CQ);
1935}
1936
1937/**
1938 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1939 * @adapter: the adapter
1940 * @id: the egress context id
1941 * @enable: enable (1) or disable (0) the context
1942 *
1943 * Enable or disable an SGE egress context. The caller is responsible for
1944 * ensuring only one context operation occurs at a time.
1945 */
1946int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1947{
1948 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1949 return -EBUSY;
1950
1951 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1952 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1953 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1954 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1955 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1956 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1957 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1958 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1959 0, 5, 1);
1960}
1961
1962/**
1963 * t3_sge_disable_fl - disable an SGE free-buffer list
1964 * @adapter: the adapter
1965 * @id: the free list context id
1966 *
1967 * Disable an SGE free-buffer list. The caller is responsible for
1968 * ensuring only one context operation occurs at a time.
1969 */
1970int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1971{
1972 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1973 return -EBUSY;
1974
1975 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1976 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1977 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1978 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1979 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1980 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1981 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1982 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1983 0, 5, 1);
1984}
1985
1986/**
1987 * t3_sge_disable_rspcntxt - disable an SGE response queue
1988 * @adapter: the adapter
1989 * @id: the response queue context id
1990 *
1991 * Disable an SGE response queue. The caller is responsible for
1992 * ensuring only one context operation occurs at a time.
1993 */
1994int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1995{
1996 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1997 return -EBUSY;
1998
1999 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2000 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2001 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2002 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2003 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2004 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2005 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2006 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2007 0, 5, 1);
2008}
2009
2010/**
2011 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2012 * @adapter: the adapter
2013 * @id: the completion queue context id
2014 *
2015 * Disable an SGE completion queue. The caller is responsible for
2016 * ensuring only one context operation occurs at a time.
2017 */
2018int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2019{
2020 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2021 return -EBUSY;
2022
2023 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2024 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2025 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2026 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2027 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2028 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2029 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2030 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2031 0, 5, 1);
2032}
2033
2034/**
2035 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2036 * @adapter: the adapter
2037 * @id: the context id
2038 * @op: the operation to perform
2039 *
2040 * Perform the selected operation on an SGE completion queue context.
2041 * The caller is responsible for ensuring only one context operation
2042 * occurs at a time.
2043 */
2044int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2045 unsigned int credits)
2046{
2047 u32 val;
2048
2049 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2050 return -EBUSY;
2051
2052 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2053 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2054 V_CONTEXT(id) | F_CQ);
2055 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2056 0, 5, 1, &val))
2057 return -EIO;
2058
2059 if (op >= 2 && op < 7) {
2060 if (adapter->params.rev > 0)
2061 return G_CQ_INDEX(val);
2062
2063 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2064 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2065 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2066 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2067 return -EIO;
2068 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2069 }
2070 return 0;
2071}
2072
2073/**
2074 * t3_sge_read_context - read an SGE context
2075 * @type: the context type
2076 * @adapter: the adapter
2077 * @id: the context id
2078 * @data: holds the retrieved context
2079 *
2080 * Read an SGE context. The caller is responsible for ensuring
2081 * only one context operation occurs at a time.
2082 */
2083static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2084 unsigned int id, u32 data[4])
2085{
2086 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2087 return -EBUSY;
2088
2089 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2090 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2091 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2092 5, 1))
2093 return -EIO;
2094 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2095 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2096 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2097 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2098 return 0;
2099}
2100
2101/**
2102 * t3_sge_read_ecntxt - read an SGE egress context
2103 * @adapter: the adapter
2104 * @id: the context id
2105 * @data: holds the retrieved context
2106 *
2107 * Read an SGE egress context. The caller is responsible for ensuring
2108 * only one context operation occurs at a time.
2109 */
2110int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2111{
2112 if (id >= 65536)
2113 return -EINVAL;
2114 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2115}
2116
2117/**
2118 * t3_sge_read_cq - read an SGE CQ context
2119 * @adapter: the adapter
2120 * @id: the context id
2121 * @data: holds the retrieved context
2122 *
2123 * Read an SGE CQ context. The caller is responsible for ensuring
2124 * only one context operation occurs at a time.
2125 */
2126int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2127{
2128 if (id >= 65536)
2129 return -EINVAL;
2130 return t3_sge_read_context(F_CQ, adapter, id, data);
2131}
2132
2133/**
2134 * t3_sge_read_fl - read an SGE free-list context
2135 * @adapter: the adapter
2136 * @id: the context id
2137 * @data: holds the retrieved context
2138 *
2139 * Read an SGE free-list context. The caller is responsible for ensuring
2140 * only one context operation occurs at a time.
2141 */
2142int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2143{
2144 if (id >= SGE_QSETS * 2)
2145 return -EINVAL;
2146 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2147}
2148
2149/**
2150 * t3_sge_read_rspq - read an SGE response queue context
2151 * @adapter: the adapter
2152 * @id: the context id
2153 * @data: holds the retrieved context
2154 *
2155 * Read an SGE response queue context. The caller is responsible for
2156 * ensuring only one context operation occurs at a time.
2157 */
2158int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2159{
2160 if (id >= SGE_QSETS)
2161 return -EINVAL;
2162 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2163}
2164
2165/**
2166 * t3_config_rss - configure Rx packet steering
2167 * @adapter: the adapter
2168 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2169 * @cpus: values for the CPU lookup table (0xff terminated)
2170 * @rspq: values for the response queue lookup table (0xffff terminated)
2171 *
2172 * Programs the receive packet steering logic. @cpus and @rspq provide
2173 * the values for the CPU and response queue lookup tables. If they
2174 * provide fewer values than the size of the tables, the supplied values
2175 * are used repeatedly until the tables are fully populated.
2176 */
2177void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2178 const u8 * cpus, const u16 *rspq)
2179{
2180 int i, j, cpu_idx = 0, q_idx = 0;
2181
2182 if (cpus)
2183 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2184 u32 val = i << 16;
2185
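			/* two CPU values per entry, one per byte, with the
			 * entry index in the upper half-word */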
2186 for (j = 0; j < 2; ++j) {
2187 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2188 if (cpus[cpu_idx] == 0xff)
2189 cpu_idx = 0;
2190 }
2191 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2192 }
2193
2194 if (rspq)
2195 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2196 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2197 (i << 16) | rspq[q_idx++]);
2198 if (rspq[q_idx] == 0xffff)
2199 q_idx = 0;
2200 }
2201
2202 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2203}
2204
2205/**
2206 * t3_read_rss - read the contents of the RSS tables
2207 * @adapter: the adapter
2208 * @lkup: holds the contents of the RSS lookup table
2209 * @map: holds the contents of the RSS map table
2210 *
2211 * Reads the contents of the receive packet steering tables.
2212 */
2213int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2214{
2215 int i;
2216 u32 val;
2217
2218 if (lkup)
2219 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2220 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2221 0xffff0000 | i);
2222 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2223 if (!(val & 0x80000000))
2224 return -EAGAIN;
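			/* each entry carries two CPU values, one per byte */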
2225 *lkup++ = val;
2226 *lkup++ = (val >> 8);
2227 }
2228
2229 if (map)
2230 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2231 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2232 0xffff0000 | i);
2233 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2234 if (!(val & 0x80000000))
2235 return -EAGAIN;
2236 *map++ = val;
2237 }
2238 return 0;
2239}
2240
2241/**
2242 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2243 * @adap: the adapter
2244 * @enable: 1 to select offload mode, 0 for regular NIC
2245 *
2246 * Switches TP to NIC/offload mode.
2247 */
2248void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2249{
2250 if (is_offload(adap) || !enable)
2251 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2252 V_NICMODE(!enable));
2253}
2254
2255/**
2256 * pm_num_pages - calculate the number of pages of the payload memory
2257 * @mem_size: the size of the payload memory
2258 * @pg_size: the size of each payload memory page
2259 *
2260 * Calculate the number of pages, each of the given size, that fit in a
2261 * memory of the specified size, respecting the HW requirement that the
2262 * number of pages must be a multiple of 24.
2263 */
2264static inline unsigned int pm_num_pages(unsigned int mem_size,
2265 unsigned int pg_size)
2266{
2267 unsigned int n = mem_size / pg_size;
2268
2269 return n - n % 24;
2270}
2271
2272#define mem_region(adap, start, size, reg) \
2273 t3_write_reg((adap), A_ ## reg, (start)); \
2274 start += size
2275
2276/*
2277 * partition_mem - partition memory and configure TP memory settings
2278 * @adap: the adapter
2279 * @p: the TP parameters
2280 *
2281 * Partitions context and payload memory and configures TP's memory
2282 * registers.
2283 */
2284static void partition_mem(struct adapter *adap, const struct tp_params *p)
2285{
2286 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2287 unsigned int timers = 0, timers_shift = 22;
2288
2289 if (adap->params.rev > 0) {
2290 if (tids <= 16 * 1024) {
2291 timers = 1;
2292 timers_shift = 16;
2293 } else if (tids <= 64 * 1024) {
2294 timers = 2;
2295 timers_shift = 18;
2296 } else if (tids <= 256 * 1024) {
2297 timers = 3;
2298 timers_shift = 20;
2299 }
2300 }
2301
2302 t3_write_reg(adap, A_TP_PMM_SIZE,
2303 p->chan_rx_size | (p->chan_tx_size >> 16));
2304
2305 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2306 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2307 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2308 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2309 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2310
2311 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2312 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2313 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2314
2315 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2316 /* Add a bit of headroom and make multiple of 24 */
2317 pstructs += 48;
2318 pstructs -= pstructs % 24;
2319 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2320
2321 m = tids * TCB_SIZE;
2322 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2323 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2324 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2325 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2326 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2327 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2328 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2329 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2330
2331 m = (m + 4095) & ~0xfff;
2332 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2333 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2334
2335 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2336 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2337 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2338 if (tids < m)
2339 adap->params.mc5.nservers += m - tids;
2340}
2341
2342static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2343 u32 val)
2344{
2345 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2346 t3_write_reg(adap, A_TP_PIO_DATA, val);
2347}
2348
2349static void tp_config(struct adapter *adap, const struct tp_params *p)
2350{
2351 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2352 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2353 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2354 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2355 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2356 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2357 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2358 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2359 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2360 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2361 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2362 F_IPV6ENABLE | F_NICMODE);
2363 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2364 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2365 t3_set_reg_field(adap, A_TP_PARA_REG6,
2366 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2367 0);
2368
2369 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2370 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
2371 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
2372 F_RXCONGESTIONMODE);
2373 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2374
2375 if (adap->params.rev > 0) {
2376 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2377 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2378 F_TXPACEAUTO);
2379 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2380 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2381 } else
2382 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2383
2384 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2385 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2386 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2387}
2388
2389/* Desired TP timer resolution in usec */
2390#define TP_TMR_RES 50
2391
2392/* TCP timer values in ms */
2393#define TP_DACK_TIMER 50
2394#define TP_RTO_MIN 250
2395
2396/**
2397 * tp_set_timers - set TP timing parameters
2398 * @adap: the adapter to set
2399 * @core_clk: the core clock frequency in Hz
2400 *
2401 * Set TP's timing parameters, such as the various timer resolutions and
2402 * the TCP timer values.
2403 */
2404static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2405{
2406 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2407 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2408 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2409 unsigned int tps = core_clk >> tre;
2410
2411 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2412 V_DELAYEDACKRESOLUTION(dack_re) |
2413 V_TIMESTAMPRESOLUTION(tstamp_re));
2414 t3_write_reg(adap, A_TP_DACK_TIMER,
2415 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2416 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2417 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2418 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2419 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2420 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2421 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2422 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2423 V_KEEPALIVEMAX(9));
2424
2425#define SECONDS * tps
2426
2427 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2428 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2429 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2430 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2431 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2432 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2433 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2434 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2435 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2436
2437#undef SECONDS
2438}
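/*
 * Example, assuming a hypothetical 200 MHz core clock: tre = fls(10000) - 1
 * = 13, so tps = 200e6 >> 13 = 24414 ticks/s (~41 us per tick, within the
 * desired 50 us resolution), and A_TP_RXT_MIN is written as 24414 / 4 =
 * 6103 ticks, i.e. roughly the 250 ms TP_RTO_MIN.
 */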
2439
2440/**
2441 * t3_tp_set_coalescing_size - set receive coalescing size
2442 * @adap: the adapter
2443 * @size: the receive coalescing size
2444 * @psh: whether a set PSH bit should deliver coalesced data
2445 *
2446 * Set the receive coalescing size and PSH bit handling.
2447 */
2448int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2449{
2450 u32 val;
2451
2452 if (size > MAX_RX_COALESCING_LEN)
2453 return -EINVAL;
2454
2455 val = t3_read_reg(adap, A_TP_PARA_REG3);
2456 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2457
2458 if (size) {
2459 val |= F_RXCOALESCEENABLE;
2460 if (psh)
2461 val |= F_RXCOALESCEPSHEN;
2462 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2463 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2464 }
2465 t3_write_reg(adap, A_TP_PARA_REG3, val);
2466 return 0;
2467}
2468
2469/**
2470 * t3_tp_set_max_rxsize - set the max receive size
2471 * @adap: the adapter
2472 * @size: the max receive size
2473 *
2474 * Set TP's max receive size. This is the limit that applies when
2475 * receive coalescing is disabled.
2476 */
2477void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2478{
2479 t3_write_reg(adap, A_TP_PARA_REG7,
2480 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2481}
2482
2483static void __devinit init_mtus(unsigned short mtus[])
2484{
2485 /*
2486 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2487 * it can accommodate max size TCP/IP headers when SACK and timestamps
2488 * are enabled and still have at least 8 bytes of payload.
2489 */
2490 mtus[0] = 88;
2491 mtus[1] = 256;
2492 mtus[2] = 512;
2493 mtus[3] = 576;
2494 mtus[4] = 808;
2495 mtus[5] = 1024;
2496 mtus[6] = 1280;
2497 mtus[7] = 1492;
2498 mtus[8] = 1500;
2499 mtus[9] = 2002;
2500 mtus[10] = 2048;
2501 mtus[11] = 4096;
2502 mtus[12] = 4352;
2503 mtus[13] = 8192;
2504 mtus[14] = 9000;
2505 mtus[15] = 9600;
2506}
2507
2508/*
2509 * Initial congestion control parameters.
2510 */
2511static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2512{
2513 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2514 a[9] = 2;
2515 a[10] = 3;
2516 a[11] = 4;
2517 a[12] = 5;
2518 a[13] = 6;
2519 a[14] = 7;
2520 a[15] = 8;
2521 a[16] = 9;
2522 a[17] = 10;
2523 a[18] = 14;
2524 a[19] = 17;
2525 a[20] = 21;
2526 a[21] = 25;
2527 a[22] = 30;
2528 a[23] = 35;
2529 a[24] = 45;
2530 a[25] = 60;
2531 a[26] = 80;
2532 a[27] = 100;
2533 a[28] = 200;
2534 a[29] = 300;
2535 a[30] = 400;
2536 a[31] = 500;
2537
2538 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2539 b[9] = b[10] = 1;
2540 b[11] = b[12] = 2;
2541 b[13] = b[14] = b[15] = b[16] = 3;
2542 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2543 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2544 b[28] = b[29] = 6;
2545 b[30] = b[31] = 7;
2546}
2547
2548/* The minimum additive increment value for the congestion control table */
2549#define CC_MIN_INCR 2U
2550
2551/**
2552 * t3_load_mtus - write the MTU and congestion control HW tables
2553 * @adap: the adapter
2554 * @mtus: the unrestricted values for the MTU table
2555 * @alpha: the values for the congestion control alpha parameter
2556 * @beta: the values for the congestion control beta parameter
2557 * @mtu_cap: the maximum permitted effective MTU
2558 *
2559 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2560 * Update the high-speed congestion control table with the supplied alpha,
2561 * beta, and MTUs.
2562 */
2563void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2564 unsigned short alpha[NCCTRL_WIN],
2565 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2566{
2567 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2568 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2569 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2570 28672, 40960, 57344, 81920, 114688, 163840, 229376
2571 };
2572
2573 unsigned int i, w;
2574
2575 for (i = 0; i < NMTUS; ++i) {
2576 unsigned int mtu = min(mtus[i], mtu_cap);
2577 unsigned int log2 = fls(mtu);
2578
2579 if (!(mtu & ((1 << log2) >> 2))) /* round */
2580 log2--;
2581 t3_write_reg(adap, A_TP_MTU_TABLE,
2582 (i << 24) | (log2 << 16) | mtu);
2583
2584 for (w = 0; w < NCCTRL_WIN; ++w) {
2585 unsigned int inc;
2586
2587 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2588 CC_MIN_INCR);
2589
2590 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2591 (w << 16) | (beta[w] << 13) | inc);
2592 }
2593 }
2594}
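/*
 * Example entry: for an uncapped MTU of 1500, fls(1500) = 11 but bit 9
 * (512) is clear, so the encoded log2 rounds down to 10 and the table
 * entry becomes (i << 24) | (10 << 16) | 1500.  With the default
 * alpha[0] = 1 from init_cong_ctrl(), window 0 gets
 * inc = (1500 - 40) * 1 / 2 = 730.
 */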
2595
2596/**
2597 * t3_read_hw_mtus - returns the values in the HW MTU table
2598 * @adap: the adapter
2599 * @mtus: where to store the HW MTU values
2600 *
2601 * Reads the HW MTU table.
2602 */
2603void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2604{
2605 int i;
2606
2607 for (i = 0; i < NMTUS; ++i) {
2608 unsigned int val;
2609
2610 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2611 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2612 mtus[i] = val & 0x3fff;
2613 }
2614}
2615
2616/**
2617 * t3_get_cong_cntl_tab - reads the congestion control table
2618 * @adap: the adapter
2619 * @incr: where to store the additive increment values
2620 *
2621 * Reads the additive increments programmed into the HW congestion
2622 * control table.
2623 */
2624void t3_get_cong_cntl_tab(struct adapter *adap,
2625 unsigned short incr[NMTUS][NCCTRL_WIN])
2626{
2627 unsigned int mtu, w;
2628
2629 for (mtu = 0; mtu < NMTUS; ++mtu)
2630 for (w = 0; w < NCCTRL_WIN; ++w) {
2631 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2632 0xffff0000 | (mtu << 5) | w);
2633 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2634 0x1fff;
2635 }
2636}
2637
2638/**
2639 * t3_tp_get_mib_stats - read TP's MIB counters
2640 * @adap: the adapter
2641 * @tps: holds the returned counter values
2642 *
2643 * Returns the values of TP's MIB counters.
2644 */
2645void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2646{
2647 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2648 sizeof(*tps) / sizeof(u32), 0);
2649}
2650
2651#define ulp_region(adap, name, start, len) \
2652 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2653 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2654 (start) + (len) - 1); \
2655 start += len
2656
2657#define ulptx_region(adap, name, start, len) \
2658 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2659 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2660 (start) + (len) - 1)
2661
2662static void ulp_config(struct adapter *adap, const struct tp_params *p)
2663{
2664 unsigned int m = p->chan_rx_size;
2665
2666 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2667 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2668 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2669 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2670 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2671 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2672 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2673 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2674}
2675
2676void t3_config_trace_filter(struct adapter *adapter,
2677 const struct trace_params *tp, int filter_index,
2678 int invert, int enable)
2679{
2680 u32 addr, key[4], mask[4];
2681
2682 key[0] = tp->sport | (tp->sip << 16);
2683 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2684 key[2] = tp->dip;
2685 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2686
2687 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2688 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2689 mask[2] = tp->dip_mask;
2690 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2691
2692 if (invert)
2693 key[3] |= (1 << 29);
2694 if (enable)
2695 key[3] |= (1 << 28);
2696
2697 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2698 tp_wr_indirect(adapter, addr++, key[0]);
2699 tp_wr_indirect(adapter, addr++, mask[0]);
2700 tp_wr_indirect(adapter, addr++, key[1]);
2701 tp_wr_indirect(adapter, addr++, mask[1]);
2702 tp_wr_indirect(adapter, addr++, key[2]);
2703 tp_wr_indirect(adapter, addr++, mask[2]);
2704 tp_wr_indirect(adapter, addr++, key[3]);
2705 tp_wr_indirect(adapter, addr, mask[3]);
2706 t3_read_reg(adapter, A_TP_PIO_DATA);
2707}
2708
2709/**
2710 * t3_config_sched - configure a HW traffic scheduler
2711 * @adap: the adapter
2712 * @kbps: target rate in Kbps
2713 * @sched: the scheduler index
2714 *
2715 * Configure a HW scheduler for the target rate.
2716 */
2717int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2718{
2719 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2720 unsigned int clk = adap->params.vpd.cclk * 1000;
2721 unsigned int selected_cpt = 0, selected_bpt = 0;
2722
2723 if (kbps > 0) {
2724 kbps *= 125; /* -> bytes */
2725 for (cpt = 1; cpt <= 255; cpt++) {
2726 tps = clk / cpt;
2727 bpt = (kbps + tps / 2) / tps;
2728 if (bpt > 0 && bpt <= 255) {
2729 v = bpt * tps;
2730 delta = v >= kbps ? v - kbps : kbps - v;
2731 if (delta <= mindelta) {
2732 mindelta = delta;
2733 selected_cpt = cpt;
2734 selected_bpt = bpt;
2735 }
2736 } else if (selected_cpt)
2737 break;
2738 }
2739 if (!selected_cpt)
2740 return -EINVAL;
2741 }
2742 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2743 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2744 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2745 if (sched & 1)
2746 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2747 else
2748 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2749 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2750 return 0;
2751}
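/*
 * Example, assuming a hypothetical 200 MHz scheduler clock: a 10000 kbps
 * target is 1,250,000 bytes/s and the search settles on cpt = 160, bpt = 1,
 * i.e. 1 byte every 160 clock ticks, which matches the target exactly.
 */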
2752
2753static int tp_init(struct adapter *adap, const struct tp_params *p)
2754{
2755 int busy = 0;
2756
2757 tp_config(adap, p);
2758 t3_set_vlan_accel(adap, 3, 0);
2759
2760 if (is_offload(adap)) {
2761 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2762 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2763 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2764 0, 1000, 5);
2765 if (busy)
2766 CH_ERR(adap, "TP initialization timed out\n");
2767 }
2768
2769 if (!busy)
2770 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2771 return busy;
2772}
2773
2774int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2775{
2776 if (port_mask & ~((1 << adap->params.nports) - 1))
2777 return -EINVAL;
2778 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2779 port_mask << S_PORT0ACTIVE);
2780 return 0;
2781}
2782
2783/*
2784 * Perform the bits of HW initialization that are dependent on the number
2785 * of available ports.
2786 */
2787static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2788{
2789 int i;
2790
2791 if (nports == 1) {
2792 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2793 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2794 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2795 F_PORT0ACTIVE | F_ENFORCEPKT);
2796 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2797 } else {
2798 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2799 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2800 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2801 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2802 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2803 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2804 F_ENFORCEPKT);
2805 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2806 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2807 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2808 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2809 for (i = 0; i < 16; i++)
2810 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2811 (i << 16) | 0x1010);
2812 }
2813}
2814
2815static int calibrate_xgm(struct adapter *adapter)
2816{
2817 if (uses_xaui(adapter)) {
2818 unsigned int v, i;
2819
2820 for (i = 0; i < 5; ++i) {
2821 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2822 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2823 msleep(1);
2824 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2825 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2826 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2827 V_XAUIIMP(G_CALIMP(v) >> 2));
2828 return 0;
2829 }
2830 }
2831 CH_ERR(adapter, "MAC calibration failed\n");
2832 return -1;
2833 } else {
2834 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2835 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2836 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2837 F_XGM_IMPSETUPDATE);
2838 }
2839 return 0;
2840}
2841
2842static void calibrate_xgm_t3b(struct adapter *adapter)
2843{
2844 if (!uses_xaui(adapter)) {
2845 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2846 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2847 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2848 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2849 F_XGM_IMPSETUPDATE);
2850 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2851 0);
2852 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2853 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2854 }
2855}
2856
2857struct mc7_timing_params {
2858 unsigned char ActToPreDly;
2859 unsigned char ActToRdWrDly;
2860 unsigned char PreCyc;
2861 unsigned char RefCyc[5];
2862 unsigned char BkCyc;
2863 unsigned char WrToRdDly;
2864 unsigned char RdToWrDly;
2865};
2866
2867/*
2868 * Write a value to a register and check that the write completed. These
2869 * writes normally complete in a cycle or two, so one read should suffice.
2870 * The very first read exists to flush the posted write to the device.
2871 */
2872static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2873{
2874 t3_write_reg(adapter, addr, val);
2875 t3_read_reg(adapter, addr); /* flush */
2876 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2877 return 0;
2878 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2879 return -EIO;
2880}
2881
2882static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2883{
2884 static const unsigned int mc7_mode[] = {
2885 0x632, 0x642, 0x652, 0x432, 0x442
2886 };
2887 static const struct mc7_timing_params mc7_timings[] = {
2888 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2889 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2890 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2891 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2892 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
2893 };
2894
2895 u32 val;
2896 unsigned int width, density, slow, attempts;
2897 struct adapter *adapter = mc7->adapter;
2898 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2899
2900 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2901 slow = val & F_SLOW;
2902 width = G_WIDTH(val);
2903 density = G_DEN(val);
2904
2905 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2906 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2907 msleep(1);
2908
2909 if (!slow) {
2910 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2911 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2912 msleep(1);
2913 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2914 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2915 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2916 mc7->name);
2917 goto out_fail;
2918 }
2919 }
2920
2921 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2922 V_ACTTOPREDLY(p->ActToPreDly) |
2923 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2924 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2925 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2926
2927 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2928 val | F_CLKEN | F_TERM150);
2929 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2930
2931 if (!slow)
2932 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2933 F_DLLENB);
2934 udelay(1);
2935
2936 val = slow ? 3 : 6;
2937 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2938 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2939 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2940 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2941 goto out_fail;
2942
2943 if (!slow) {
2944 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2945 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2946 udelay(5);
2947 }
2948
2949 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2950 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2951 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2952 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2953 mc7_mode[mem_type]) ||
2954 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2955 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2956 goto out_fail;
2957
2958 /* clock value is in KHz */
2959 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
2960 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
2961
2962 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2963 F_PERREFEN | V_PREREFDIV(mc7_clock));
2964 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2965
2966 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2967 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2968 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2969 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2970 (mc7->size << width) - 1);
2971 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2972 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2973
2974 attempts = 50;
2975 do {
2976 msleep(250);
2977 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2978 } while ((val & F_BUSY) && --attempts);
2979 if (val & F_BUSY) {
2980 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2981 goto out_fail;
2982 }
2983
2984 /* Enable normal memory accesses. */
2985 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2986 return 0;
2987
2988out_fail:
2989 return -1;
2990}
2991
2992static void config_pcie(struct adapter *adap)
2993{
2994 static const u16 ack_lat[4][6] = {
2995 {237, 416, 559, 1071, 2095, 4143},
2996 {128, 217, 289, 545, 1057, 2081},
2997 {73, 118, 154, 282, 538, 1050},
2998 {67, 107, 86, 150, 278, 534}
2999 };
3000 static const u16 rpl_tmr[4][6] = {
3001 {711, 1248, 1677, 3213, 6285, 12429},
3002 {384, 651, 867, 1635, 3171, 6243},
3003 {219, 354, 462, 846, 1614, 3150},
3004 {201, 321, 258, 450, 834, 1602}
3005 };
3006
3007 u16 val;
3008 unsigned int log2_width, pldsize;
3009 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3010
3011 pci_read_config_word(adap->pdev,
3012 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3013 &val);
3014 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3015 pci_read_config_word(adap->pdev,
3016 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3017 &val);
3018
3019 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3020 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3021 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3022 log2_width = fls(adap->params.pci.width) - 1;
3023 acklat = ack_lat[log2_width][pldsize];
3024 if (val & 1) /* check LOsEnable */
3025 acklat += fst_trn_tx * 4;
3026 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3027
3028 if (adap->params.rev == 0)
3029 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3030 V_T3A_ACKLAT(M_T3A_ACKLAT),
3031 V_T3A_ACKLAT(acklat));
3032 else
3033 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3034 V_ACKLAT(acklat));
3035
3036 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3037 V_REPLAYLMT(rpllmt));
3038
3039 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3040 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3041}
3042
3043/*
3044 * Initialize and configure T3 HW modules. This performs the
3045 * initialization steps that need to be done once after a card is reset.
3046 * MAC and PHY initialization is handled separately whenever a port is enabled.
3047 *
3048 * fw_params are passed to FW and their value is platform dependent. Only the
3049 * top 8 bits are available for use, the rest must be 0.
3050 */
3051int t3_init_hw(struct adapter *adapter, u32 fw_params)
3052{
3053 int err = -EIO, attempts = 100;
3054 const struct vpd_params *vpd = &adapter->params.vpd;
3055
3056 if (adapter->params.rev > 0)
3057 calibrate_xgm_t3b(adapter);
3058 else if (calibrate_xgm(adapter))
3059 goto out_err;
3060
3061 if (vpd->mclk) {
3062 partition_mem(adapter, &adapter->params.tp);
3063
3064 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3065 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3066 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3067 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3068 adapter->params.mc5.nfilters,
3069 adapter->params.mc5.nroutes))
3070 goto out_err;
3071 }
3072
3073 if (tp_init(adapter, &adapter->params.tp))
3074 goto out_err;
3075
3076 t3_tp_set_coalescing_size(adapter,
3077 min(adapter->params.sge.max_pkt_size,
3078 MAX_RX_COALESCING_LEN), 1);
3079 t3_tp_set_max_rxsize(adapter,
3080 min(adapter->params.sge.max_pkt_size, 16384U));
3081 ulp_config(adapter, &adapter->params.tp);
3082
3083 if (is_pcie(adapter))
3084 config_pcie(adapter);
3085 else
3086 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3087
3088 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3089 init_hw_for_avail_ports(adapter, adapter->params.nports);
3090 t3_sge_init(adapter, &adapter->params.sge);
3091
3092 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3093 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3094 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3095 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3096
3097 do { /* wait for uP to initialize */
3098 msleep(20);
3099 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3100 if (!attempts)
3101 goto out_err;
3102
3103 err = 0;
3104out_err:
3105 return err;
3106}
3107
3108/**
3109 * get_pci_mode - determine a card's PCI mode
3110 * @adapter: the adapter
3111 * @p: where to store the PCI settings
3112 *
3113 * Determines a card's PCI mode and associated parameters, such as speed
3114 * and width.
3115 */
3116static void __devinit get_pci_mode(struct adapter *adapter,
3117 struct pci_params *p)
3118{
3119 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3120 u32 pci_mode, pcie_cap;
3121
3122 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3123 if (pcie_cap) {
3124 u16 val;
3125
3126 p->variant = PCI_VARIANT_PCIE;
3127 p->pcie_cap_addr = pcie_cap;
3128 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3129 &val);
3130 p->width = (val >> 4) & 0x3f;
3131 return;
3132 }
3133
3134 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3135 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3136 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3137 pci_mode = G_PCIXINITPAT(pci_mode);
3138 if (pci_mode == 0)
3139 p->variant = PCI_VARIANT_PCI;
3140 else if (pci_mode < 4)
3141 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3142 else if (pci_mode < 8)
3143 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3144 else
3145 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3146}
3147
3148/**
3149 * init_link_config - initialize a link's SW state
3150 * @lc: structure holding the link state
3151 * @caps: bitmap of the link's supported capabilities
3152 *
3153 * Initializes the SW state maintained for each link, including the link's
3154 * capabilities and default speed/duplex/flow-control/autonegotiation
3155 * settings.
3156 */
3157static void __devinit init_link_config(struct link_config *lc,
3158 unsigned int caps)
3159{
3160 lc->supported = caps;
3161 lc->requested_speed = lc->speed = SPEED_INVALID;
3162 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3163 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3164 if (lc->supported & SUPPORTED_Autoneg) {
3165 lc->advertising = lc->supported;
3166 lc->autoneg = AUTONEG_ENABLE;
3167 lc->requested_fc |= PAUSE_AUTONEG;
3168 } else {
3169 lc->advertising = 0;
3170 lc->autoneg = AUTONEG_DISABLE;
3171 }
3172}
3173
3174/**
3175 * mc7_calc_size - calculate MC7 memory size
3176 * @cfg: the MC7 configuration
3177 *
3178 * Calculates the size of an MC7 memory in bytes from the value of its
3179 * configuration register.
3180 */
3181static unsigned int __devinit mc7_calc_size(u32 cfg)
3182{
3183 unsigned int width = G_WIDTH(cfg);
3184 unsigned int banks = !!(cfg & F_BKS) + 1;
3185 unsigned int org = !!(cfg & F_ORG) + 1;
3186 unsigned int density = G_DEN(cfg);
3187 unsigned int MBs = ((256 << density) * banks) / (org << width);
3188
3189 return MBs << 20;
3190}
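/*
 * Example: a configuration with density code 2, a single bank, org = 1 and
 * width code 2 works out to ((256 << 2) * 1) / (1 << 2) = 256 MB, returned
 * as 256 << 20 bytes.
 */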
3191
3192static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3193 unsigned int base_addr, const char *name)
3194{
3195 u32 cfg;
3196
3197 mc7->adapter = adapter;
3198 mc7->name = name;
3199 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3200 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3201 mc7->size = mc7_calc_size(cfg);
3202 mc7->width = G_WIDTH(cfg);
3203}
3204
3205void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3206{
3207 mac->adapter = adapter;
3208 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3209 mac->nucast = 1;
3210
3211 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3212 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3213 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3214 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3215 F_ENRGMII, 0);
3216 }
3217}
3218
3219void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3220{
3221 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3222
3223 mi1_init(adapter, ai);
3224 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3225 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3226 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3227 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3228
3229 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3230 val |= F_ENRGMII;
3231
3232 /* Enable MAC clocks so we can access the registers */
3233 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3234 t3_read_reg(adapter, A_XGM_PORT_CFG);
3235
3236 val |= F_CLKDIVRESET_;
3237 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3238 t3_read_reg(adapter, A_XGM_PORT_CFG);
3239 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3240 t3_read_reg(adapter, A_XGM_PORT_CFG);
3241}
3242
3243/*
3244 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3245 * ones don't.
3246 */
3247int t3_reset_adapter(struct adapter *adapter)
3248{
3249 int i;
3250 uint16_t devid = 0;
3251
3252 if (is_pcie(adapter))
3253 pci_save_state(adapter->pdev);
3254 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3255
3256 /*
3257 * Delay to give the device some time to reset fully.
3258 * XXX The delay time should be tuned.
3259 */
3260 for (i = 0; i < 10; i++) {
3261 msleep(50);
3262 pci_read_config_word(adapter->pdev, 0x00, &devid);
3263 if (devid == 0x1425)
3264 break;
3265 }
3266
3267 if (devid != 0x1425)
3268 return -1;
3269
3270 if (is_pcie(adapter))
3271 pci_restore_state(adapter->pdev);
3272 return 0;
3273}
3274
3275/*
3276 * Initialize adapter SW state for the various HW modules, set initial values
3277 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3278 * interface.
3279 */
3280int __devinit t3_prep_adapter(struct adapter *adapter,
3281 const struct adapter_info *ai, int reset)
3282{
3283 int ret;
3284 unsigned int i, j = 0;
3285
3286 get_pci_mode(adapter, &adapter->params.pci);
3287
3288 adapter->params.info = ai;
3289 adapter->params.nports = ai->nports;
3290 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3291 adapter->params.linkpoll_period = 0;
3292 adapter->params.stats_update_period = is_10G(adapter) ?
3293 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3294 adapter->params.pci.vpd_cap_addr =
3295 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3296 ret = get_vpd_params(adapter, &adapter->params.vpd);
3297 if (ret < 0)
3298 return ret;
3299
3300 if (reset && t3_reset_adapter(adapter))
3301 return -1;
3302
3303 t3_sge_prep(adapter, &adapter->params.sge);
3304
3305 if (adapter->params.vpd.mclk) {
3306 struct tp_params *p = &adapter->params.tp;
3307
3308 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3309 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3310 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3311
3312 p->nchan = ai->nports;
3313 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3314 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3315 p->cm_size = t3_mc7_size(&adapter->cm);
3316 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3317 p->chan_tx_size = p->pmtx_size / p->nchan;
3318 p->rx_pg_size = 64 * 1024;
3319 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3320 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3321 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3322 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3323 adapter->params.rev > 0 ? 12 : 6;
3324
3325 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3326 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3327 DEFAULT_NFILTERS : 0;
3328 adapter->params.mc5.nroutes = 0;
3329 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3330
3331 init_mtus(adapter->params.mtus);
3332 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3333 }
3334
3335 early_hw_init(adapter, ai);
3336
3337 for_each_port(adapter, i) {
3338 u8 hw_addr[6];
3339 struct port_info *p = adap2pinfo(adapter, i);
3340
3341 while (!adapter->params.vpd.port_type[j])
3342 ++j;
3343
3344 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3345 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3346 ai->mdio_ops);
3347 mac_prep(&p->mac, adapter, j);
3348 ++j;
3349
3350 /*
3351 * The VPD EEPROM stores the base Ethernet address for the
3352 * card. A port's address is derived from the base by adding
3353 * the port's index to the base's low octet.
3354 */
3355 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3356 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3357
3358 memcpy(adapter->port[i]->dev_addr, hw_addr,
3359 ETH_ALEN);
3360 memcpy(adapter->port[i]->perm_addr, hw_addr,
3361 ETH_ALEN);
3362 init_link_config(&p->link_config, p->port_type->caps);
3363 p->phy.ops->power_down(&p->phy, 1);
3364 if (!(p->port_type->caps & SUPPORTED_IRQ))
3365 adapter->params.linkpoll_period = 10;
3366 }
3367
3368 return 0;
3369}
3370
3371void t3_led_ready(struct adapter *adapter)
3372{
3373 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3374 F_GPIO0_OUT_VAL);
3375}
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
new file mode 100644
index 000000000000..9af3bcd64b3b
--- /dev/null
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (C) 2006-2007 Chelsio Communications. All rights reserved.
3 * Copyright (C) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _T3CDEV_H_
34#define _T3CDEV_H_
35
36#include <linux/list.h>
37#include <asm/atomic.h>
38#include <asm/semaphore.h>
39#include <linux/netdevice.h>
40#include <linux/proc_fs.h>
41#include <linux/skbuff.h>
42#include <net/neighbour.h>
43
44#define T3CNAMSIZ 16
45
46/* Get the t3cdev associated with a net_device */
47#define T3CDEV(netdev) (struct t3cdev *)(netdev->priv)
48
49struct cxgb3_client;
50
51enum t3ctype {
52 T3A = 0,
53 T3B
54};
55
56struct t3cdev {
57 char name[T3CNAMSIZ]; /* T3C device name */
58 enum t3ctype type;
59 struct list_head ofld_dev_list; /* for list linking */
60 struct net_device *lldev; /* LL dev associated with T3C messages */
61 struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
62 int (*send)(struct t3cdev *dev, struct sk_buff *skb);
63 int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
64 int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
65 void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
66 void *priv; /* driver private data */
67 void *l2opt; /* optional layer 2 data */
68 void *l3opt; /* optional layer 3 data */
69 void *l4opt; /* optional layer 4 data */
70 void *ulp; /* ulp stuff */
71};
72
73#endif /* _T3CDEV_H_ */
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
new file mode 100644
index 000000000000..2b67dd523cc1
--- /dev/null
+++ b/drivers/net/cxgb3/version.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
33#ifndef __CHELSIO_VERSION_H
34#define __CHELSIO_VERSION_H
35#define DRV_DESC "Chelsio T3 Network Driver"
36#define DRV_NAME "cxgb3"
37/* Driver version */
38#define DRV_VERSION "1.0"
39#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c
new file mode 100644
index 000000000000..eee4285b31be
--- /dev/null
+++ b/drivers/net/cxgb3/vsc8211.c
@@ -0,0 +1,228 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33
34/* VSC8211 PHY specific registers. */
35enum {
36 VSC8211_INTR_ENABLE = 25,
37 VSC8211_INTR_STATUS = 26,
38 VSC8211_AUX_CTRL_STAT = 28,
39};
40
41enum {
42 VSC_INTR_RX_ERR = 1 << 0,
43 VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
44 VSC_INTR_CABLE = 1 << 2, /* cable impairment */
45 VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
46 VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
47 VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
48 VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
49 VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
50 VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
51 VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
52 VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
53 VSC_INTR_LINK_CHG = 1 << 13, /* link change */
54 VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
55};
56
57#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
58 VSC_INTR_NEG_DONE)
59#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
60 VSC_INTR_ENABLE)
61
62/* PHY specific auxiliary control & status register fields */
63#define S_ACSR_ACTIPHY_TMR 0
64#define M_ACSR_ACTIPHY_TMR 0x3
65#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
66
67#define S_ACSR_SPEED 3
68#define M_ACSR_SPEED 0x3
69#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
70
71#define S_ACSR_DUPLEX 5
72#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
73
74#define S_ACSR_ACTIPHY 6
75#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
76
77/*
78 * Reset the PHY. This PHY completes reset immediately so we never wait.
79 */
80static int vsc8211_reset(struct cphy *cphy, int wait)
81{
82 return t3_phy_reset(cphy, 0, 0);
83}
84
85static int vsc8211_intr_enable(struct cphy *cphy)
86{
87 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
88}
89
90static int vsc8211_intr_disable(struct cphy *cphy)
91{
92 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
93}
94
95static int vsc8211_intr_clear(struct cphy *cphy)
96{
97 u32 val;
98
99 /* Clear PHY interrupts by reading the register. */
100 return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
101}
102
103static int vsc8211_autoneg_enable(struct cphy *cphy)
104{
105 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
106 BMCR_ANENABLE | BMCR_ANRESTART);
107}
108
109static int vsc8211_autoneg_restart(struct cphy *cphy)
110{
111 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
112 BMCR_ANRESTART);
113}
114
115static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
116 int *speed, int *duplex, int *fc)
117{
118 unsigned int bmcr, status, lpa, adv;
119 int err, sp = -1, dplx = -1, pause = 0;
120
121 err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
122 if (!err)
123 err = mdio_read(cphy, 0, MII_BMSR, &status);
124 if (err)
125 return err;
126
127 if (link_ok) {
128 /*
129 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
130 * once more to get the current link state.
131 */
132 if (!(status & BMSR_LSTATUS))
133 err = mdio_read(cphy, 0, MII_BMSR, &status);
134 if (err)
135 return err;
136 *link_ok = (status & BMSR_LSTATUS) != 0;
137 }
138 if (!(bmcr & BMCR_ANENABLE)) {
139 dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
140 if (bmcr & BMCR_SPEED1000)
141 sp = SPEED_1000;
142 else if (bmcr & BMCR_SPEED100)
143 sp = SPEED_100;
144 else
145 sp = SPEED_10;
146 } else if (status & BMSR_ANEGCOMPLETE) {
147 err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
148 if (err)
149 return err;
150
151 dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
152 sp = G_ACSR_SPEED(status);
153 if (sp == 0)
154 sp = SPEED_10;
155 else if (sp == 1)
156 sp = SPEED_100;
157 else
158 sp = SPEED_1000;
159
160 if (fc && dplx == DUPLEX_FULL) {
161 err = mdio_read(cphy, 0, MII_LPA, &lpa);
162 if (!err)
163 err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
164 if (err)
165 return err;
166
167 if (lpa & adv & ADVERTISE_PAUSE_CAP)
168 pause = PAUSE_RX | PAUSE_TX;
169 else if ((lpa & ADVERTISE_PAUSE_CAP) &&
170 (lpa & ADVERTISE_PAUSE_ASYM) &&
171 (adv & ADVERTISE_PAUSE_ASYM))
172 pause = PAUSE_TX;
173 else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
174 (adv & ADVERTISE_PAUSE_CAP))
175 pause = PAUSE_RX;
176 }
177 }
178 if (speed)
179 *speed = sp;
180 if (duplex)
181 *duplex = dplx;
182 if (fc)
183 *fc = pause;
184 return 0;
185}
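/*
 * The pause resolution above follows the usual 802.3 rules: symmetric pause
 * advertised by both ends yields PAUSE_RX | PAUSE_TX, while asymmetric-pause
 * combinations yield TX-only or RX-only flow control depending on which end
 * advertised ADVERTISE_PAUSE_CAP vs ADVERTISE_PAUSE_ASYM.
 */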
186
187static int vsc8211_power_down(struct cphy *cphy, int enable)
188{
189 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
190 enable ? BMCR_PDOWN : 0);
191}
192
193static int vsc8211_intr_handler(struct cphy *cphy)
194{
195 unsigned int cause;
196 int err, cphy_cause = 0;
197
198 err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
199 if (err)
200 return err;
201
202 cause &= INTR_MASK;
203 if (cause & CFG_CHG_INTR_MASK)
204 cphy_cause |= cphy_cause_link_change;
205 if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
206 cphy_cause |= cphy_cause_fifo_error;
207 return cphy_cause;
208}
209
210static struct cphy_ops vsc8211_ops = {
211 .reset = vsc8211_reset,
212 .intr_enable = vsc8211_intr_enable,
213 .intr_disable = vsc8211_intr_disable,
214 .intr_clear = vsc8211_intr_clear,
215 .intr_handler = vsc8211_intr_handler,
216 .autoneg_enable = vsc8211_autoneg_enable,
217 .autoneg_restart = vsc8211_autoneg_restart,
218 .advertise = t3_phy_advertise,
219 .set_speed_duplex = t3_set_phy_speed_duplex,
220 .get_link_status = vsc8211_get_link_status,
221 .power_down = vsc8211_power_down,
222};
223
224void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
225 int phy_addr, const struct mdio_ops *mdio_ops)
226{
227 cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
228}
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
new file mode 100644
index 000000000000..907a272ae32d
--- /dev/null
+++ b/drivers/net/cxgb3/xgmac.c
@@ -0,0 +1,409 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35/*
36 * # of exact address filters. The first one is used for the station address,
37 * the rest are available for multicast addresses.
38 */
39#define EXACT_ADDR_FILTERS 8
40
41static inline int macidx(const struct cmac *mac)
42{
43 return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
44}
45
46static void xaui_serdes_reset(struct cmac *mac)
47{
48 static const unsigned int clear[] = {
49 F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
50 F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
51 };
52
53 int i;
54 struct adapter *adap = mac->adapter;
55 u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
56
57 t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
58 F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
59 F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
60 F_RESETPLL23 | F_RESETPLL01);
61 t3_read_reg(adap, ctrl);
62 udelay(15);
63
64 for (i = 0; i < ARRAY_SIZE(clear); i++) {
65 t3_set_reg_field(adap, ctrl, clear[i], 0);
66 udelay(15);
67 }
68}
69
70void t3b_pcs_reset(struct cmac *mac)
71{
72 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
73 F_PCS_RESET_, 0);
74 udelay(20);
75 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
76 F_PCS_RESET_);
77}
78
79int t3_mac_reset(struct cmac *mac)
80{
81 static const struct addr_val_pair mac_reset_avp[] = {
82 {A_XGM_TX_CTRL, 0},
83 {A_XGM_RX_CTRL, 0},
84 {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
85 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
86 {A_XGM_RX_HASH_LOW, 0},
87 {A_XGM_RX_HASH_HIGH, 0},
88 {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
89 {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
90 {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
91 {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
92 {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
93 {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
94 {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
95 {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
96 {A_XGM_STAT_CTRL, F_CLRSTATS}
97 };
98 u32 val;
99 struct adapter *adap = mac->adapter;
100 unsigned int oft = mac->offset;
101
102 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
103 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
104
105 t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
106 t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
107 F_RXSTRFRWRD | F_DISERRFRAMES,
108 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
109
110 if (uses_xaui(adap)) {
111 if (adap->params.rev == 0) {
112 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
113 F_RXENABLE | F_TXENABLE);
114 if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
115 F_CMULOCK, 1, 5, 2)) {
116 CH_ERR(adap,
117 "MAC %d XAUI SERDES CMU lock failed\n",
118 macidx(mac));
119 return -1;
120 }
121 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
122 F_SERDESRESET_);
123 } else
124 xaui_serdes_reset(mac);
125 }
126
127 if (adap->params.rev > 0)
128 t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
129
130 val = F_MAC_RESET_;
131 if (is_10G(adap))
132 val |= F_PCS_RESET_;
133 else if (uses_xaui(adap))
134 val |= F_PCS_RESET_ | F_XG2G_RESET_;
135 else
136 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
137 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
138 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
139 if ((val & F_PCS_RESET_) && adap->params.rev) {
140 msleep(1);
141 t3b_pcs_reset(mac);
142 }
143
144 memset(&mac->stats, 0, sizeof(mac->stats));
145 return 0;
146}
147
148/*
149 * Set the exact match register 'idx' to recognize the given Ethernet address.
150 */
151static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
152{
153 u32 addr_lo, addr_hi;
154 unsigned int oft = mac->offset + idx * 8;
155
156 addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
157 addr_hi = (addr[5] << 8) | addr[4];
158
159 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
160 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
161}
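/*
 * Example: the (made-up) address 00:11:22:33:44:55 is written as
 * addr_lo = 0x33221100 and addr_hi = 0x5544, i.e. the bytes are stored
 * little-endian across the EXACT_MATCH_LOW/HIGH register pair.
 */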
162
163/* Set one of the station's unicast MAC addresses. */
164int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
165{
166 if (idx >= mac->nucast)
167 return -EINVAL;
168 set_addr_filter(mac, idx, addr);
169 return 0;
170}
171
172/*
173 * Specify the number of exact address filters that should be reserved for
174 * unicast addresses. Caller should reload the unicast and multicast addresses
175 * after calling this.
176 */
177int t3_mac_set_num_ucast(struct cmac *mac, int n)
178{
179 if (n > EXACT_ADDR_FILTERS)
180 return -EINVAL;
181 mac->nucast = n;
182 return 0;
183}
184
185/* Calculate the RX hash filter index of an Ethernet address */
186static int hash_hw_addr(const u8 * addr)
187{
188 int hash = 0, octet, bit, i = 0, c;
189
190 for (octet = 0; octet < 6; ++octet)
191 for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
192 hash ^= (c & 1) << i;
193 if (++i == 6)
194 i = 0;
195 }
196 return hash;
197}
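/*
 * The XOR fold above yields a 6-bit index (0-63); t3_mac_set_rx_mode()
 * below maps indices 0-31 into A_XGM_RX_HASH_LOW and 32-63 into
 * A_XGM_RX_HASH_HIGH.
 */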
198
199int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
200{
201 u32 val, hash_lo, hash_hi;
202 struct adapter *adap = mac->adapter;
203 unsigned int oft = mac->offset;
204
205 val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
206 if (rm->dev->flags & IFF_PROMISC)
207 val |= F_COPYALLFRAMES;
208 t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
209
210 if (rm->dev->flags & IFF_ALLMULTI)
211 hash_lo = hash_hi = 0xffffffff;
212 else {
213 u8 *addr;
214 int exact_addr_idx = mac->nucast;
215
216 hash_lo = hash_hi = 0;
217 while ((addr = t3_get_next_mcaddr(rm)))
218 if (exact_addr_idx < EXACT_ADDR_FILTERS)
219 set_addr_filter(mac, exact_addr_idx++, addr);
220 else {
221 int hash = hash_hw_addr(addr);
222
223 if (hash < 32)
224 hash_lo |= (1 << hash);
225 else
226 hash_hi |= (1 << (hash - 32));
227 }
228 }
229
230 t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
231 t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
232 return 0;
233}
234
235int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
236{
237 int hwm, lwm;
238 unsigned int thres, v;
239 struct adapter *adap = mac->adapter;
240
241 /*
242 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
243 * packet size register includes header, but not FCS.
244 */
245 mtu += 14;
246 if (mtu > MAX_FRAME_SIZE - 4)
247 return -EINVAL;
248 t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
249
250 /*
251 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
252 * HWM only if flow-control is enabled.
253 */
254 hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
255 hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
256 lwm = hwm - 1024;
257 v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
258 v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
259 v |= V_RXFIFOPAUSELWM(lwm / 8);
260 if (G_RXFIFOPAUSEHWM(v))
261 v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
262 V_RXFIFOPAUSEHWM(hwm / 8);
263 t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
264
265 /* Adjust the TX FIFO threshold based on the MTU */
266 thres = (adap->params.vpd.cclk * 1000) / 15625;
267 thres = (thres * mtu) / 1000;
268 if (is_10G(adap))
269 thres /= 10;
270 thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
271 thres = max(thres, 8U); /* need at least 8 */
272 t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
273 V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
274 return 0;
275}
276
277int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
278{
279 u32 val;
280 struct adapter *adap = mac->adapter;
281 unsigned int oft = mac->offset;
282
283 if (duplex >= 0 && duplex != DUPLEX_FULL)
284 return -EINVAL;
285 if (speed >= 0) {
286 if (speed == SPEED_10)
287 val = V_PORTSPEED(0);
288 else if (speed == SPEED_100)
289 val = V_PORTSPEED(1);
290 else if (speed == SPEED_1000)
291 val = V_PORTSPEED(2);
292 else if (speed == SPEED_10000)
293 val = V_PORTSPEED(3);
294 else
295 return -EINVAL;
296
297 t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
298 V_PORTSPEED(M_PORTSPEED), val);
299 }
300
301 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
302 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
303 if (fc & PAUSE_TX)
304 val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
305 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
306
307 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
308 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
309 return 0;
310}
311
312int t3_mac_enable(struct cmac *mac, int which)
313{
314 int idx = macidx(mac);
315 struct adapter *adap = mac->adapter;
316 unsigned int oft = mac->offset;
317
318 if (which & MAC_DIRECTION_TX) {
319 t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
320 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
321 t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
322 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
323 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
324 }
325 if (which & MAC_DIRECTION_RX)
326 t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
327 return 0;
328}
329
330int t3_mac_disable(struct cmac *mac, int which)
331{
332 int idx = macidx(mac);
333 struct adapter *adap = mac->adapter;
334
335 if (which & MAC_DIRECTION_TX) {
336 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
337 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
338 t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
339 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
340 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
341 }
342 if (which & MAC_DIRECTION_RX)
343 t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
344 return 0;
345}
346
347/*
348 * This function is called periodically to accumulate the current values of the
349 * RMON counters into the port statistics. Since the packet counters are only
350 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
351 * called more frequently than that. The byte counters are 45 bits wide; they
352 * would overflow in ~7.8 hours.
353 */
354const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
355{
356#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
357#define RMON_UPDATE(mac, name, reg) \
358 (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
359#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
360 (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
361 ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
362
363 u32 v, lo;
364
365 RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
366 RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
367 RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
368 RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
369 RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
370 RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
371 RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
372 RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
373 RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
374
375 RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
376 mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
377
378 RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
379 RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
380 RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
381 RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
382 RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
383 RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
384 RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
385
386 RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
387 RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
388 RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
389 RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
390 RMON_UPDATE(mac, tx_pause, TX_PAUSE);
391 /* This counts error frames in general (bad FCS, underrun, etc). */
392 RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
393
394 RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
395 RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
396 RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
397 RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
398 RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
399 RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
400 RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
401
402 /* The next stat isn't clear-on-read. */
403 t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
404 v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
405 lo = (u32) mac->stats.rx_cong_drops;
406 mac->stats.rx_cong_drops += (u64) (v - lo);
407
408 return &mac->stats;
409}
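
A quick sanity check of the wrap intervals quoted in the comment above t3_mac_update_stats(), assuming worst-case 10 GbE line rate with minimum-size frames; it lands at ~289 s, close to the ~286 s figure, the small difference coming from framing-overhead assumptions:

#include <stdio.h>

int main(void)
{
	double line_rate = 10e9;			/* bits per second */
	double pps = line_rate / (84 * 8);		/* 64 B frame + 8 B preamble + 12 B IFG */
	double frame_wrap = 4294967296.0 / pps;		/* 2^32 frames */
	double byte_wrap = 35184372088832.0 / (line_rate / 8);	/* 2^45 bytes */

	printf("frame counter wraps in ~%.0f s\n", frame_wrap);		/* ~289 s  */
	printf("byte counter wraps in ~%.1f h\n", byte_wrap / 3600);	/* ~7.8 h  */
	return 0;
}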
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 4ae0fed7122e..9f7e1db8ce62 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * adopted from sunlance.c by Richard van den Berg 6 * adopted from sunlance.c by Richard van den Berg
7 * 7 *
8 * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki 8 * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
9 * 9 *
10 * additional sources: 10 * additional sources:
11 * - PMAD-AA TURBOchannel Ethernet Module Functional Specification, 11 * - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
@@ -44,6 +44,8 @@
44 * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the 44 * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
45 * PMAX requirement to only use halfword accesses to the 45 * PMAX requirement to only use halfword accesses to the
46 * buffer. macro 46 * buffer. macro
47 *
48 * v0.011: Converted the PMAD to the driver model. macro
47 */ 49 */
48 50
49#include <linux/crc32.h> 51#include <linux/crc32.h>
@@ -58,6 +60,7 @@
58#include <linux/spinlock.h> 60#include <linux/spinlock.h>
59#include <linux/stddef.h> 61#include <linux/stddef.h>
60#include <linux/string.h> 62#include <linux/string.h>
63#include <linux/tc.h>
61#include <linux/types.h> 64#include <linux/types.h>
62 65
63#include <asm/addrspace.h> 66#include <asm/addrspace.h>
@@ -69,15 +72,16 @@
69#include <asm/dec/kn01.h> 72#include <asm/dec/kn01.h>
70#include <asm/dec/machtype.h> 73#include <asm/dec/machtype.h>
71#include <asm/dec/system.h> 74#include <asm/dec/system.h>
72#include <asm/dec/tc.h>
73 75
74static char version[] __devinitdata = 76static char version[] __devinitdata =
75"declance.c: v0.010 by Linux MIPS DECstation task force\n"; 77"declance.c: v0.011 by Linux MIPS DECstation task force\n";
76 78
77MODULE_AUTHOR("Linux MIPS DECstation task force"); 79MODULE_AUTHOR("Linux MIPS DECstation task force");
78MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver"); 80MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
79MODULE_LICENSE("GPL"); 81MODULE_LICENSE("GPL");
80 82
83#define __unused __attribute__ ((unused))
84
81/* 85/*
82 * card types 86 * card types
83 */ 87 */
@@ -246,7 +250,6 @@ struct lance_init_block {
246struct lance_private { 250struct lance_private {
247 struct net_device *next; 251 struct net_device *next;
248 int type; 252 int type;
249 int slot;
250 int dma_irq; 253 int dma_irq;
251 volatile struct lance_regs *ll; 254 volatile struct lance_regs *ll;
252 255
@@ -288,6 +291,7 @@ struct lance_regs {
288 291
289int dec_lance_debug = 2; 292int dec_lance_debug = 2;
290 293
294static struct tc_driver dec_lance_tc_driver;
291static struct net_device *root_lance_dev; 295static struct net_device *root_lance_dev;
292 296
293static inline void writereg(volatile unsigned short *regptr, short value) 297static inline void writereg(volatile unsigned short *regptr, short value)
@@ -1023,7 +1027,7 @@ static void lance_set_multicast_retry(unsigned long _opaque)
1023 lance_set_multicast(dev); 1027 lance_set_multicast(dev);
1024} 1028}
1025 1029
1026static int __init dec_lance_init(const int type, const int slot) 1030static int __init dec_lance_probe(struct device *bdev, const int type)
1027{ 1031{
1028 static unsigned version_printed; 1032 static unsigned version_printed;
1029 static const char fmt[] = "declance%d"; 1033 static const char fmt[] = "declance%d";
@@ -1031,6 +1035,7 @@ static int __init dec_lance_init(const int type, const int slot)
1031 struct net_device *dev; 1035 struct net_device *dev;
1032 struct lance_private *lp; 1036 struct lance_private *lp;
1033 volatile struct lance_regs *ll; 1037 volatile struct lance_regs *ll;
1038 resource_size_t start = 0, len = 0;
1034 int i, ret; 1039 int i, ret;
1035 unsigned long esar_base; 1040 unsigned long esar_base;
1036 unsigned char *esar; 1041 unsigned char *esar;
@@ -1038,14 +1043,18 @@ static int __init dec_lance_init(const int type, const int slot)
1038 if (dec_lance_debug && version_printed++ == 0) 1043 if (dec_lance_debug && version_printed++ == 0)
1039 printk(version); 1044 printk(version);
1040 1045
1041 i = 0; 1046 if (bdev)
1042 dev = root_lance_dev; 1047 snprintf(name, sizeof(name), "%s", bdev->bus_id);
1043 while (dev) { 1048 else {
1044 i++; 1049 i = 0;
1045 lp = (struct lance_private *)dev->priv; 1050 dev = root_lance_dev;
1046 dev = lp->next; 1051 while (dev) {
1052 i++;
1053 lp = (struct lance_private *)dev->priv;
1054 dev = lp->next;
1055 }
1056 snprintf(name, sizeof(name), fmt, i);
1047 } 1057 }
1048 snprintf(name, sizeof(name), fmt, i);
1049 1058
1050 dev = alloc_etherdev(sizeof(struct lance_private)); 1059 dev = alloc_etherdev(sizeof(struct lance_private));
1051 if (!dev) { 1060 if (!dev) {
@@ -1063,7 +1072,6 @@ static int __init dec_lance_init(const int type, const int slot)
1063 spin_lock_init(&lp->lock); 1072 spin_lock_init(&lp->lock);
1064 1073
1065 lp->type = type; 1074 lp->type = type;
1066 lp->slot = slot;
1067 switch (type) { 1075 switch (type) {
1068 case ASIC_LANCE: 1076 case ASIC_LANCE:
1069 dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE); 1077 dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
@@ -1110,12 +1118,22 @@ static int __init dec_lance_init(const int type, const int slot)
1110 break; 1118 break;
1111#ifdef CONFIG_TC 1119#ifdef CONFIG_TC
1112 case PMAD_LANCE: 1120 case PMAD_LANCE:
1113 claim_tc_card(slot); 1121 dev_set_drvdata(bdev, dev);
1122
1123 start = to_tc_dev(bdev)->resource.start;
1124 len = to_tc_dev(bdev)->resource.end - start + 1;
1125 if (!request_mem_region(start, len, bdev->bus_id)) {
1126 printk(KERN_ERR
1127 "%s: Unable to reserve MMIO resource\n",
1128 bdev->bus_id);
1129 ret = -EBUSY;
1130 goto err_out_dev;
1131 }
1114 1132
1115 dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot)); 1133 dev->mem_start = CKSEG1ADDR(start);
1116 dev->mem_end = dev->mem_start + 0x100000; 1134 dev->mem_end = dev->mem_start + 0x100000;
1117 dev->base_addr = dev->mem_start + 0x100000; 1135 dev->base_addr = dev->mem_start + 0x100000;
1118 dev->irq = get_tc_irq_nr(slot); 1136 dev->irq = to_tc_dev(bdev)->interrupt;
1119 esar_base = dev->mem_start + 0x1c0002; 1137 esar_base = dev->mem_start + 0x1c0002;
1120 lp->dma_irq = -1; 1138 lp->dma_irq = -1;
1121 1139
@@ -1174,7 +1192,7 @@ static int __init dec_lance_init(const int type, const int slot)
1174 printk(KERN_ERR "%s: declance_init called with unknown type\n", 1192 printk(KERN_ERR "%s: declance_init called with unknown type\n",
1175 name); 1193 name);
1176 ret = -ENODEV; 1194 ret = -ENODEV;
1177 goto err_out_free_dev; 1195 goto err_out_dev;
1178 } 1196 }
1179 1197
1180 ll = (struct lance_regs *) dev->base_addr; 1198 ll = (struct lance_regs *) dev->base_addr;
@@ -1188,7 +1206,7 @@ static int __init dec_lance_init(const int type, const int slot)
1188 "%s: Ethernet station address prom not found!\n", 1206 "%s: Ethernet station address prom not found!\n",
1189 name); 1207 name);
1190 ret = -ENODEV; 1208 ret = -ENODEV;
1191 goto err_out_free_dev; 1209 goto err_out_resource;
1192 } 1210 }
1193 /* Check the prom contents */ 1211 /* Check the prom contents */
1194 for (i = 0; i < 8; i++) { 1212 for (i = 0; i < 8; i++) {
@@ -1198,7 +1216,7 @@ static int __init dec_lance_init(const int type, const int slot)
1198 printk(KERN_ERR "%s: Something is wrong with the " 1216 printk(KERN_ERR "%s: Something is wrong with the "
1199 "ethernet station address prom!\n", name); 1217 "ethernet station address prom!\n", name);
1200 ret = -ENODEV; 1218 ret = -ENODEV;
1201 goto err_out_free_dev; 1219 goto err_out_resource;
1202 } 1220 }
1203 } 1221 }
1204 1222
@@ -1255,48 +1273,51 @@ static int __init dec_lance_init(const int type, const int slot)
1255 if (ret) { 1273 if (ret) {
1256 printk(KERN_ERR 1274 printk(KERN_ERR
1257 "%s: Unable to register netdev, aborting.\n", name); 1275 "%s: Unable to register netdev, aborting.\n", name);
1258 goto err_out_free_dev; 1276 goto err_out_resource;
1259 } 1277 }
1260 1278
1261 lp->next = root_lance_dev; 1279 if (!bdev) {
1262 root_lance_dev = dev; 1280 lp->next = root_lance_dev;
1281 root_lance_dev = dev;
1282 }
1263 1283
1264 printk("%s: registered as %s.\n", name, dev->name); 1284 printk("%s: registered as %s.\n", name, dev->name);
1265 return 0; 1285 return 0;
1266 1286
1267err_out_free_dev: 1287err_out_resource:
1288 if (bdev)
1289 release_mem_region(start, len);
1290
1291err_out_dev:
1268 free_netdev(dev); 1292 free_netdev(dev);
1269 1293
1270err_out: 1294err_out:
1271 return ret; 1295 return ret;
1272} 1296}
1273 1297
1298static void __exit dec_lance_remove(struct device *bdev)
1299{
1300 struct net_device *dev = dev_get_drvdata(bdev);
1301 resource_size_t start, len;
1302
1303 unregister_netdev(dev);
1304 start = to_tc_dev(bdev)->resource.start;
1305 len = to_tc_dev(bdev)->resource.end - start + 1;
1306 release_mem_region(start, len);
1307 free_netdev(dev);
1308}
1274 1309
1275/* Find all the lance cards on the system and initialize them */ 1310/* Find all the lance cards on the system and initialize them */
1276static int __init dec_lance_probe(void) 1311static int __init dec_lance_platform_probe(void)
1277{ 1312{
1278 int count = 0; 1313 int count = 0;
1279 1314
1280 /* Scan slots for PMAD-AA cards first. */
1281#ifdef CONFIG_TC
1282 if (TURBOCHANNEL) {
1283 int slot;
1284
1285 while ((slot = search_tc_card("PMAD-AA")) >= 0) {
1286 if (dec_lance_init(PMAD_LANCE, slot) < 0)
1287 break;
1288 count++;
1289 }
1290 }
1291#endif
1292
1293 /* Then handle onboard devices. */
1294 if (dec_interrupt[DEC_IRQ_LANCE] >= 0) { 1315 if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
1295 if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) { 1316 if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
1296 if (dec_lance_init(ASIC_LANCE, -1) >= 0) 1317 if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
1297 count++; 1318 count++;
1298 } else if (!TURBOCHANNEL) { 1319 } else if (!TURBOCHANNEL) {
1299 if (dec_lance_init(PMAX_LANCE, -1) >= 0) 1320 if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
1300 count++; 1321 count++;
1301 } 1322 }
1302 } 1323 }
@@ -1304,21 +1325,70 @@ static int __init dec_lance_probe(void)
1304 return (count > 0) ? 0 : -ENODEV; 1325 return (count > 0) ? 0 : -ENODEV;
1305} 1326}
1306 1327
1307static void __exit dec_lance_cleanup(void) 1328static void __exit dec_lance_platform_remove(void)
1308{ 1329{
1309 while (root_lance_dev) { 1330 while (root_lance_dev) {
1310 struct net_device *dev = root_lance_dev; 1331 struct net_device *dev = root_lance_dev;
1311 struct lance_private *lp = netdev_priv(dev); 1332 struct lance_private *lp = netdev_priv(dev);
1312 1333
1313 unregister_netdev(dev); 1334 unregister_netdev(dev);
1314#ifdef CONFIG_TC
1315 if (lp->slot >= 0)
1316 release_tc_card(lp->slot);
1317#endif
1318 root_lance_dev = lp->next; 1335 root_lance_dev = lp->next;
1319 free_netdev(dev); 1336 free_netdev(dev);
1320 } 1337 }
1321} 1338}
1322 1339
1323module_init(dec_lance_probe); 1340#ifdef CONFIG_TC
1324module_exit(dec_lance_cleanup); 1341static int __init dec_lance_tc_probe(struct device *dev);
1342static int __exit dec_lance_tc_remove(struct device *dev);
1343
1344static const struct tc_device_id dec_lance_tc_table[] = {
1345 { "DEC ", "PMAD-AA " },
1346 { }
1347};
1348MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
1349
1350static struct tc_driver dec_lance_tc_driver = {
1351 .id_table = dec_lance_tc_table,
1352 .driver = {
1353 .name = "declance",
1354 .bus = &tc_bus_type,
1355 .probe = dec_lance_tc_probe,
1356 .remove = __exit_p(dec_lance_tc_remove),
1357 },
1358};
1359
1360static int __init dec_lance_tc_probe(struct device *dev)
1361{
1362 int status = dec_lance_probe(dev, PMAD_LANCE);
1363 if (!status)
1364 get_device(dev);
1365 return status;
1366}
1367
1368static int __exit dec_lance_tc_remove(struct device *dev)
1369{
1370 put_device(dev);
1371 dec_lance_remove(dev);
1372 return 0;
1373}
1374#endif
1375
1376static int __init dec_lance_init(void)
1377{
1378 int status;
1379
1380 status = tc_register_driver(&dec_lance_tc_driver);
1381 if (!status)
1382 dec_lance_platform_probe();
1383 return status;
1384}
1385
1386static void __exit dec_lance_exit(void)
1387{
1388 dec_lance_platform_remove();
1389 tc_unregister_driver(&dec_lance_tc_driver);
1390}
1391
1392
1393module_init(dec_lance_init);
1394module_exit(dec_lance_exit);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f091042b146e..689f158a469e 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -59,17 +59,13 @@
59#include <linux/capability.h> 59#include <linux/capability.h>
60#include <linux/in.h> 60#include <linux/in.h>
61#include <linux/ip.h> 61#include <linux/ip.h>
62#ifdef NETIF_F_TSO6
63#include <linux/ipv6.h> 62#include <linux/ipv6.h>
64#endif
65#include <linux/tcp.h> 63#include <linux/tcp.h>
66#include <linux/udp.h> 64#include <linux/udp.h>
67#include <net/pkt_sched.h> 65#include <net/pkt_sched.h>
68#include <linux/list.h> 66#include <linux/list.h>
69#include <linux/reboot.h> 67#include <linux/reboot.h>
70#ifdef NETIF_F_TSO
71#include <net/checksum.h> 68#include <net/checksum.h>
72#endif
73#include <linux/mii.h> 69#include <linux/mii.h>
74#include <linux/ethtool.h> 70#include <linux/ethtool.h>
75#include <linux/if_vlan.h> 71#include <linux/if_vlan.h>
@@ -257,7 +253,6 @@ struct e1000_adapter {
257 spinlock_t tx_queue_lock; 253 spinlock_t tx_queue_lock;
258#endif 254#endif
259 atomic_t irq_sem; 255 atomic_t irq_sem;
260 unsigned int detect_link;
261 unsigned int total_tx_bytes; 256 unsigned int total_tx_bytes;
262 unsigned int total_tx_packets; 257 unsigned int total_tx_packets;
263 unsigned int total_rx_bytes; 258 unsigned int total_rx_bytes;
@@ -348,9 +343,7 @@ struct e1000_adapter {
348 boolean_t have_msi; 343 boolean_t have_msi;
349#endif 344#endif
350 /* to not mess up cache alignment, always add to the bottom */ 345 /* to not mess up cache alignment, always add to the bottom */
351#ifdef NETIF_F_TSO
352 boolean_t tso_force; 346 boolean_t tso_force;
353#endif
354 boolean_t smart_power_down; /* phy smart power down */ 347 boolean_t smart_power_down; /* phy smart power down */
355 boolean_t quad_port_a; 348 boolean_t quad_port_a;
356 unsigned long flags; 349 unsigned long flags;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index fb96c87f9e56..44ebc72962dc 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -338,7 +338,6 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
338 return 0; 338 return 0;
339} 339}
340 340
341#ifdef NETIF_F_TSO
342static int 341static int
343e1000_set_tso(struct net_device *netdev, uint32_t data) 342e1000_set_tso(struct net_device *netdev, uint32_t data)
344{ 343{
@@ -352,18 +351,15 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
352 else 351 else
353 netdev->features &= ~NETIF_F_TSO; 352 netdev->features &= ~NETIF_F_TSO;
354 353
355#ifdef NETIF_F_TSO6
356 if (data) 354 if (data)
357 netdev->features |= NETIF_F_TSO6; 355 netdev->features |= NETIF_F_TSO6;
358 else 356 else
359 netdev->features &= ~NETIF_F_TSO6; 357 netdev->features &= ~NETIF_F_TSO6;
360#endif
361 358
362 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); 359 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
363 adapter->tso_force = TRUE; 360 adapter->tso_force = TRUE;
364 return 0; 361 return 0;
365} 362}
366#endif /* NETIF_F_TSO */
367 363
368static uint32_t 364static uint32_t
369e1000_get_msglevel(struct net_device *netdev) 365e1000_get_msglevel(struct net_device *netdev)
@@ -1971,10 +1967,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1971 .set_tx_csum = e1000_set_tx_csum, 1967 .set_tx_csum = e1000_set_tx_csum,
1972 .get_sg = ethtool_op_get_sg, 1968 .get_sg = ethtool_op_get_sg,
1973 .set_sg = ethtool_op_set_sg, 1969 .set_sg = ethtool_op_set_sg,
1974#ifdef NETIF_F_TSO
1975 .get_tso = ethtool_op_get_tso, 1970 .get_tso = ethtool_op_get_tso,
1976 .set_tso = e1000_set_tso, 1971 .set_tso = e1000_set_tso,
1977#endif
1978 .self_test_count = e1000_diag_test_count, 1972 .self_test_count = e1000_diag_test_count,
1979 .self_test = e1000_diag_test, 1973 .self_test = e1000_diag_test,
1980 .get_strings = e1000_get_strings, 1974 .get_strings = e1000_get_strings,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index c6259c7127f6..222fcd2d10de 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "7.3.15-k2"DRIVERNAPI 39#define DRV_VERSION "7.3.20-k2"DRIVERNAPI
40char e1000_driver_version[] = DRV_VERSION; 40char e1000_driver_version[] = DRV_VERSION;
41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -990,16 +990,12 @@ e1000_probe(struct pci_dev *pdev,
990 netdev->features &= ~NETIF_F_HW_VLAN_FILTER; 990 netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
991 } 991 }
992 992
993#ifdef NETIF_F_TSO
994 if ((adapter->hw.mac_type >= e1000_82544) && 993 if ((adapter->hw.mac_type >= e1000_82544) &&
995 (adapter->hw.mac_type != e1000_82547)) 994 (adapter->hw.mac_type != e1000_82547))
996 netdev->features |= NETIF_F_TSO; 995 netdev->features |= NETIF_F_TSO;
997 996
998#ifdef NETIF_F_TSO6
999 if (adapter->hw.mac_type > e1000_82547_rev_2) 997 if (adapter->hw.mac_type > e1000_82547_rev_2)
1000 netdev->features |= NETIF_F_TSO6; 998 netdev->features |= NETIF_F_TSO6;
1001#endif
1002#endif
1003 if (pci_using_dac) 999 if (pci_using_dac)
1004 netdev->features |= NETIF_F_HIGHDMA; 1000 netdev->features |= NETIF_F_HIGHDMA;
1005 1001
@@ -2583,15 +2579,22 @@ e1000_watchdog(unsigned long data)
2583 2579
2584 if (link) { 2580 if (link) {
2585 if (!netif_carrier_ok(netdev)) { 2581 if (!netif_carrier_ok(netdev)) {
2582 uint32_t ctrl;
2586 boolean_t txb2b = 1; 2583 boolean_t txb2b = 1;
2587 e1000_get_speed_and_duplex(&adapter->hw, 2584 e1000_get_speed_and_duplex(&adapter->hw,
2588 &adapter->link_speed, 2585 &adapter->link_speed,
2589 &adapter->link_duplex); 2586 &adapter->link_duplex);
2590 2587
2591 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n", 2588 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2592 adapter->link_speed, 2589 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
2593 adapter->link_duplex == FULL_DUPLEX ? 2590 "Flow Control: %s\n",
2594 "Full Duplex" : "Half Duplex"); 2591 adapter->link_speed,
2592 adapter->link_duplex == FULL_DUPLEX ?
2593 "Full Duplex" : "Half Duplex",
2594 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2595 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2596 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2597 E1000_CTRL_TFCE) ? "TX" : "None" )));
2595 2598
2596 /* tweak tx_queue_len according to speed/duplex 2599 /* tweak tx_queue_len according to speed/duplex
2597 * and adjust the timeout factor */ 2600 * and adjust the timeout factor */
@@ -2619,7 +2622,6 @@ e1000_watchdog(unsigned long data)
2619 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); 2622 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
2620 } 2623 }
2621 2624
2622#ifdef NETIF_F_TSO
2623 /* disable TSO for pcie and 10/100 speeds, to avoid 2625 /* disable TSO for pcie and 10/100 speeds, to avoid
2624 * some hardware issues */ 2626 * some hardware issues */
2625 if (!adapter->tso_force && 2627 if (!adapter->tso_force &&
@@ -2630,22 +2632,17 @@ e1000_watchdog(unsigned long data)
2630 DPRINTK(PROBE,INFO, 2632 DPRINTK(PROBE,INFO,
2631 "10/100 speed: disabling TSO\n"); 2633 "10/100 speed: disabling TSO\n");
2632 netdev->features &= ~NETIF_F_TSO; 2634 netdev->features &= ~NETIF_F_TSO;
2633#ifdef NETIF_F_TSO6
2634 netdev->features &= ~NETIF_F_TSO6; 2635 netdev->features &= ~NETIF_F_TSO6;
2635#endif
2636 break; 2636 break;
2637 case SPEED_1000: 2637 case SPEED_1000:
2638 netdev->features |= NETIF_F_TSO; 2638 netdev->features |= NETIF_F_TSO;
2639#ifdef NETIF_F_TSO6
2640 netdev->features |= NETIF_F_TSO6; 2639 netdev->features |= NETIF_F_TSO6;
2641#endif
2642 break; 2640 break;
2643 default: 2641 default:
2644 /* oops */ 2642 /* oops */
2645 break; 2643 break;
2646 } 2644 }
2647 } 2645 }
2648#endif
2649 2646
2650 /* enable transmits in the hardware, need to do this 2647 /* enable transmits in the hardware, need to do this
2651 * after setting TARC0 */ 2648 * after setting TARC0 */
@@ -2875,7 +2872,6 @@ static int
2875e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, 2872e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2876 struct sk_buff *skb) 2873 struct sk_buff *skb)
2877{ 2874{
2878#ifdef NETIF_F_TSO
2879 struct e1000_context_desc *context_desc; 2875 struct e1000_context_desc *context_desc;
2880 struct e1000_buffer *buffer_info; 2876 struct e1000_buffer *buffer_info;
2881 unsigned int i; 2877 unsigned int i;
@@ -2904,7 +2900,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2904 0); 2900 0);
2905 cmd_length = E1000_TXD_CMD_IP; 2901 cmd_length = E1000_TXD_CMD_IP;
2906 ipcse = skb->h.raw - skb->data - 1; 2902 ipcse = skb->h.raw - skb->data - 1;
2907#ifdef NETIF_F_TSO6
2908 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2903 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2909 skb->nh.ipv6h->payload_len = 0; 2904 skb->nh.ipv6h->payload_len = 0;
2910 skb->h.th->check = 2905 skb->h.th->check =
@@ -2914,7 +2909,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2914 IPPROTO_TCP, 2909 IPPROTO_TCP,
2915 0); 2910 0);
2916 ipcse = 0; 2911 ipcse = 0;
2917#endif
2918 } 2912 }
2919 ipcss = skb->nh.raw - skb->data; 2913 ipcss = skb->nh.raw - skb->data;
2920 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 2914 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
@@ -2947,8 +2941,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2947 2941
2948 return TRUE; 2942 return TRUE;
2949 } 2943 }
2950#endif
2951
2952 return FALSE; 2944 return FALSE;
2953} 2945}
2954 2946
@@ -2968,8 +2960,9 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2968 buffer_info = &tx_ring->buffer_info[i]; 2960 buffer_info = &tx_ring->buffer_info[i];
2969 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2961 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2970 2962
2963 context_desc->lower_setup.ip_config = 0;
2971 context_desc->upper_setup.tcp_fields.tucss = css; 2964 context_desc->upper_setup.tcp_fields.tucss = css;
2972 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; 2965 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
2973 context_desc->upper_setup.tcp_fields.tucse = 0; 2966 context_desc->upper_setup.tcp_fields.tucse = 0;
2974 context_desc->tcp_seg_setup.data = 0; 2967 context_desc->tcp_seg_setup.data = 0;
2975 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); 2968 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
@@ -3005,7 +2998,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3005 while (len) { 2998 while (len) {
3006 buffer_info = &tx_ring->buffer_info[i]; 2999 buffer_info = &tx_ring->buffer_info[i];
3007 size = min(len, max_per_txd); 3000 size = min(len, max_per_txd);
3008#ifdef NETIF_F_TSO
3009 /* Workaround for Controller erratum -- 3001 /* Workaround for Controller erratum --
3010 * descriptor for non-tso packet in a linear SKB that follows a 3002 * descriptor for non-tso packet in a linear SKB that follows a
3011 * tso gets written back prematurely before the data is fully 3003 * tso gets written back prematurely before the data is fully
@@ -3020,7 +3012,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3020 * in TSO mode. Append 4-byte sentinel desc */ 3012 * in TSO mode. Append 4-byte sentinel desc */
3021 if (unlikely(mss && !nr_frags && size == len && size > 8)) 3013 if (unlikely(mss && !nr_frags && size == len && size > 8))
3022 size -= 4; 3014 size -= 4;
3023#endif
3024 /* work-around for errata 10 and it applies 3015 /* work-around for errata 10 and it applies
3025 * to all controllers in PCI-X mode 3016 * to all controllers in PCI-X mode
3026 * The fix is to make sure that the first descriptor of a 3017 * The fix is to make sure that the first descriptor of a
@@ -3062,12 +3053,10 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3062 while (len) { 3053 while (len) {
3063 buffer_info = &tx_ring->buffer_info[i]; 3054 buffer_info = &tx_ring->buffer_info[i];
3064 size = min(len, max_per_txd); 3055 size = min(len, max_per_txd);
3065#ifdef NETIF_F_TSO
3066 /* Workaround for premature desc write-backs 3056 /* Workaround for premature desc write-backs
3067 * in TSO mode. Append 4-byte sentinel desc */ 3057 * in TSO mode. Append 4-byte sentinel desc */
3068 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 3058 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
3069 size -= 4; 3059 size -= 4;
3070#endif
3071 /* Workaround for potential 82544 hang in PCI-X. 3060 /* Workaround for potential 82544 hang in PCI-X.
3072 * Avoid terminating buffers within evenly-aligned 3061 * Avoid terminating buffers within evenly-aligned
3073 * dwords. */ 3062 * dwords. */
@@ -3292,7 +3281,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3292 if (adapter->hw.mac_type >= e1000_82571) 3281 if (adapter->hw.mac_type >= e1000_82571)
3293 max_per_txd = 8192; 3282 max_per_txd = 8192;
3294 3283
3295#ifdef NETIF_F_TSO
3296 mss = skb_shinfo(skb)->gso_size; 3284 mss = skb_shinfo(skb)->gso_size;
3297 /* The controller does a simple calculation to 3285 /* The controller does a simple calculation to
3298 * make sure there is enough room in the FIFO before 3286 * make sure there is enough room in the FIFO before
@@ -3346,16 +3334,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3346 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3334 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3347 count++; 3335 count++;
3348 count++; 3336 count++;
3349#else
3350 if (skb->ip_summed == CHECKSUM_PARTIAL)
3351 count++;
3352#endif
3353 3337
3354#ifdef NETIF_F_TSO
3355 /* Controller Erratum workaround */ 3338 /* Controller Erratum workaround */
3356 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3339 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3357 count++; 3340 count++;
3358#endif
3359 3341
3360 count += TXD_USE_COUNT(len, max_txd_pwr); 3342 count += TXD_USE_COUNT(len, max_txd_pwr);
3361 3343
@@ -3765,8 +3747,8 @@ e1000_update_stats(struct e1000_adapter *adapter)
3765 * @data: pointer to a network interface device structure 3747 * @data: pointer to a network interface device structure
3766 **/ 3748 **/
3767 3749
3768static 3750static irqreturn_t
3769irqreturn_t e1000_intr_msi(int irq, void *data) 3751e1000_intr_msi(int irq, void *data)
3770{ 3752{
3771 struct net_device *netdev = data; 3753 struct net_device *netdev = data;
3772 struct e1000_adapter *adapter = netdev_priv(netdev); 3754 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3774,49 +3756,27 @@ irqreturn_t e1000_intr_msi(int irq, void *data)
3774#ifndef CONFIG_E1000_NAPI 3756#ifndef CONFIG_E1000_NAPI
3775 int i; 3757 int i;
3776#endif 3758#endif
3759 uint32_t icr = E1000_READ_REG(hw, ICR);
3777 3760
3778 /* this code avoids the read of ICR but has to get 1000 interrupts
3779 * at every link change event before it will notice the change */
3780 if (++adapter->detect_link >= 1000) {
3781 uint32_t icr = E1000_READ_REG(hw, ICR);
3782#ifdef CONFIG_E1000_NAPI 3761#ifdef CONFIG_E1000_NAPI
3783 /* read ICR disables interrupts using IAM, so keep up with our 3762 /* read ICR disables interrupts using IAM, so keep up with our
3784 * enable/disable accounting */ 3763 * enable/disable accounting */
3785 atomic_inc(&adapter->irq_sem); 3764 atomic_inc(&adapter->irq_sem);
3786#endif 3765#endif
3787 adapter->detect_link = 0; 3766 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3788 if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) && 3767 hw->get_link_status = 1;
3789 (icr & E1000_ICR_INT_ASSERTED)) { 3768 /* 80003ES2LAN workaround-- For packet buffer work-around on
3790 hw->get_link_status = 1; 3769 * link down event; disable receives here in the ISR and reset
3791 /* 80003ES2LAN workaround-- 3770 * adapter in watchdog */
3792 * For packet buffer work-around on link down event; 3771 if (netif_carrier_ok(netdev) &&
3793 * disable receives here in the ISR and 3772 (adapter->hw.mac_type == e1000_80003es2lan)) {
3794 * reset adapter in watchdog 3773 /* disable receives */
3795 */ 3774 uint32_t rctl = E1000_READ_REG(hw, RCTL);
3796 if (netif_carrier_ok(netdev) && 3775 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3797 (adapter->hw.mac_type == e1000_80003es2lan)) {
3798 /* disable receives */
3799 uint32_t rctl = E1000_READ_REG(hw, RCTL);
3800 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3801 }
3802 /* guard against interrupt when we're going down */
3803 if (!test_bit(__E1000_DOWN, &adapter->flags))
3804 mod_timer(&adapter->watchdog_timer,
3805 jiffies + 1);
3806 } 3776 }
3807 } else { 3777 /* guard against interrupt when we're going down */
3808 E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ | 3778 if (!test_bit(__E1000_DOWN, &adapter->flags))
3809 E1000_ICR_LSC))); 3779 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3810 /* bummer we have to flush here, but things break otherwise as
3811 * some event appears to be lost or delayed and throughput
3812 * drops. In almost all tests this flush is un-necessary */
3813 E1000_WRITE_FLUSH(hw);
3814#ifdef CONFIG_E1000_NAPI
3815 /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
3816 * masked. No need for the IMC write, but it does mean we
3817 * should account for it ASAP. */
3818 atomic_inc(&adapter->irq_sem);
3819#endif
3820 } 3780 }
3821 3781
3822#ifdef CONFIG_E1000_NAPI 3782#ifdef CONFIG_E1000_NAPI
@@ -3836,7 +3796,7 @@ irqreturn_t e1000_intr_msi(int irq, void *data)
3836 3796
3837 for (i = 0; i < E1000_MAX_INTR; i++) 3797 for (i = 0; i < E1000_MAX_INTR; i++)
3838 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3798 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3839 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3799 e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3840 break; 3800 break;
3841 3801
3842 if (likely(adapter->itr_setting & 3)) 3802 if (likely(adapter->itr_setting & 3))
@@ -3939,7 +3899,7 @@ e1000_intr(int irq, void *data)
3939 3899
3940 for (i = 0; i < E1000_MAX_INTR; i++) 3900 for (i = 0; i < E1000_MAX_INTR; i++)
3941 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3901 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3942 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3902 e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3943 break; 3903 break;
3944 3904
3945 if (likely(adapter->itr_setting & 3)) 3905 if (likely(adapter->itr_setting & 3))
@@ -3989,7 +3949,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3989 poll_dev->quota -= work_done; 3949 poll_dev->quota -= work_done;
3990 3950
3991 /* If no Tx and not enough Rx work done, exit the polling mode */ 3951 /* If no Tx and not enough Rx work done, exit the polling mode */
3992 if ((!tx_cleaned && (work_done == 0)) || 3952 if ((tx_cleaned && (work_done < work_to_do)) ||
3993 !netif_running(poll_dev)) { 3953 !netif_running(poll_dev)) {
3994quit_polling: 3954quit_polling:
3995 if (likely(adapter->itr_setting & 3)) 3955 if (likely(adapter->itr_setting & 3))
@@ -4019,7 +3979,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4019#ifdef CONFIG_E1000_NAPI 3979#ifdef CONFIG_E1000_NAPI
4020 unsigned int count = 0; 3980 unsigned int count = 0;
4021#endif 3981#endif
4022 boolean_t cleaned = FALSE; 3982 boolean_t cleaned = TRUE;
4023 unsigned int total_tx_bytes=0, total_tx_packets=0; 3983 unsigned int total_tx_bytes=0, total_tx_packets=0;
4024 3984
4025 i = tx_ring->next_to_clean; 3985 i = tx_ring->next_to_clean;
@@ -4034,10 +3994,13 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4034 3994
4035 if (cleaned) { 3995 if (cleaned) {
4036 struct sk_buff *skb = buffer_info->skb; 3996 struct sk_buff *skb = buffer_info->skb;
4037 unsigned int segs = skb_shinfo(skb)->gso_segs; 3997 unsigned int segs, bytecount;
3998 segs = skb_shinfo(skb)->gso_segs ?: 1;
3999 /* multiply data chunks by size of headers */
4000 bytecount = ((segs - 1) * skb_headlen(skb)) +
4001 skb->len;
4038 total_tx_packets += segs; 4002 total_tx_packets += segs;
4039 total_tx_packets++; 4003 total_tx_bytes += bytecount;
4040 total_tx_bytes += skb->len;
4041 } 4004 }
4042 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 4005 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
4043 tx_desc->upper.data = 0; 4006 tx_desc->upper.data = 0;
@@ -4050,7 +4013,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4050#ifdef CONFIG_E1000_NAPI 4013#ifdef CONFIG_E1000_NAPI
4051#define E1000_TX_WEIGHT 64 4014#define E1000_TX_WEIGHT 64
4052 /* weight of a sort for tx, to avoid endless transmit cleanup */ 4015 /* weight of a sort for tx, to avoid endless transmit cleanup */
4053 if (count++ == E1000_TX_WEIGHT) break; 4016 if (count++ == E1000_TX_WEIGHT) {
4017 cleaned = FALSE;
4018 break;
4019 }
4054#endif 4020#endif
4055 } 4021 }
4056 4022
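
The e1000_clean_tx_irq() hunk above now accounts TSO traffic by wire bytes: each of the gso_segs segments carries its own copy of the headers, so the extra (segs - 1) header copies are added to skb->len. A small worked example with hypothetical numbers (66-byte headers, 1448-byte MSS), assuming the skb's linear area holds only the headers:

#include <stdio.h>

int main(void)
{
	unsigned int segs = 4;
	unsigned int headlen = 66;		/* stands in for skb_headlen(skb) */
	unsigned int skb_len = 66 + 4 * 1448;	/* headers counted once + payload */
	unsigned int bytecount;

	/* each extra segment re-transmits the headers once more */
	bytecount = (segs - 1) * headlen + skb_len;
	printf("wire bytes accounted: %u\n", bytecount);	/* 4*66 + 4*1448 = 6056 */
	return 0;
}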
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 18afc0c25dac..10af742d8a20 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -48,8 +48,6 @@ typedef enum {
48 TRUE = 1 48 TRUE = 1
49} boolean_t; 49} boolean_t;
50 50
51#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
52
53#ifdef DBG 51#ifdef DBG
54#define DEBUGOUT(S) printk(KERN_DEBUG S "\n") 52#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
55#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A) 53#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
@@ -58,7 +56,7 @@ typedef enum {
58#define DEBUGOUT1(S, A...) 56#define DEBUGOUT1(S, A...)
59#endif 57#endif
60 58
61#define DEBUGFUNC(F) DEBUGOUT(F) 59#define DEBUGFUNC(F) DEBUGOUT(F "\n")
62#define DEBUGOUT2 DEBUGOUT1 60#define DEBUGOUT2 DEBUGOUT1
63#define DEBUGOUT3 DEBUGOUT2 61#define DEBUGOUT3 DEBUGOUT2
64#define DEBUGOUT7 DEBUGOUT3 62#define DEBUGOUT7 DEBUGOUT3
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index cf2a279307e1..f8862e203ac9 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -760,22 +760,13 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
760 case SPEED_1000: 760 case SPEED_1000:
761 DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without " 761 DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
762 "Duplex\n"); 762 "Duplex\n");
763 DPRINTK(PROBE, INFO, 763 goto full_duplex_only;
764 "Using Autonegotiation at 1000 Mbps "
765 "Full Duplex only\n");
766 adapter->hw.autoneg = adapter->fc_autoneg = 1;
767 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
768 break;
769 case SPEED_1000 + HALF_DUPLEX: 764 case SPEED_1000 + HALF_DUPLEX:
770 DPRINTK(PROBE, INFO, 765 DPRINTK(PROBE, INFO,
771 "Half Duplex is not supported at 1000 Mbps\n"); 766 "Half Duplex is not supported at 1000 Mbps\n");
772 DPRINTK(PROBE, INFO, 767 /* fall through */
773 "Using Autonegotiation at 1000 Mbps "
774 "Full Duplex only\n");
775 adapter->hw.autoneg = adapter->fc_autoneg = 1;
776 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
777 break;
778 case SPEED_1000 + FULL_DUPLEX: 768 case SPEED_1000 + FULL_DUPLEX:
769full_duplex_only:
779 DPRINTK(PROBE, INFO, 770 DPRINTK(PROBE, INFO,
780 "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); 771 "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
781 adapter->hw.autoneg = adapter->fc_autoneg = 1; 772 adapter->hw.autoneg = adapter->fc_autoneg = 1;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 93f2b7a22160..a363148d0198 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -111,6 +111,7 @@
111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. 111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
112 * 0.58: 30 Oct 2006: Added support for sideband management unit. 112 * 0.58: 30 Oct 2006: Added support for sideband management unit.
113 * 0.59: 30 Oct 2006: Added support for recoverable error. 113 * 0.59: 30 Oct 2006: Added support for recoverable error.
114 * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
114 * 115 *
115 * Known bugs: 116 * Known bugs:
116 * We suspect that on some hardware no TX done interrupts are generated. 117 * We suspect that on some hardware no TX done interrupts are generated.
@@ -127,7 +128,7 @@
127#else 128#else
128#define DRIVERNAPI 129#define DRIVERNAPI
129#endif 130#endif
130#define FORCEDETH_VERSION "0.59" 131#define FORCEDETH_VERSION "0.60"
131#define DRV_NAME "forcedeth" 132#define DRV_NAME "forcedeth"
132 133
133#include <linux/module.h> 134#include <linux/module.h>
@@ -173,9 +174,10 @@
173#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ 174#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
174#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ 175#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
175#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ 176#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
176#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */ 177#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
177#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */ 178#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
178#define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */ 179#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
180#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
179 181
180enum { 182enum {
181 NvRegIrqStatus = 0x000, 183 NvRegIrqStatus = 0x000,
@@ -210,7 +212,7 @@ enum {
210 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms 212 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
211 */ 213 */
212 NvRegPollingInterval = 0x00c, 214 NvRegPollingInterval = 0x00c,
213#define NVREG_POLL_DEFAULT_THROUGHPUT 970 215#define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */
214#define NVREG_POLL_DEFAULT_CPU 13 216#define NVREG_POLL_DEFAULT_CPU 13
215 NvRegMSIMap0 = 0x020, 217 NvRegMSIMap0 = 0x020,
216 NvRegMSIMap1 = 0x024, 218 NvRegMSIMap1 = 0x024,
@@ -304,8 +306,8 @@ enum {
304#define NVREG_TXRXCTL_RESET 0x0010 306#define NVREG_TXRXCTL_RESET 0x0010
305#define NVREG_TXRXCTL_RXCHECK 0x0400 307#define NVREG_TXRXCTL_RXCHECK 0x0400
306#define NVREG_TXRXCTL_DESC_1 0 308#define NVREG_TXRXCTL_DESC_1 0
307#define NVREG_TXRXCTL_DESC_2 0x02100 309#define NVREG_TXRXCTL_DESC_2 0x002100
308#define NVREG_TXRXCTL_DESC_3 0x02200 310#define NVREG_TXRXCTL_DESC_3 0xc02200
309#define NVREG_TXRXCTL_VLANSTRIP 0x00040 311#define NVREG_TXRXCTL_VLANSTRIP 0x00040
310#define NVREG_TXRXCTL_VLANINS 0x00080 312#define NVREG_TXRXCTL_VLANINS 0x00080
311 NvRegTxRingPhysAddrHigh = 0x148, 313 NvRegTxRingPhysAddrHigh = 0x148,
@@ -487,7 +489,8 @@ union ring_type {
487 489
488/* Miscellaneous hardware related defines: */ 490/* Miscellaneous hardware related defines: */
489#define NV_PCI_REGSZ_VER1 0x270 491#define NV_PCI_REGSZ_VER1 0x270
490#define NV_PCI_REGSZ_VER2 0x604 492#define NV_PCI_REGSZ_VER2 0x2d4
493#define NV_PCI_REGSZ_VER3 0x604
491 494
492/* various timeout delays: all in usec */ 495/* various timeout delays: all in usec */
493#define NV_TXRX_RESET_DELAY 4 496#define NV_TXRX_RESET_DELAY 4
@@ -518,12 +521,6 @@ union ring_type {
518#define TX_RING_MIN 64 521#define TX_RING_MIN 64
519#define RING_MAX_DESC_VER_1 1024 522#define RING_MAX_DESC_VER_1 1024
520#define RING_MAX_DESC_VER_2_3 16384 523#define RING_MAX_DESC_VER_2_3 16384
521/*
522 * Difference between the get and put pointers for the tx ring.
523 * This is used to throttle the amount of data outstanding in the
524 * tx ring.
525 */
526#define TX_LIMIT_DIFFERENCE 1
527 524
528/* rx/tx mac addr + type + vlan + align + slack*/ 525/* rx/tx mac addr + type + vlan + align + slack*/
529#define NV_RX_HEADERS (64) 526#define NV_RX_HEADERS (64)
@@ -611,9 +608,6 @@ static const struct nv_ethtool_str nv_estats_str[] = {
611 { "tx_carrier_errors" }, 608 { "tx_carrier_errors" },
612 { "tx_excess_deferral" }, 609 { "tx_excess_deferral" },
613 { "tx_retry_error" }, 610 { "tx_retry_error" },
614 { "tx_deferral" },
615 { "tx_packets" },
616 { "tx_pause" },
617 { "rx_frame_error" }, 611 { "rx_frame_error" },
618 { "rx_extra_byte" }, 612 { "rx_extra_byte" },
619 { "rx_late_collision" }, 613 { "rx_late_collision" },
@@ -626,11 +620,17 @@ static const struct nv_ethtool_str nv_estats_str[] = {
626 { "rx_unicast" }, 620 { "rx_unicast" },
627 { "rx_multicast" }, 621 { "rx_multicast" },
628 { "rx_broadcast" }, 622 { "rx_broadcast" },
623 { "rx_packets" },
624 { "rx_errors_total" },
625 { "tx_errors_total" },
626
627 /* version 2 stats */
628 { "tx_deferral" },
629 { "tx_packets" },
629 { "rx_bytes" }, 630 { "rx_bytes" },
631 { "tx_pause" },
630 { "rx_pause" }, 632 { "rx_pause" },
631 { "rx_drop_frame" }, 633 { "rx_drop_frame" }
632 { "rx_packets" },
633 { "rx_errors_total" }
634}; 634};
635 635
636struct nv_ethtool_stats { 636struct nv_ethtool_stats {
@@ -643,9 +643,6 @@ struct nv_ethtool_stats {
643 u64 tx_carrier_errors; 643 u64 tx_carrier_errors;
644 u64 tx_excess_deferral; 644 u64 tx_excess_deferral;
645 u64 tx_retry_error; 645 u64 tx_retry_error;
646 u64 tx_deferral;
647 u64 tx_packets;
648 u64 tx_pause;
649 u64 rx_frame_error; 646 u64 rx_frame_error;
650 u64 rx_extra_byte; 647 u64 rx_extra_byte;
651 u64 rx_late_collision; 648 u64 rx_late_collision;
@@ -658,13 +655,22 @@ struct nv_ethtool_stats {
658 u64 rx_unicast; 655 u64 rx_unicast;
659 u64 rx_multicast; 656 u64 rx_multicast;
660 u64 rx_broadcast; 657 u64 rx_broadcast;
658 u64 rx_packets;
659 u64 rx_errors_total;
660 u64 tx_errors_total;
661
662 /* version 2 stats */
663 u64 tx_deferral;
664 u64 tx_packets;
661 u64 rx_bytes; 665 u64 rx_bytes;
666 u64 tx_pause;
662 u64 rx_pause; 667 u64 rx_pause;
663 u64 rx_drop_frame; 668 u64 rx_drop_frame;
664 u64 rx_packets;
665 u64 rx_errors_total;
666}; 669};
667 670
671#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
672#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
673
668/* diagnostics */ 674/* diagnostics */
669#define NV_TEST_COUNT_BASE 3 675#define NV_TEST_COUNT_BASE 3
670#define NV_TEST_COUNT_EXTENDED 4 676#define NV_TEST_COUNT_EXTENDED 4
@@ -691,6 +697,12 @@ static const struct register_test nv_registers_test[] = {
691 { 0,0 } 697 { 0,0 }
692}; 698};
693 699
700struct nv_skb_map {
701 struct sk_buff *skb;
702 dma_addr_t dma;
703 unsigned int dma_len;
704};
705
694/* 706/*
695 * SMP locking: 707 * SMP locking:
696 * All hardware access under dev->priv->lock, except the performance 708 * All hardware access under dev->priv->lock, except the performance
@@ -741,10 +753,12 @@ struct fe_priv {
741 /* rx specific fields. 753 /* rx specific fields.
742 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); 754 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
743 */ 755 */
756 union ring_type get_rx, put_rx, first_rx, last_rx;
757 struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
758 struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
759 struct nv_skb_map *rx_skb;
760
744 union ring_type rx_ring; 761 union ring_type rx_ring;
745 unsigned int cur_rx, refill_rx;
746 struct sk_buff **rx_skbuff;
747 dma_addr_t *rx_dma;
748 unsigned int rx_buf_sz; 762 unsigned int rx_buf_sz;
749 unsigned int pkt_limit; 763 unsigned int pkt_limit;
750 struct timer_list oom_kick; 764 struct timer_list oom_kick;
@@ -761,15 +775,15 @@ struct fe_priv {
761 /* 775 /*
762 * tx specific fields. 776 * tx specific fields.
763 */ 777 */
778 union ring_type get_tx, put_tx, first_tx, last_tx;
779 struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
780 struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
781 struct nv_skb_map *tx_skb;
782
764 union ring_type tx_ring; 783 union ring_type tx_ring;
765 unsigned int next_tx, nic_tx;
766 struct sk_buff **tx_skbuff;
767 dma_addr_t *tx_dma;
768 unsigned int *tx_dma_len;
769 u32 tx_flags; 784 u32 tx_flags;
770 int tx_ring_size; 785 int tx_ring_size;
771 int tx_limit_start; 786 int tx_stop;
772 int tx_limit_stop;
773 787
774 /* vlan fields */ 788 /* vlan fields */
775 struct vlan_group *vlangrp; 789 struct vlan_group *vlangrp;
@@ -921,16 +935,10 @@ static void free_rings(struct net_device *dev)
921 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 935 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
922 np->rx_ring.ex, np->ring_addr); 936 np->rx_ring.ex, np->ring_addr);
923 } 937 }
924 if (np->rx_skbuff) 938 if (np->rx_skb)
925 kfree(np->rx_skbuff); 939 kfree(np->rx_skb);
926 if (np->rx_dma) 940 if (np->tx_skb)
927 kfree(np->rx_dma); 941 kfree(np->tx_skb);
928 if (np->tx_skbuff)
929 kfree(np->tx_skbuff);
930 if (np->tx_dma)
931 kfree(np->tx_dma);
932 if (np->tx_dma_len)
933 kfree(np->tx_dma_len);
934} 942}
935 943
936static int using_multi_irqs(struct net_device *dev) 944static int using_multi_irqs(struct net_device *dev)
@@ -1279,6 +1287,61 @@ static void nv_mac_reset(struct net_device *dev)
1279 pci_push(base); 1287 pci_push(base);
1280} 1288}
1281 1289
1290static void nv_get_hw_stats(struct net_device *dev)
1291{
1292 struct fe_priv *np = netdev_priv(dev);
1293 u8 __iomem *base = get_hwbase(dev);
1294
1295 np->estats.tx_bytes += readl(base + NvRegTxCnt);
1296 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1297 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1298 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1299 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1300 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1301 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1302 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1303 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1304 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1305 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1306 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1307 np->estats.rx_runt += readl(base + NvRegRxRunt);
1308 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1309 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1310 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1311 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1312 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1313 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1314 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1315 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1316 np->estats.rx_packets =
1317 np->estats.rx_unicast +
1318 np->estats.rx_multicast +
1319 np->estats.rx_broadcast;
1320 np->estats.rx_errors_total =
1321 np->estats.rx_crc_errors +
1322 np->estats.rx_over_errors +
1323 np->estats.rx_frame_error +
1324 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1325 np->estats.rx_late_collision +
1326 np->estats.rx_runt +
1327 np->estats.rx_frame_too_long;
1328 np->estats.tx_errors_total =
1329 np->estats.tx_late_collision +
1330 np->estats.tx_fifo_errors +
1331 np->estats.tx_carrier_errors +
1332 np->estats.tx_excess_deferral +
1333 np->estats.tx_retry_error;
1334
1335 if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1336 np->estats.tx_deferral += readl(base + NvRegTxDef);
1337 np->estats.tx_packets += readl(base + NvRegTxFrame);
1338 np->estats.rx_bytes += readl(base + NvRegRxCnt);
1339 np->estats.tx_pause += readl(base + NvRegTxPause);
1340 np->estats.rx_pause += readl(base + NvRegRxPause);
1341 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1342 }
1343}
1344
1282/* 1345/*
1283 * nv_get_stats: dev->get_stats function 1346 * nv_get_stats: dev->get_stats function
1284 * Get latest stats value from the nic. 1347 * Get latest stats value from the nic.
@@ -1289,10 +1352,19 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1289{ 1352{
1290 struct fe_priv *np = netdev_priv(dev); 1353 struct fe_priv *np = netdev_priv(dev);
1291 1354
1292 /* It seems that the nic always generates interrupts and doesn't 1355 /* If the nic supports hw counters then retrieve latest values */
1293 * accumulate errors internally. Thus the current values in np->stats 1356 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
1294 * are already up to date. 1357 nv_get_hw_stats(dev);
1295 */ 1358
1359 /* copy to net_device stats */
1360 np->stats.tx_bytes = np->estats.tx_bytes;
1361 np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1362 np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1363 np->stats.rx_crc_errors = np->estats.rx_crc_errors;
1364 np->stats.rx_over_errors = np->estats.rx_over_errors;
1365 np->stats.rx_errors = np->estats.rx_errors_total;
1366 np->stats.tx_errors = np->estats.tx_errors_total;
1367 }
1296 return &np->stats; 1368 return &np->stats;
1297} 1369}
1298 1370
@@ -1304,43 +1376,63 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1304static int nv_alloc_rx(struct net_device *dev) 1376static int nv_alloc_rx(struct net_device *dev)
1305{ 1377{
1306 struct fe_priv *np = netdev_priv(dev); 1378 struct fe_priv *np = netdev_priv(dev);
1307 unsigned int refill_rx = np->refill_rx; 1379 struct ring_desc* less_rx;
1308 int nr;
1309 1380
1310 while (np->cur_rx != refill_rx) { 1381 less_rx = np->get_rx.orig;
1311 struct sk_buff *skb; 1382 if (less_rx-- == np->first_rx.orig)
1312 1383 less_rx = np->last_rx.orig;
1313 nr = refill_rx % np->rx_ring_size;
1314 if (np->rx_skbuff[nr] == NULL) {
1315
1316 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1317 if (!skb)
1318 break;
1319 1384
1385 while (np->put_rx.orig != less_rx) {
1386 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1387 if (skb) {
1320 skb->dev = dev; 1388 skb->dev = dev;
1321 np->rx_skbuff[nr] = skb; 1389 np->put_rx_ctx->skb = skb;
1390 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1391 skb->end-skb->data, PCI_DMA_FROMDEVICE);
1392 np->put_rx_ctx->dma_len = skb->end-skb->data;
1393 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1394 wmb();
1395 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1396 if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1397 np->put_rx.orig = np->first_rx.orig;
1398 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1399 np->put_rx_ctx = np->first_rx_ctx;
1322 } else { 1400 } else {
1323 skb = np->rx_skbuff[nr]; 1401 return 1;
1324 } 1402 }
1325 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, 1403 }
1326 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1404 return 0;
1327 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1405}
1328 np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]); 1406
1407static int nv_alloc_rx_optimized(struct net_device *dev)
1408{
1409 struct fe_priv *np = netdev_priv(dev);
1410 struct ring_desc_ex* less_rx;
1411
1412 less_rx = np->get_rx.ex;
1413 if (less_rx-- == np->first_rx.ex)
1414 less_rx = np->last_rx.ex;
1415
1416 while (np->put_rx.ex != less_rx) {
1417 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1418 if (skb) {
1419 skb->dev = dev;
1420 np->put_rx_ctx->skb = skb;
1421 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1422 skb->end-skb->data, PCI_DMA_FROMDEVICE);
1423 np->put_rx_ctx->dma_len = skb->end-skb->data;
1424 np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
1425 np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
1329 wmb(); 1426 wmb();
1330 np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1427 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1428 if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1429 np->put_rx.ex = np->first_rx.ex;
1430 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1431 np->put_rx_ctx = np->first_rx_ctx;
1331 } else { 1432 } else {
1332 np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32; 1433 return 1;
1333 np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
1334 wmb();
1335 np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1336 } 1434 }
1337 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
1338 dev->name, refill_rx);
1339 refill_rx++;
1340 } 1435 }
1341 np->refill_rx = refill_rx;
1342 if (np->cur_rx - refill_rx == np->rx_ring_size)
1343 return 1;
1344 return 0; 1436 return 0;
1345} 1437}
1346 1438
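The rewritten refill path replaces the old refill_rx/cur_rx index bookkeeping with ring pointers: the producer (put_rx) stops one slot short of the consumer (get_rx), and both the descriptor pointer and the skb context pointer wrap when they pass the last element. A minimal standalone sketch of that pattern follows; the names are illustrative stand-ins, not forcedeth symbols.

#include <stdio.h>

#define RING_SIZE 8

struct desc { int filled; };

int main(void)
{
	struct desc ring[RING_SIZE] = { { 0 } };
	struct desc *first = &ring[0];
	struct desc *last  = &ring[RING_SIZE - 1];
	struct desc *get   = &ring[3];	/* consumer position (hardware side) */
	struct desc *put   = &ring[3];	/* producer position (refill side) */
	struct desc *less_rx;
	int refilled = 0;

	/* stop one slot before the consumer, like less_rx in nv_alloc_rx() */
	less_rx = get;
	if (less_rx-- == first)
		less_rx = last;

	while (put != less_rx) {
		put->filled = 1;	/* stands in for skb alloc + pci_map_single() */
		refilled++;
		if (put++ == last)	/* wrap the way put_rx.orig does */
			put = first;
	}
	printf("refilled %d of %d slots\n", refilled, RING_SIZE);
	return 0;
}

Leaving one slot empty is what lets the loop tell a full ring from an empty one without keeping a separate counter.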
@@ -1358,6 +1450,7 @@ static void nv_do_rx_refill(unsigned long data)
1358{ 1450{
1359 struct net_device *dev = (struct net_device *) data; 1451 struct net_device *dev = (struct net_device *) data;
1360 struct fe_priv *np = netdev_priv(dev); 1452 struct fe_priv *np = netdev_priv(dev);
1453 int retcode;
1361 1454
1362 if (!using_multi_irqs(dev)) { 1455 if (!using_multi_irqs(dev)) {
1363 if (np->msi_flags & NV_MSI_X_ENABLED) 1456 if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -1367,7 +1460,11 @@ static void nv_do_rx_refill(unsigned long data)
1367 } else { 1460 } else {
1368 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1461 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1369 } 1462 }
1370 if (nv_alloc_rx(dev)) { 1463 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1464 retcode = nv_alloc_rx(dev);
1465 else
1466 retcode = nv_alloc_rx_optimized(dev);
1467 if (retcode) {
1371 spin_lock_irq(&np->lock); 1468 spin_lock_irq(&np->lock);
1372 if (!np->in_shutdown) 1469 if (!np->in_shutdown)
1373 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1470 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -1388,56 +1485,81 @@ static void nv_init_rx(struct net_device *dev)
1388{ 1485{
1389 struct fe_priv *np = netdev_priv(dev); 1486 struct fe_priv *np = netdev_priv(dev);
1390 int i; 1487 int i;
1488 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1489 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1490 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1491 else
1492 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1493 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1494 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1391 1495
1392 np->cur_rx = np->rx_ring_size; 1496 for (i = 0; i < np->rx_ring_size; i++) {
1393 np->refill_rx = 0; 1497 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1394 for (i = 0; i < np->rx_ring_size; i++)
1395 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1396 np->rx_ring.orig[i].flaglen = 0; 1498 np->rx_ring.orig[i].flaglen = 0;
1397 else 1499 np->rx_ring.orig[i].buf = 0;
1500 } else {
1398 np->rx_ring.ex[i].flaglen = 0; 1501 np->rx_ring.ex[i].flaglen = 0;
1502 np->rx_ring.ex[i].txvlan = 0;
1503 np->rx_ring.ex[i].bufhigh = 0;
1504 np->rx_ring.ex[i].buflow = 0;
1505 }
1506 np->rx_skb[i].skb = NULL;
1507 np->rx_skb[i].dma = 0;
1508 }
1399} 1509}
1400 1510
1401static void nv_init_tx(struct net_device *dev) 1511static void nv_init_tx(struct net_device *dev)
1402{ 1512{
1403 struct fe_priv *np = netdev_priv(dev); 1513 struct fe_priv *np = netdev_priv(dev);
1404 int i; 1514 int i;
1515 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1516 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1517 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1518 else
1519 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1520 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1521 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1405 1522
1406 np->next_tx = np->nic_tx = 0;
1407 for (i = 0; i < np->tx_ring_size; i++) { 1523 for (i = 0; i < np->tx_ring_size; i++) {
1408 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1524 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1409 np->tx_ring.orig[i].flaglen = 0; 1525 np->tx_ring.orig[i].flaglen = 0;
1410 else 1526 np->tx_ring.orig[i].buf = 0;
1527 } else {
1411 np->tx_ring.ex[i].flaglen = 0; 1528 np->tx_ring.ex[i].flaglen = 0;
1412 np->tx_skbuff[i] = NULL; 1529 np->tx_ring.ex[i].txvlan = 0;
1413 np->tx_dma[i] = 0; 1530 np->tx_ring.ex[i].bufhigh = 0;
1531 np->tx_ring.ex[i].buflow = 0;
1532 }
1533 np->tx_skb[i].skb = NULL;
1534 np->tx_skb[i].dma = 0;
1414 } 1535 }
1415} 1536}
1416 1537
1417static int nv_init_ring(struct net_device *dev) 1538static int nv_init_ring(struct net_device *dev)
1418{ 1539{
1540 struct fe_priv *np = netdev_priv(dev);
1541
1419 nv_init_tx(dev); 1542 nv_init_tx(dev);
1420 nv_init_rx(dev); 1543 nv_init_rx(dev);
1421 return nv_alloc_rx(dev); 1544 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1545 return nv_alloc_rx(dev);
1546 else
1547 return nv_alloc_rx_optimized(dev);
1422} 1548}
1423 1549
1424static int nv_release_txskb(struct net_device *dev, unsigned int skbnr) 1550static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
1425{ 1551{
1426 struct fe_priv *np = netdev_priv(dev); 1552 struct fe_priv *np = netdev_priv(dev);
1427 1553
1428 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n", 1554 if (tx_skb->dma) {
1429 dev->name, skbnr); 1555 pci_unmap_page(np->pci_dev, tx_skb->dma,
1430 1556 tx_skb->dma_len,
1431 if (np->tx_dma[skbnr]) {
1432 pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
1433 np->tx_dma_len[skbnr],
1434 PCI_DMA_TODEVICE); 1557 PCI_DMA_TODEVICE);
1435 np->tx_dma[skbnr] = 0; 1558 tx_skb->dma = 0;
1436 } 1559 }
1437 1560 if (tx_skb->skb) {
1438 if (np->tx_skbuff[skbnr]) { 1561 dev_kfree_skb_any(tx_skb->skb);
1439 dev_kfree_skb_any(np->tx_skbuff[skbnr]); 1562 tx_skb->skb = NULL;
1440 np->tx_skbuff[skbnr] = NULL;
1441 return 1; 1563 return 1;
1442 } else { 1564 } else {
1443 return 0; 1565 return 0;
@@ -1450,11 +1572,16 @@ static void nv_drain_tx(struct net_device *dev)
1450 unsigned int i; 1572 unsigned int i;
1451 1573
1452 for (i = 0; i < np->tx_ring_size; i++) { 1574 for (i = 0; i < np->tx_ring_size; i++) {
1453 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1575 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1454 np->tx_ring.orig[i].flaglen = 0; 1576 np->tx_ring.orig[i].flaglen = 0;
1455 else 1577 np->tx_ring.orig[i].buf = 0;
1578 } else {
1456 np->tx_ring.ex[i].flaglen = 0; 1579 np->tx_ring.ex[i].flaglen = 0;
1457 if (nv_release_txskb(dev, i)) 1580 np->tx_ring.ex[i].txvlan = 0;
1581 np->tx_ring.ex[i].bufhigh = 0;
1582 np->tx_ring.ex[i].buflow = 0;
1583 }
1584 if (nv_release_txskb(dev, &np->tx_skb[i]))
1458 np->stats.tx_dropped++; 1585 np->stats.tx_dropped++;
1459 } 1586 }
1460} 1587}
@@ -1463,18 +1590,24 @@ static void nv_drain_rx(struct net_device *dev)
1463{ 1590{
1464 struct fe_priv *np = netdev_priv(dev); 1591 struct fe_priv *np = netdev_priv(dev);
1465 int i; 1592 int i;
1593
1466 for (i = 0; i < np->rx_ring_size; i++) { 1594 for (i = 0; i < np->rx_ring_size; i++) {
1467 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1595 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1468 np->rx_ring.orig[i].flaglen = 0; 1596 np->rx_ring.orig[i].flaglen = 0;
1469 else 1597 np->rx_ring.orig[i].buf = 0;
1598 } else {
1470 np->rx_ring.ex[i].flaglen = 0; 1599 np->rx_ring.ex[i].flaglen = 0;
1600 np->rx_ring.ex[i].txvlan = 0;
1601 np->rx_ring.ex[i].bufhigh = 0;
1602 np->rx_ring.ex[i].buflow = 0;
1603 }
1471 wmb(); 1604 wmb();
1472 if (np->rx_skbuff[i]) { 1605 if (np->rx_skb[i].skb) {
1473 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1606 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1474 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 1607 np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
1475 PCI_DMA_FROMDEVICE); 1608 PCI_DMA_FROMDEVICE);
1476 dev_kfree_skb(np->rx_skbuff[i]); 1609 dev_kfree_skb(np->rx_skb[i].skb);
1477 np->rx_skbuff[i] = NULL; 1610 np->rx_skb[i].skb = NULL;
1478 } 1611 }
1479 } 1612 }
1480} 1613}
@@ -1485,6 +1618,11 @@ static void drain_ring(struct net_device *dev)
1485 nv_drain_rx(dev); 1618 nv_drain_rx(dev);
1486} 1619}
1487 1620
1621static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1622{
1623 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
1624}
1625
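The new nv_get_empty_tx_slots() helper is plain modular arithmetic over the tx context array: the in-flight count is (put - get) folded into [0, ring_size), and free space is whatever remains. A hedged illustration of the same formula with integer indices instead of nv_skb_map pointers:

#include <stdio.h>

static unsigned int empty_slots(unsigned int ring_size,
				unsigned int put, unsigned int get)
{
	/* entries currently in flight, folded into [0, ring_size) */
	unsigned int used = (ring_size + put - get) % ring_size;

	return ring_size - used;
}

int main(void)
{
	printf("%u\n", empty_slots(16, 10, 4));	/* put ahead of get: 10 free */
	printf("%u\n", empty_slots(16, 2, 12));	/* put wrapped past get: 10 free */
	printf("%u\n", empty_slots(16, 5, 5));	/* nothing queued: 16 free */
	return 0;
}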
1488/* 1626/*
1489 * nv_start_xmit: dev->hard_start_xmit function 1627 * nv_start_xmit: dev->hard_start_xmit function
1490 * Called with netif_tx_lock held. 1628 * Called with netif_tx_lock held.
@@ -1495,14 +1633,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1495 u32 tx_flags = 0; 1633 u32 tx_flags = 0;
1496 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1634 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1497 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1635 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1498 unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
1499 unsigned int start_nr = np->next_tx % np->tx_ring_size;
1500 unsigned int i; 1636 unsigned int i;
1501 u32 offset = 0; 1637 u32 offset = 0;
1502 u32 bcnt; 1638 u32 bcnt;
1503 u32 size = skb->len-skb->data_len; 1639 u32 size = skb->len-skb->data_len;
1504 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1640 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1505 u32 tx_flags_vlan = 0; 1641 u32 empty_slots;
1642 struct ring_desc* put_tx;
1643 struct ring_desc* start_tx;
1644 struct ring_desc* prev_tx;
1645 struct nv_skb_map* prev_tx_ctx;
1506 1646
1507 /* add fragments to entries count */ 1647 /* add fragments to entries count */
1508 for (i = 0; i < fragments; i++) { 1648 for (i = 0; i < fragments; i++) {
@@ -1510,34 +1650,35 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1510 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1650 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1511 } 1651 }
1512 1652
1513 spin_lock_irq(&np->lock); 1653 empty_slots = nv_get_empty_tx_slots(np);
1514 1654 if (unlikely(empty_slots <= entries)) {
1515 if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) { 1655 spin_lock_irq(&np->lock);
1516 spin_unlock_irq(&np->lock);
1517 netif_stop_queue(dev); 1656 netif_stop_queue(dev);
1657 np->tx_stop = 1;
1658 spin_unlock_irq(&np->lock);
1518 return NETDEV_TX_BUSY; 1659 return NETDEV_TX_BUSY;
1519 } 1660 }
1520 1661
1662 start_tx = put_tx = np->put_tx.orig;
1663
1521 /* setup the header buffer */ 1664 /* setup the header buffer */
1522 do { 1665 do {
1666 prev_tx = put_tx;
1667 prev_tx_ctx = np->put_tx_ctx;
1523 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1668 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1524 nr = (nr + 1) % np->tx_ring_size; 1669 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1525
1526 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1527 PCI_DMA_TODEVICE); 1670 PCI_DMA_TODEVICE);
1528 np->tx_dma_len[nr] = bcnt; 1671 np->put_tx_ctx->dma_len = bcnt;
1672 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1673 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1529 1674
1530 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1531 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
1532 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1533 } else {
1534 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1535 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1536 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1537 }
1538 tx_flags = np->tx_flags; 1675 tx_flags = np->tx_flags;
1539 offset += bcnt; 1676 offset += bcnt;
1540 size -= bcnt; 1677 size -= bcnt;
1678 if (unlikely(put_tx++ == np->last_tx.orig))
1679 put_tx = np->first_tx.orig;
1680 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1681 np->put_tx_ctx = np->first_tx_ctx;
1541 } while (size); 1682 } while (size);
1542 1683
1543 /* setup the fragments */ 1684 /* setup the fragments */
@@ -1547,58 +1688,174 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1547 offset = 0; 1688 offset = 0;
1548 1689
1549 do { 1690 do {
1691 prev_tx = put_tx;
1692 prev_tx_ctx = np->put_tx_ctx;
1550 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1693 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1551 nr = (nr + 1) % np->tx_ring_size; 1694 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1552 1695 PCI_DMA_TODEVICE);
1553 np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1696 np->put_tx_ctx->dma_len = bcnt;
1554 PCI_DMA_TODEVICE); 1697 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1555 np->tx_dma_len[nr] = bcnt; 1698 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1556 1699
1557 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1558 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
1559 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1560 } else {
1561 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1562 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1563 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1564 }
1565 offset += bcnt; 1700 offset += bcnt;
1566 size -= bcnt; 1701 size -= bcnt;
1702 if (unlikely(put_tx++ == np->last_tx.orig))
1703 put_tx = np->first_tx.orig;
1704 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1705 np->put_tx_ctx = np->first_tx_ctx;
1567 } while (size); 1706 } while (size);
1568 } 1707 }
1569 1708
1570 /* set last fragment flag */ 1709 /* set last fragment flag */
1571 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1710 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
1572 np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra); 1711
1573 } else { 1712 /* save skb in this slot's context area */
1574 np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra); 1713 prev_tx_ctx->skb = skb;
1714
1715 if (skb_is_gso(skb))
1716 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1717 else
1718 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1719 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1720
1721 spin_lock_irq(&np->lock);
1722
1723 /* set tx flags */
1724 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1725 np->put_tx.orig = put_tx;
1726
1727 spin_unlock_irq(&np->lock);
1728
1729 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1730 dev->name, entries, tx_flags_extra);
1731 {
1732 int j;
1733 for (j=0; j<64; j++) {
1734 if ((j%16) == 0)
1735 dprintk("\n%03x:", j);
1736 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1737 }
1738 dprintk("\n");
1739 }
1740
1741 dev->trans_start = jiffies;
1742 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1743 return NETDEV_TX_OK;
1744}
1745
1746static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
1747{
1748 struct fe_priv *np = netdev_priv(dev);
1749 u32 tx_flags = 0;
1750 u32 tx_flags_extra;
1751 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1752 unsigned int i;
1753 u32 offset = 0;
1754 u32 bcnt;
1755 u32 size = skb->len-skb->data_len;
1756 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1757 u32 empty_slots;
1758 struct ring_desc_ex* put_tx;
1759 struct ring_desc_ex* start_tx;
1760 struct ring_desc_ex* prev_tx;
1761 struct nv_skb_map* prev_tx_ctx;
1762
1763 /* add fragments to entries count */
1764 for (i = 0; i < fragments; i++) {
1765 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1766 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1767 }
1768
1769 empty_slots = nv_get_empty_tx_slots(np);
1770 if (unlikely(empty_slots <= entries)) {
1771 spin_lock_irq(&np->lock);
1772 netif_stop_queue(dev);
1773 np->tx_stop = 1;
1774 spin_unlock_irq(&np->lock);
1775 return NETDEV_TX_BUSY;
1776 }
1777
1778 start_tx = put_tx = np->put_tx.ex;
1779
1780 /* setup the header buffer */
1781 do {
1782 prev_tx = put_tx;
1783 prev_tx_ctx = np->put_tx_ctx;
1784 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1785 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1786 PCI_DMA_TODEVICE);
1787 np->put_tx_ctx->dma_len = bcnt;
1788 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1789 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1790 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1791
1792 tx_flags = NV_TX2_VALID;
1793 offset += bcnt;
1794 size -= bcnt;
1795 if (unlikely(put_tx++ == np->last_tx.ex))
1796 put_tx = np->first_tx.ex;
1797 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1798 np->put_tx_ctx = np->first_tx_ctx;
1799 } while (size);
1800
1801 /* setup the fragments */
1802 for (i = 0; i < fragments; i++) {
1803 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1804 u32 size = frag->size;
1805 offset = 0;
1806
1807 do {
1808 prev_tx = put_tx;
1809 prev_tx_ctx = np->put_tx_ctx;
1810 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1811 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1812 PCI_DMA_TODEVICE);
1813 np->put_tx_ctx->dma_len = bcnt;
1814 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1815 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1816 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1817
1818 offset += bcnt;
1819 size -= bcnt;
1820 if (unlikely(put_tx++ == np->last_tx.ex))
1821 put_tx = np->first_tx.ex;
1822 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1823 np->put_tx_ctx = np->first_tx_ctx;
1824 } while (size);
1575 } 1825 }
1576 1826
1577 np->tx_skbuff[nr] = skb; 1827 /* set last fragment flag */
1828 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
1829
1830 /* save skb in this slot's context area */
1831 prev_tx_ctx->skb = skb;
1578 1832
1579#ifdef NETIF_F_TSO
1580 if (skb_is_gso(skb)) 1833 if (skb_is_gso(skb))
1581 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1834 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1582 else 1835 else
1583#endif 1836 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1584 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1585 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 1837 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1586 1838
1587 /* vlan tag */ 1839 /* vlan tag */
1588 if (np->vlangrp && vlan_tx_tag_present(skb)) { 1840 if (likely(!np->vlangrp)) {
1589 tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb); 1841 start_tx->txvlan = 0;
1842 } else {
1843 if (vlan_tx_tag_present(skb))
1844 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
1845 else
1846 start_tx->txvlan = 0;
1590 } 1847 }
1591 1848
1849 spin_lock_irq(&np->lock);
1850
1592 /* set tx flags */ 1851 /* set tx flags */
1593 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1852 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1594 np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 1853 np->put_tx.ex = put_tx;
1595 } else { 1854
1596 np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan); 1855 spin_unlock_irq(&np->lock);
1597 np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1598 }
1599 1856
1600 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", 1857 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
1601 dev->name, np->next_tx, entries, tx_flags_extra); 1858 dev->name, entries, tx_flags_extra);
1602 { 1859 {
1603 int j; 1860 int j;
1604 for (j=0; j<64; j++) { 1861 for (j=0; j<64; j++) {
@@ -1609,12 +1866,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1609 dprintk("\n"); 1866 dprintk("\n");
1610 } 1867 }
1611 1868
1612 np->next_tx += entries;
1613
1614 dev->trans_start = jiffies; 1869 dev->trans_start = jiffies;
1615 spin_unlock_irq(&np->lock);
1616 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1870 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1617 pci_push(get_hwbase(dev));
1618 return NETDEV_TX_OK; 1871 return NETDEV_TX_OK;
1619} 1872}
1620 1873
@@ -1627,26 +1880,22 @@ static void nv_tx_done(struct net_device *dev)
1627{ 1880{
1628 struct fe_priv *np = netdev_priv(dev); 1881 struct fe_priv *np = netdev_priv(dev);
1629 u32 flags; 1882 u32 flags;
1630 unsigned int i; 1883 struct ring_desc* orig_get_tx = np->get_tx.orig;
1631 struct sk_buff *skb;
1632 1884
1633 while (np->nic_tx != np->next_tx) { 1885 while ((np->get_tx.orig != np->put_tx.orig) &&
1634 i = np->nic_tx % np->tx_ring_size; 1886 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
1635 1887
1636 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1888 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
1637 flags = le32_to_cpu(np->tx_ring.orig[i].flaglen); 1889 dev->name, flags);
1638 else 1890
1639 flags = le32_to_cpu(np->tx_ring.ex[i].flaglen); 1891 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
1892 np->get_tx_ctx->dma_len,
1893 PCI_DMA_TODEVICE);
1894 np->get_tx_ctx->dma = 0;
1640 1895
1641 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
1642 dev->name, np->nic_tx, flags);
1643 if (flags & NV_TX_VALID)
1644 break;
1645 if (np->desc_ver == DESC_VER_1) { 1896 if (np->desc_ver == DESC_VER_1) {
1646 if (flags & NV_TX_LASTPACKET) { 1897 if (flags & NV_TX_LASTPACKET) {
1647 skb = np->tx_skbuff[i]; 1898 if (flags & NV_TX_ERROR) {
1648 if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
1649 NV_TX_UNDERFLOW|NV_TX_ERROR)) {
1650 if (flags & NV_TX_UNDERFLOW) 1899 if (flags & NV_TX_UNDERFLOW)
1651 np->stats.tx_fifo_errors++; 1900 np->stats.tx_fifo_errors++;
1652 if (flags & NV_TX_CARRIERLOST) 1901 if (flags & NV_TX_CARRIERLOST)
@@ -1654,14 +1903,14 @@ static void nv_tx_done(struct net_device *dev)
1654 np->stats.tx_errors++; 1903 np->stats.tx_errors++;
1655 } else { 1904 } else {
1656 np->stats.tx_packets++; 1905 np->stats.tx_packets++;
1657 np->stats.tx_bytes += skb->len; 1906 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1658 } 1907 }
1908 dev_kfree_skb_any(np->get_tx_ctx->skb);
1909 np->get_tx_ctx->skb = NULL;
1659 } 1910 }
1660 } else { 1911 } else {
1661 if (flags & NV_TX2_LASTPACKET) { 1912 if (flags & NV_TX2_LASTPACKET) {
1662 skb = np->tx_skbuff[i]; 1913 if (flags & NV_TX2_ERROR) {
1663 if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1664 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1665 if (flags & NV_TX2_UNDERFLOW) 1914 if (flags & NV_TX2_UNDERFLOW)
1666 np->stats.tx_fifo_errors++; 1915 np->stats.tx_fifo_errors++;
1667 if (flags & NV_TX2_CARRIERLOST) 1916 if (flags & NV_TX2_CARRIERLOST)
@@ -1669,15 +1918,56 @@ static void nv_tx_done(struct net_device *dev)
1669 np->stats.tx_errors++; 1918 np->stats.tx_errors++;
1670 } else { 1919 } else {
1671 np->stats.tx_packets++; 1920 np->stats.tx_packets++;
1672 np->stats.tx_bytes += skb->len; 1921 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1673 } 1922 }
1923 dev_kfree_skb_any(np->get_tx_ctx->skb);
1924 np->get_tx_ctx->skb = NULL;
1674 } 1925 }
1675 } 1926 }
1676 nv_release_txskb(dev, i); 1927 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
1677 np->nic_tx++; 1928 np->get_tx.orig = np->first_tx.orig;
1929 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
1930 np->get_tx_ctx = np->first_tx_ctx;
1678 } 1931 }
1679 if (np->next_tx - np->nic_tx < np->tx_limit_start) 1932 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
1933 np->tx_stop = 0;
1680 netif_wake_queue(dev); 1934 netif_wake_queue(dev);
1935 }
1936}
1937
1938static void nv_tx_done_optimized(struct net_device *dev, int limit)
1939{
1940 struct fe_priv *np = netdev_priv(dev);
1941 u32 flags;
1942 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
1943
1944 while ((np->get_tx.ex != np->put_tx.ex) &&
1945 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
1946 (limit-- > 0)) {
1947
1948 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
1949 dev->name, flags);
1950
1951 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
1952 np->get_tx_ctx->dma_len,
1953 PCI_DMA_TODEVICE);
1954 np->get_tx_ctx->dma = 0;
1955
1956 if (flags & NV_TX2_LASTPACKET) {
1957 if (!(flags & NV_TX2_ERROR))
1958 np->stats.tx_packets++;
1959 dev_kfree_skb_any(np->get_tx_ctx->skb);
1960 np->get_tx_ctx->skb = NULL;
1961 }
1962 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
1963 np->get_tx.ex = np->first_tx.ex;
1964 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
1965 np->get_tx_ctx = np->first_tx_ctx;
1966 }
1967 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
1968 np->tx_stop = 0;
1969 netif_wake_queue(dev);
1970 }
1681} 1971}
1682 1972
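Completion processing now walks from get_tx toward put_tx, stopping at the first descriptor the hardware still owns (NV_TX_VALID set) or when the work limit runs out. A simplified stand-alone model of that walk; field and macro names here are illustrative only.

#include <stdio.h>

#define RING_SIZE	8
#define FLAG_VALID	0x1	/* "still owned by hardware" in this sketch */

struct txdesc { unsigned int flags; };

int main(void)
{
	/* slots 2..4 completed (VALID cleared), slot 5 still owned by the nic */
	struct txdesc ring[RING_SIZE] = { [5] = { FLAG_VALID } };
	unsigned int get = 2, put = 6;
	int limit = 64, done = 0;

	while (get != put &&
	       !(ring[get].flags & FLAG_VALID) &&
	       limit-- > 0) {
		done++;				/* the driver unmaps and frees the skb here */
		get = (get + 1) % RING_SIZE;	/* wrap like get_tx.ex / get_tx_ctx */
	}
	printf("reclaimed %d descriptors, get now at slot %u\n", done, get);
	return 0;
}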
1683/* 1973/*
@@ -1700,9 +1990,8 @@ static void nv_tx_timeout(struct net_device *dev)
1700 { 1990 {
1701 int i; 1991 int i;
1702 1992
1703 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n", 1993 printk(KERN_INFO "%s: Ring at %lx\n",
1704 dev->name, (unsigned long)np->ring_addr, 1994 dev->name, (unsigned long)np->ring_addr);
1705 np->next_tx, np->nic_tx);
1706 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 1995 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1707 for (i=0;i<=np->register_size;i+= 32) { 1996 for (i=0;i<=np->register_size;i+= 32) {
1708 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 1997 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
@@ -1750,13 +2039,16 @@ static void nv_tx_timeout(struct net_device *dev)
1750 nv_stop_tx(dev); 2039 nv_stop_tx(dev);
1751 2040
1752 /* 2) check that the packets were not sent already: */ 2041 /* 2) check that the packets were not sent already: */
1753 nv_tx_done(dev); 2042 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2043 nv_tx_done(dev);
2044 else
2045 nv_tx_done_optimized(dev, np->tx_ring_size);
1754 2046
1755 /* 3) if there are dead entries: clear everything */ 2047 /* 3) if there are dead entries: clear everything */
1756 if (np->next_tx != np->nic_tx) { 2048 if (np->get_tx_ctx != np->put_tx_ctx) {
1757 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 2049 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1758 nv_drain_tx(dev); 2050 nv_drain_tx(dev);
1759 np->next_tx = np->nic_tx = 0; 2051 nv_init_tx(dev);
1760 setup_hw_rings(dev, NV_SETUP_TX_RING); 2052 setup_hw_rings(dev, NV_SETUP_TX_RING);
1761 netif_wake_queue(dev); 2053 netif_wake_queue(dev);
1762 } 2054 }
@@ -1823,40 +2115,27 @@ static int nv_rx_process(struct net_device *dev, int limit)
1823{ 2115{
1824 struct fe_priv *np = netdev_priv(dev); 2116 struct fe_priv *np = netdev_priv(dev);
1825 u32 flags; 2117 u32 flags;
1826 u32 vlanflags = 0; 2118 u32 rx_processed_cnt = 0;
1827 int count; 2119 struct sk_buff *skb;
1828 2120 int len;
1829 for (count = 0; count < limit; ++count) {
1830 struct sk_buff *skb;
1831 int len;
1832 int i;
1833 if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
1834 break; /* we scanned the whole ring - do not continue */
1835
1836 i = np->cur_rx % np->rx_ring_size;
1837 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1838 flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
1839 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
1840 } else {
1841 flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
1842 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1843 vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
1844 }
1845 2121
1846 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n", 2122 while((np->get_rx.orig != np->put_rx.orig) &&
1847 dev->name, np->cur_rx, flags); 2123 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2124 (rx_processed_cnt++ < limit)) {
1848 2125
1849 if (flags & NV_RX_AVAIL) 2126 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
1850 break; /* still owned by hardware, */ 2127 dev->name, flags);
1851 2128
1852 /* 2129 /*
1853 * the packet is for us - immediately tear down the pci mapping. 2130 * the packet is for us - immediately tear down the pci mapping.
1854 * TODO: check if a prefetch of the first cacheline improves 2131 * TODO: check if a prefetch of the first cacheline improves
1855 * the performance. 2132 * the performance.
1856 */ 2133 */
1857 pci_unmap_single(np->pci_dev, np->rx_dma[i], 2134 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
1858 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 2135 np->get_rx_ctx->dma_len,
1859 PCI_DMA_FROMDEVICE); 2136 PCI_DMA_FROMDEVICE);
2137 skb = np->get_rx_ctx->skb;
2138 np->get_rx_ctx->skb = NULL;
1860 2139
1861 { 2140 {
1862 int j; 2141 int j;
@@ -1864,123 +2143,228 @@ static int nv_rx_process(struct net_device *dev, int limit)
1864 for (j=0; j<64; j++) { 2143 for (j=0; j<64; j++) {
1865 if ((j%16) == 0) 2144 if ((j%16) == 0)
1866 dprintk("\n%03x:", j); 2145 dprintk("\n%03x:", j);
1867 dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); 2146 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1868 } 2147 }
1869 dprintk("\n"); 2148 dprintk("\n");
1870 } 2149 }
1871 /* look at what we actually got: */ 2150 /* look at what we actually got: */
1872 if (np->desc_ver == DESC_VER_1) { 2151 if (np->desc_ver == DESC_VER_1) {
1873 if (!(flags & NV_RX_DESCRIPTORVALID)) 2152 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
1874 goto next_pkt; 2153 len = flags & LEN_MASK_V1;
1875 2154 if (unlikely(flags & NV_RX_ERROR)) {
1876 if (flags & NV_RX_ERROR) { 2155 if (flags & NV_RX_ERROR4) {
1877 if (flags & NV_RX_MISSEDFRAME) { 2156 len = nv_getlen(dev, skb->data, len);
1878 np->stats.rx_missed_errors++; 2157 if (len < 0) {
1879 np->stats.rx_errors++; 2158 np->stats.rx_errors++;
1880 goto next_pkt; 2159 dev_kfree_skb(skb);
1881 } 2160 goto next_pkt;
1882 if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { 2161 }
1883 np->stats.rx_errors++; 2162 }
1884 goto next_pkt; 2163 /* framing errors are soft errors */
1885 } 2164 else if (flags & NV_RX_FRAMINGERR) {
1886 if (flags & NV_RX_CRCERR) { 2165 if (flags & NV_RX_SUBSTRACT1) {
1887 np->stats.rx_crc_errors++; 2166 len--;
1888 np->stats.rx_errors++; 2167 }
1889 goto next_pkt; 2168 }
1890 } 2169 /* the rest are hard errors */
1891 if (flags & NV_RX_OVERFLOW) { 2170 else {
1892 np->stats.rx_over_errors++; 2171 if (flags & NV_RX_MISSEDFRAME)
1893 np->stats.rx_errors++; 2172 np->stats.rx_missed_errors++;
1894 goto next_pkt; 2173 if (flags & NV_RX_CRCERR)
2174 np->stats.rx_crc_errors++;
2175 if (flags & NV_RX_OVERFLOW)
2176 np->stats.rx_over_errors++;
2177 np->stats.rx_errors++;
2178 dev_kfree_skb(skb);
2179 goto next_pkt;
2180 }
1895 } 2181 }
1896 if (flags & NV_RX_ERROR4) { 2182 } else {
1897 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 2183 dev_kfree_skb(skb);
1898 if (len < 0) { 2184 goto next_pkt;
2185 }
2186 } else {
2187 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2188 len = flags & LEN_MASK_V2;
2189 if (unlikely(flags & NV_RX2_ERROR)) {
2190 if (flags & NV_RX2_ERROR4) {
2191 len = nv_getlen(dev, skb->data, len);
2192 if (len < 0) {
2193 np->stats.rx_errors++;
2194 dev_kfree_skb(skb);
2195 goto next_pkt;
2196 }
2197 }
2198 /* framing errors are soft errors */
2199 else if (flags & NV_RX2_FRAMINGERR) {
2200 if (flags & NV_RX2_SUBSTRACT1) {
2201 len--;
2202 }
2203 }
2204 /* the rest are hard errors */
2205 else {
2206 if (flags & NV_RX2_CRCERR)
2207 np->stats.rx_crc_errors++;
2208 if (flags & NV_RX2_OVERFLOW)
2209 np->stats.rx_over_errors++;
1899 np->stats.rx_errors++; 2210 np->stats.rx_errors++;
2211 dev_kfree_skb(skb);
1900 goto next_pkt; 2212 goto next_pkt;
1901 } 2213 }
1902 } 2214 }
1903 /* framing errors are soft errors. */ 2215 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
1904 if (flags & NV_RX_FRAMINGERR) { 2216 skb->ip_summed = CHECKSUM_UNNECESSARY;
1905 if (flags & NV_RX_SUBSTRACT1) { 2217 } else {
1906 len--; 2218 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2219 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2220 skb->ip_summed = CHECKSUM_UNNECESSARY;
1907 } 2221 }
1908 } 2222 }
1909 } 2223 } else {
1910 } else { 2224 dev_kfree_skb(skb);
1911 if (!(flags & NV_RX2_DESCRIPTORVALID))
1912 goto next_pkt; 2225 goto next_pkt;
2226 }
2227 }
2228 /* got a valid packet - forward it to the network core */
2229 skb_put(skb, len);
2230 skb->protocol = eth_type_trans(skb, dev);
2231 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2232 dev->name, len, skb->protocol);
2233#ifdef CONFIG_FORCEDETH_NAPI
2234 netif_receive_skb(skb);
2235#else
2236 netif_rx(skb);
2237#endif
2238 dev->last_rx = jiffies;
2239 np->stats.rx_packets++;
2240 np->stats.rx_bytes += len;
2241next_pkt:
2242 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2243 np->get_rx.orig = np->first_rx.orig;
2244 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2245 np->get_rx_ctx = np->first_rx_ctx;
2246 }
1913 2247
1914 if (flags & NV_RX2_ERROR) { 2248 return rx_processed_cnt;
1915 if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { 2249}
1916 np->stats.rx_errors++; 2250
1917 goto next_pkt; 2251static int nv_rx_process_optimized(struct net_device *dev, int limit)
1918 } 2252{
1919 if (flags & NV_RX2_CRCERR) { 2253 struct fe_priv *np = netdev_priv(dev);
1920 np->stats.rx_crc_errors++; 2254 u32 flags;
1921 np->stats.rx_errors++; 2255 u32 vlanflags = 0;
1922 goto next_pkt; 2256 u32 rx_processed_cnt = 0;
1923 } 2257 struct sk_buff *skb;
1924 if (flags & NV_RX2_OVERFLOW) { 2258 int len;
1925 np->stats.rx_over_errors++; 2259
1926 np->stats.rx_errors++; 2260 while((np->get_rx.ex != np->put_rx.ex) &&
1927 goto next_pkt; 2261 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
1928 } 2262 (rx_processed_cnt++ < limit)) {
2263
2264 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2265 dev->name, flags);
2266
2267 /*
2268 * the packet is for us - immediately tear down the pci mapping.
2269 * TODO: check if a prefetch of the first cacheline improves
2270 * the performance.
2271 */
2272 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2273 np->get_rx_ctx->dma_len,
2274 PCI_DMA_FROMDEVICE);
2275 skb = np->get_rx_ctx->skb;
2276 np->get_rx_ctx->skb = NULL;
2277
2278 {
2279 int j;
2280 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2281 for (j=0; j<64; j++) {
2282 if ((j%16) == 0)
2283 dprintk("\n%03x:", j);
2284 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2285 }
2286 dprintk("\n");
2287 }
2288 /* look at what we actually got: */
2289 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2290 len = flags & LEN_MASK_V2;
2291 if (unlikely(flags & NV_RX2_ERROR)) {
1929 if (flags & NV_RX2_ERROR4) { 2292 if (flags & NV_RX2_ERROR4) {
1930 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 2293 len = nv_getlen(dev, skb->data, len);
1931 if (len < 0) { 2294 if (len < 0) {
1932 np->stats.rx_errors++; 2295 dev_kfree_skb(skb);
1933 goto next_pkt; 2296 goto next_pkt;
1934 } 2297 }
1935 } 2298 }
1936 /* framing errors are soft errors */ 2299 /* framing errors are soft errors */
1937 if (flags & NV_RX2_FRAMINGERR) { 2300 else if (flags & NV_RX2_FRAMINGERR) {
1938 if (flags & NV_RX2_SUBSTRACT1) { 2301 if (flags & NV_RX2_SUBSTRACT1) {
1939 len--; 2302 len--;
1940 } 2303 }
1941 } 2304 }
2305 /* the rest are hard errors */
2306 else {
2307 dev_kfree_skb(skb);
2308 goto next_pkt;
2309 }
1942 } 2310 }
1943 if (np->rx_csum) { 2311
1944 flags &= NV_RX2_CHECKSUMMASK; 2312 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
1945 if (flags == NV_RX2_CHECKSUMOK1 || 2313 skb->ip_summed = CHECKSUM_UNNECESSARY;
1946 flags == NV_RX2_CHECKSUMOK2 || 2314 } else {
1947 flags == NV_RX2_CHECKSUMOK3) { 2315 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
1948 dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); 2316 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
1949 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; 2317 skb->ip_summed = CHECKSUM_UNNECESSARY;
1950 } else {
1951 dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
1952 } 2318 }
1953 } 2319 }
1954 }
1955 /* got a valid packet - forward it to the network core */
1956 skb = np->rx_skbuff[i];
1957 np->rx_skbuff[i] = NULL;
1958 2320
1959 skb_put(skb, len); 2321 /* got a valid packet - forward it to the network core */
1960 skb->protocol = eth_type_trans(skb, dev); 2322 skb_put(skb, len);
1961 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", 2323 skb->protocol = eth_type_trans(skb, dev);
1962 dev->name, np->cur_rx, len, skb->protocol); 2324 prefetch(skb->data);
2325
2326 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2327 dev->name, len, skb->protocol);
2328
2329 if (likely(!np->vlangrp)) {
1963#ifdef CONFIG_FORCEDETH_NAPI 2330#ifdef CONFIG_FORCEDETH_NAPI
1964 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) 2331 netif_receive_skb(skb);
1965 vlan_hwaccel_receive_skb(skb, np->vlangrp,
1966 vlanflags & NV_RX3_VLAN_TAG_MASK);
1967 else
1968 netif_receive_skb(skb);
1969#else 2332#else
1970 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) 2333 netif_rx(skb);
1971 vlan_hwaccel_rx(skb, np->vlangrp,
1972 vlanflags & NV_RX3_VLAN_TAG_MASK);
1973 else
1974 netif_rx(skb);
1975#endif 2334#endif
1976 dev->last_rx = jiffies; 2335 } else {
1977 np->stats.rx_packets++; 2336 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
1978 np->stats.rx_bytes += len; 2337 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2338#ifdef CONFIG_FORCEDETH_NAPI
2339 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2340 vlanflags & NV_RX3_VLAN_TAG_MASK);
2341#else
2342 vlan_hwaccel_rx(skb, np->vlangrp,
2343 vlanflags & NV_RX3_VLAN_TAG_MASK);
2344#endif
2345 } else {
2346#ifdef CONFIG_FORCEDETH_NAPI
2347 netif_receive_skb(skb);
2348#else
2349 netif_rx(skb);
2350#endif
2351 }
2352 }
2353
2354 dev->last_rx = jiffies;
2355 np->stats.rx_packets++;
2356 np->stats.rx_bytes += len;
2357 } else {
2358 dev_kfree_skb(skb);
2359 }
1979next_pkt: 2360next_pkt:
1980 np->cur_rx++; 2361 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2362 np->get_rx.ex = np->first_rx.ex;
2363 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2364 np->get_rx_ctx = np->first_rx_ctx;
1981 } 2365 }
1982 2366
1983 return count; 2367 return rx_processed_cnt;
1984} 2368}
1985 2369
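The receive path decodes everything from the descriptor flag word: validity, packet length via the LEN_MASK, hard errors, and whether the hardware checksum can be trusted. A rough stand-alone model of that decode; the bit values below are invented for the example and do not match the real forcedeth descriptor layout.

#include <stdio.h>

/* invented bit layout -- not the real forcedeth descriptor format */
#define LEN_MASK		0x00003fffu
#define RXF_DESCRIPTORVALID	0x00010000u
#define RXF_ERROR		0x00020000u
#define RXF_CSUM_MASK		0x001c0000u
#define RXF_CSUM_OK		0x00100000u

static void classify(unsigned int flags)
{
	unsigned int len = flags & LEN_MASK;

	if (!(flags & RXF_DESCRIPTORVALID)) {
		printf("drop: descriptor not valid\n");
		return;
	}
	if (flags & RXF_ERROR) {
		printf("drop: hard error, len %u\n", len);
		return;
	}
	printf("accept: len %u, checksum %s\n", len,
	       (flags & RXF_CSUM_MASK) == RXF_CSUM_OK ? "offloaded" : "not offloaded");
}

int main(void)
{
	classify(0x00110040);	/* valid, checksum ok, 64 bytes */
	classify(0x00030040);	/* valid but errored */
	classify(0x00000040);	/* still owned by hardware */
	return 0;
}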
1986static void set_bufsize(struct net_device *dev) 2370static void set_bufsize(struct net_device *dev)
@@ -2456,7 +2840,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2456 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2840 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2457 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 2841 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2458 } 2842 }
2459 pci_push(base);
2460 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2843 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2461 if (!(events & np->irqmask)) 2844 if (!(events & np->irqmask))
2462 break; 2845 break;
@@ -2465,22 +2848,46 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2465 nv_tx_done(dev); 2848 nv_tx_done(dev);
2466 spin_unlock(&np->lock); 2849 spin_unlock(&np->lock);
2467 2850
2468 if (events & NVREG_IRQ_LINK) { 2851#ifdef CONFIG_FORCEDETH_NAPI
2852 if (events & NVREG_IRQ_RX_ALL) {
2853 netif_rx_schedule(dev);
2854
 2855 /* Disable further receive irqs */
2856 spin_lock(&np->lock);
2857 np->irqmask &= ~NVREG_IRQ_RX_ALL;
2858
2859 if (np->msi_flags & NV_MSI_X_ENABLED)
2860 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2861 else
2862 writel(np->irqmask, base + NvRegIrqMask);
2863 spin_unlock(&np->lock);
2864 }
2865#else
2866 if (nv_rx_process(dev, dev->weight)) {
2867 if (unlikely(nv_alloc_rx(dev))) {
2868 spin_lock(&np->lock);
2869 if (!np->in_shutdown)
2870 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2871 spin_unlock(&np->lock);
2872 }
2873 }
2874#endif
2875 if (unlikely(events & NVREG_IRQ_LINK)) {
2469 spin_lock(&np->lock); 2876 spin_lock(&np->lock);
2470 nv_link_irq(dev); 2877 nv_link_irq(dev);
2471 spin_unlock(&np->lock); 2878 spin_unlock(&np->lock);
2472 } 2879 }
2473 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2880 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
2474 spin_lock(&np->lock); 2881 spin_lock(&np->lock);
2475 nv_linkchange(dev); 2882 nv_linkchange(dev);
2476 spin_unlock(&np->lock); 2883 spin_unlock(&np->lock);
2477 np->link_timeout = jiffies + LINK_TIMEOUT; 2884 np->link_timeout = jiffies + LINK_TIMEOUT;
2478 } 2885 }
2479 if (events & (NVREG_IRQ_TX_ERR)) { 2886 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
2480 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2887 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2481 dev->name, events); 2888 dev->name, events);
2482 } 2889 }
2483 if (events & (NVREG_IRQ_UNKNOWN)) { 2890 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
2484 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 2891 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2485 dev->name, events); 2892 dev->name, events);
2486 } 2893 }
@@ -2501,6 +2908,63 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2501 spin_unlock(&np->lock); 2908 spin_unlock(&np->lock);
2502 break; 2909 break;
2503 } 2910 }
2911 if (unlikely(i > max_interrupt_work)) {
2912 spin_lock(&np->lock);
2913 /* disable interrupts on the nic */
2914 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2915 writel(0, base + NvRegIrqMask);
2916 else
2917 writel(np->irqmask, base + NvRegIrqMask);
2918 pci_push(base);
2919
2920 if (!np->in_shutdown) {
2921 np->nic_poll_irq = np->irqmask;
2922 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2923 }
2924 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
2925 spin_unlock(&np->lock);
2926 break;
2927 }
2928
2929 }
2930 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
2931
2932 return IRQ_RETVAL(i);
2933}
2934
2935#define TX_WORK_PER_LOOP 64
2936#define RX_WORK_PER_LOOP 64
2937/**
2938 * All _optimized functions are used to help increase performance
 2939 * (reduce CPU and increase throughput). They use descriptor version 3,
2940 * compiler directives, and reduce memory accesses.
2941 */
2942static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
2943{
2944 struct net_device *dev = (struct net_device *) data;
2945 struct fe_priv *np = netdev_priv(dev);
2946 u8 __iomem *base = get_hwbase(dev);
2947 u32 events;
2948 int i;
2949
2950 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
2951
2952 for (i=0; ; i++) {
2953 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2954 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2955 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2956 } else {
2957 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2958 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2959 }
2960 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2961 if (!(events & np->irqmask))
2962 break;
2963
2964 spin_lock(&np->lock);
2965 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
2966 spin_unlock(&np->lock);
2967
2504#ifdef CONFIG_FORCEDETH_NAPI 2968#ifdef CONFIG_FORCEDETH_NAPI
2505 if (events & NVREG_IRQ_RX_ALL) { 2969 if (events & NVREG_IRQ_RX_ALL) {
2506 netif_rx_schedule(dev); 2970 netif_rx_schedule(dev);
@@ -2516,15 +2980,53 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2516 spin_unlock(&np->lock); 2980 spin_unlock(&np->lock);
2517 } 2981 }
2518#else 2982#else
2519 nv_rx_process(dev, dev->weight); 2983 if (nv_rx_process_optimized(dev, dev->weight)) {
2520 if (nv_alloc_rx(dev)) { 2984 if (unlikely(nv_alloc_rx_optimized(dev))) {
2985 spin_lock(&np->lock);
2986 if (!np->in_shutdown)
2987 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2988 spin_unlock(&np->lock);
2989 }
2990 }
2991#endif
2992 if (unlikely(events & NVREG_IRQ_LINK)) {
2521 spin_lock(&np->lock); 2993 spin_lock(&np->lock);
2522 if (!np->in_shutdown) 2994 nv_link_irq(dev);
2523 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2524 spin_unlock(&np->lock); 2995 spin_unlock(&np->lock);
2525 } 2996 }
2526#endif 2997 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
2527 if (i > max_interrupt_work) { 2998 spin_lock(&np->lock);
2999 nv_linkchange(dev);
3000 spin_unlock(&np->lock);
3001 np->link_timeout = jiffies + LINK_TIMEOUT;
3002 }
3003 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3004 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3005 dev->name, events);
3006 }
3007 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3008 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3009 dev->name, events);
3010 }
3011 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3012 spin_lock(&np->lock);
3013 /* disable interrupts on the nic */
3014 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3015 writel(0, base + NvRegIrqMask);
3016 else
3017 writel(np->irqmask, base + NvRegIrqMask);
3018 pci_push(base);
3019
3020 if (!np->in_shutdown) {
3021 np->nic_poll_irq = np->irqmask;
3022 np->recover_error = 1;
3023 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3024 }
3025 spin_unlock(&np->lock);
3026 break;
3027 }
3028
3029 if (unlikely(i > max_interrupt_work)) {
2528 spin_lock(&np->lock); 3030 spin_lock(&np->lock);
2529 /* disable interrupts on the nic */ 3031 /* disable interrupts on the nic */
2530 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3032 if (!(np->msi_flags & NV_MSI_X_ENABLED))
@@ -2543,7 +3045,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2543 } 3045 }
2544 3046
2545 } 3047 }
2546 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3048 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
2547 3049
2548 return IRQ_RETVAL(i); 3050 return IRQ_RETVAL(i);
2549} 3051}
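Both interrupt handlers share the same bounded-work shape: read and acknowledge the status word, process tx/rx, and bail out (masking the irq and deferring to nic_poll) once max_interrupt_work iterations have been spent. A toy version of that loop, with a fake status source standing in for the NvRegIrqStatus read:

#include <stdio.h>

#define MAX_INTERRUPT_WORK 4

/* toy event source standing in for the NvRegIrqStatus read + ack */
static unsigned int fake_irq_status(void)
{
	static int pending = 10;

	return pending-- > 0 ? 0x1 : 0x0;
}

int main(void)
{
	int i;

	for (i = 0; ; i++) {
		unsigned int events = fake_irq_status();

		if (!events)
			break;			/* nothing left to service */
		/* tx/rx processing would go here */
		if (i > MAX_INTERRUPT_WORK) {
			/* mask the irq and defer to a timer, as the driver does */
			printf("too many iterations (%d), deferring\n", i);
			break;
		}
	}
	printf("left the loop after %d passes\n", i);
	return 0;
}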
@@ -2562,20 +3064,19 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
2562 for (i=0; ; i++) { 3064 for (i=0; ; i++) {
2563 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3065 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
2564 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3066 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
2565 pci_push(base);
2566 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3067 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
2567 if (!(events & np->irqmask)) 3068 if (!(events & np->irqmask))
2568 break; 3069 break;
2569 3070
2570 spin_lock_irqsave(&np->lock, flags); 3071 spin_lock_irqsave(&np->lock, flags);
2571 nv_tx_done(dev); 3072 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
2572 spin_unlock_irqrestore(&np->lock, flags); 3073 spin_unlock_irqrestore(&np->lock, flags);
2573 3074
2574 if (events & (NVREG_IRQ_TX_ERR)) { 3075 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
2575 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 3076 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2576 dev->name, events); 3077 dev->name, events);
2577 } 3078 }
2578 if (i > max_interrupt_work) { 3079 if (unlikely(i > max_interrupt_work)) {
2579 spin_lock_irqsave(&np->lock, flags); 3080 spin_lock_irqsave(&np->lock, flags);
2580 /* disable interrupts on the nic */ 3081 /* disable interrupts on the nic */
2581 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3082 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
@@ -2604,7 +3105,10 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
2604 u8 __iomem *base = get_hwbase(dev); 3105 u8 __iomem *base = get_hwbase(dev);
2605 unsigned long flags; 3106 unsigned long flags;
2606 3107
2607 pkts = nv_rx_process(dev, limit); 3108 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
3109 pkts = nv_rx_process(dev, limit);
3110 else
3111 pkts = nv_rx_process_optimized(dev, limit);
2608 3112
2609 if (nv_alloc_rx(dev)) { 3113 if (nv_alloc_rx(dev)) {
2610 spin_lock_irqsave(&np->lock, flags); 3114 spin_lock_irqsave(&np->lock, flags);
@@ -2670,20 +3174,20 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
2670 for (i=0; ; i++) { 3174 for (i=0; ; i++) {
2671 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3175 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
2672 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3176 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
2673 pci_push(base);
2674 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3177 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
2675 if (!(events & np->irqmask)) 3178 if (!(events & np->irqmask))
2676 break; 3179 break;
2677 3180
2678 nv_rx_process(dev, dev->weight); 3181 if (nv_rx_process_optimized(dev, dev->weight)) {
2679 if (nv_alloc_rx(dev)) { 3182 if (unlikely(nv_alloc_rx_optimized(dev))) {
2680 spin_lock_irqsave(&np->lock, flags); 3183 spin_lock_irqsave(&np->lock, flags);
2681 if (!np->in_shutdown) 3184 if (!np->in_shutdown)
2682 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3185 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2683 spin_unlock_irqrestore(&np->lock, flags); 3186 spin_unlock_irqrestore(&np->lock, flags);
3187 }
2684 } 3188 }
2685 3189
2686 if (i > max_interrupt_work) { 3190 if (unlikely(i > max_interrupt_work)) {
2687 spin_lock_irqsave(&np->lock, flags); 3191 spin_lock_irqsave(&np->lock, flags);
2688 /* disable interrupts on the nic */ 3192 /* disable interrupts on the nic */
2689 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3193 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
@@ -2718,11 +3222,15 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
2718 for (i=0; ; i++) { 3222 for (i=0; ; i++) {
2719 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3223 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
2720 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3224 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
2721 pci_push(base);
2722 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3225 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2723 if (!(events & np->irqmask)) 3226 if (!(events & np->irqmask))
2724 break; 3227 break;
2725 3228
3229 /* check tx in case we reached max loop limit in tx isr */
3230 spin_lock_irqsave(&np->lock, flags);
3231 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3232 spin_unlock_irqrestore(&np->lock, flags);
3233
2726 if (events & NVREG_IRQ_LINK) { 3234 if (events & NVREG_IRQ_LINK) {
2727 spin_lock_irqsave(&np->lock, flags); 3235 spin_lock_irqsave(&np->lock, flags);
2728 nv_link_irq(dev); 3236 nv_link_irq(dev);
@@ -2752,7 +3260,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
2752 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 3260 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2753 dev->name, events); 3261 dev->name, events);
2754 } 3262 }
2755 if (i > max_interrupt_work) { 3263 if (unlikely(i > max_interrupt_work)) {
2756 spin_lock_irqsave(&np->lock, flags); 3264 spin_lock_irqsave(&np->lock, flags);
2757 /* disable interrupts on the nic */ 3265 /* disable interrupts on the nic */
2758 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3266 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
@@ -2835,6 +3343,16 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2835 u8 __iomem *base = get_hwbase(dev); 3343 u8 __iomem *base = get_hwbase(dev);
2836 int ret = 1; 3344 int ret = 1;
2837 int i; 3345 int i;
3346 irqreturn_t (*handler)(int foo, void *data);
3347
3348 if (intr_test) {
3349 handler = nv_nic_irq_test;
3350 } else {
3351 if (np->desc_ver == DESC_VER_3)
3352 handler = nv_nic_irq_optimized;
3353 else
3354 handler = nv_nic_irq;
3355 }
2838 3356
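nv_request_irq() now picks the handler once through a function pointer and reuses it for every request_irq() call, instead of branching on intr_test at each call site. A small sketch of that selection under the same assumptions (descriptor version 3 selects the optimized handler); the handler bodies are placeholders.

#include <stdio.h>

/* stand-ins for the three handler flavours requested by nv_request_irq() */
static int irq_legacy(int irq, void *data)    { (void)irq; (void)data; return 1; }
static int irq_optimized(int irq, void *data) { (void)irq; (void)data; return 1; }
static int irq_test(int irq, void *data)      { (void)irq; (void)data; return 1; }

int main(void)
{
	int intr_test = 0;
	int desc_ver = 3;			/* DESC_VER_3 in the driver */
	int (*handler)(int, void *);

	/* choose once, then hand the same pointer to every request_irq() call */
	if (intr_test)
		handler = irq_test;
	else if (desc_ver == 3)
		handler = irq_optimized;
	else
		handler = irq_legacy;

	printf("handler returned %d\n", handler(0, NULL));
	return 0;
}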
2839 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3357 if (np->msi_flags & NV_MSI_X_CAPABLE) {
2840 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3358 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
@@ -2872,10 +3390,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2872 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3390 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
2873 } else { 3391 } else {
2874 /* Request irq for all interrupts */ 3392 /* Request irq for all interrupts */
2875 if ((!intr_test && 3393 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
2876 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
2877 (intr_test &&
2878 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
2879 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3394 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2880 pci_disable_msix(np->pci_dev); 3395 pci_disable_msix(np->pci_dev);
2881 np->msi_flags &= ~NV_MSI_X_ENABLED; 3396 np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -2891,8 +3406,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2891 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3406 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
2892 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3407 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
2893 np->msi_flags |= NV_MSI_ENABLED; 3408 np->msi_flags |= NV_MSI_ENABLED;
2894 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 3409 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
2895 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
2896 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3410 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2897 pci_disable_msi(np->pci_dev); 3411 pci_disable_msi(np->pci_dev);
2898 np->msi_flags &= ~NV_MSI_ENABLED; 3412 np->msi_flags &= ~NV_MSI_ENABLED;
@@ -2907,8 +3421,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2907 } 3421 }
2908 } 3422 }
2909 if (ret != 0) { 3423 if (ret != 0) {
2910 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 3424 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
2911 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
2912 goto out_err; 3425 goto out_err;
2913 3426
2914 } 3427 }
@@ -3051,47 +3564,8 @@ static void nv_do_stats_poll(unsigned long data)
 {
     struct net_device *dev = (struct net_device *) data;
     struct fe_priv *np = netdev_priv(dev);
-    u8 __iomem *base = get_hwbase(dev);

-    np->estats.tx_bytes += readl(base + NvRegTxCnt);
-    np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
-    np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
-    np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
-    np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
-    np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
-    np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
-    np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
-    np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
-    np->estats.tx_deferral += readl(base + NvRegTxDef);
-    np->estats.tx_packets += readl(base + NvRegTxFrame);
-    np->estats.tx_pause += readl(base + NvRegTxPause);
-    np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
-    np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
-    np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
-    np->estats.rx_runt += readl(base + NvRegRxRunt);
-    np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
-    np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
-    np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
-    np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
-    np->estats.rx_length_error += readl(base + NvRegRxLenErr);
-    np->estats.rx_unicast += readl(base + NvRegRxUnicast);
-    np->estats.rx_multicast += readl(base + NvRegRxMulticast);
-    np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
-    np->estats.rx_bytes += readl(base + NvRegRxCnt);
-    np->estats.rx_pause += readl(base + NvRegRxPause);
-    np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
-    np->estats.rx_packets =
-        np->estats.rx_unicast +
-        np->estats.rx_multicast +
-        np->estats.rx_broadcast;
-    np->estats.rx_errors_total =
-        np->estats.rx_crc_errors +
-        np->estats.rx_over_errors +
-        np->estats.rx_frame_error +
-        (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
-        np->estats.rx_late_collision +
-        np->estats.rx_runt +
-        np->estats.rx_frame_too_long;
+    nv_get_hw_stats(dev);

     if (!np->in_shutdown)
         mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
@@ -3465,7 +3939,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 {
     struct fe_priv *np = netdev_priv(dev);
     u8 __iomem *base = get_hwbase(dev);
-    u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
+    u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
     dma_addr_t ring_addr;

     if (ring->rx_pending < RX_RING_MIN ||
@@ -3491,12 +3965,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
                         sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
                         &ring_addr);
     }
-    rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
-    rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
-    tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
-    tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
-    tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
-    if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
+    rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
+    tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
+    if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
         /* fall back to old rings */
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
             if (rxtx_ring)
@@ -3509,14 +3980,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
         }
         if (rx_skbuff)
             kfree(rx_skbuff);
-        if (rx_dma)
-            kfree(rx_dma);
         if (tx_skbuff)
             kfree(tx_skbuff);
-        if (tx_dma)
-            kfree(tx_dma);
-        if (tx_dma_len)
-            kfree(tx_dma_len);
         goto exit;
     }

@@ -3538,8 +4003,6 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
     /* set new values */
     np->rx_ring_size = ring->rx_pending;
     np->tx_ring_size = ring->tx_pending;
-    np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
-    np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
     if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
         np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
         np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -3547,18 +4010,12 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
         np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
         np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
     }
-    np->rx_skbuff = (struct sk_buff**)rx_skbuff;
-    np->rx_dma = (dma_addr_t*)rx_dma;
-    np->tx_skbuff = (struct sk_buff**)tx_skbuff;
-    np->tx_dma = (dma_addr_t*)tx_dma;
-    np->tx_dma_len = (unsigned int*)tx_dma_len;
+    np->rx_skb = (struct nv_skb_map*)rx_skbuff;
+    np->tx_skb = (struct nv_skb_map*)tx_skbuff;
     np->ring_addr = ring_addr;

-    memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
-    memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
-    memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
-    memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
-    memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
+    memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
+    memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

     if (netif_running(dev)) {
         /* reinit driver view of the queues */
@@ -3727,8 +4184,10 @@ static int nv_get_stats_count(struct net_device *dev)
 {
     struct fe_priv *np = netdev_priv(dev);

-    if (np->driver_data & DEV_HAS_STATISTICS)
-        return sizeof(struct nv_ethtool_stats)/sizeof(u64);
+    if (np->driver_data & DEV_HAS_STATISTICS_V1)
+        return NV_DEV_STATISTICS_V1_COUNT;
+    else if (np->driver_data & DEV_HAS_STATISTICS_V2)
+        return NV_DEV_STATISTICS_V2_COUNT;
     else
         return 0;
 }
@@ -3955,7 +4414,7 @@ static int nv_loopback_test(struct net_device *dev)
         dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
             dev->name, len, pkt_len);
     } else {
-        rx_skb = np->rx_skbuff[0];
+        rx_skb = np->rx_skb[0].skb;
         for (i = 0; i < pkt_len; i++) {
             if (rx_skb->data[i] != (u8)(i & 0xff)) {
                 ret = 0;
@@ -4315,7 +4774,7 @@ static int nv_open(struct net_device *dev)
     mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

     /* start statistics timer */
-    if (np->driver_data & DEV_HAS_STATISTICS)
+    if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
         mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);

     spin_unlock_irq(&np->lock);
@@ -4412,7 +4871,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
     if (err < 0)
         goto out_disable;

-    if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
+    if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
+        np->register_size = NV_PCI_REGSZ_VER3;
+    else if (id->driver_data & DEV_HAS_STATISTICS_V1)
         np->register_size = NV_PCI_REGSZ_VER2;
     else
         np->register_size = NV_PCI_REGSZ_VER1;
@@ -4475,10 +4936,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
         np->rx_csum = 1;
         np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
         dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
-#ifdef NETIF_F_TSO
         dev->features |= NETIF_F_TSO;
-#endif
     }

     np->vlanctl_bits = 0;
     if (id->driver_data & DEV_HAS_VLAN) {
@@ -4512,8 +4971,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i

     np->rx_ring_size = RX_RING_DEFAULT;
     np->tx_ring_size = TX_RING_DEFAULT;
-    np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
-    np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;

     if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
         np->rx_ring.orig = pci_alloc_consistent(pci_dev,
@@ -4530,22 +4987,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
             goto out_unmap;
         np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
     }
-    np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
-    np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
-    np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
-    np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
-    np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
-    if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
+    np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
+    np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
+    if (!np->rx_skb || !np->tx_skb)
         goto out_freering;
-    memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
-    memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
-    memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
-    memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
-    memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
+    memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
+    memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

     dev->open = nv_open;
     dev->stop = nv_close;
-    dev->hard_start_xmit = nv_start_xmit;
+    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+        dev->hard_start_xmit = nv_start_xmit;
+    else
+        dev->hard_start_xmit = nv_start_xmit_optimized;
     dev->get_stats = nv_get_stats;
     dev->change_mtu = nv_change_mtu;
     dev->set_mac_address = nv_set_mac_address;
@@ -4553,7 +5007,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 #ifdef CONFIG_NET_POLL_CONTROLLER
     dev->poll_controller = nv_poll_controller;
 #endif
-    dev->weight = 64;
+    dev->weight = RX_WORK_PER_LOOP;
 #ifdef CONFIG_FORCEDETH_NAPI
     dev->poll = nv_napi_poll;
 #endif
@@ -4868,83 +5322,83 @@ static struct pci_device_id pci_tbl[] = {
4868 }, 5322 },
4869 { /* CK804 Ethernet Controller */ 5323 { /* CK804 Ethernet Controller */
4870 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 5324 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
4871 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5325 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4872 }, 5326 },
4873 { /* CK804 Ethernet Controller */ 5327 { /* CK804 Ethernet Controller */
4874 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 5328 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
4875 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5329 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4876 }, 5330 },
4877 { /* MCP04 Ethernet Controller */ 5331 { /* MCP04 Ethernet Controller */
4878 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 5332 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
4879 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5333 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4880 }, 5334 },
4881 { /* MCP04 Ethernet Controller */ 5335 { /* MCP04 Ethernet Controller */
4882 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), 5336 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
4883 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5337 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4884 }, 5338 },
4885 { /* MCP51 Ethernet Controller */ 5339 { /* MCP51 Ethernet Controller */
4886 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 5340 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
4887 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 5341 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
4888 }, 5342 },
4889 { /* MCP51 Ethernet Controller */ 5343 { /* MCP51 Ethernet Controller */
4890 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), 5344 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
4891 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 5345 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
4892 }, 5346 },
4893 { /* MCP55 Ethernet Controller */ 5347 { /* MCP55 Ethernet Controller */
4894 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 5348 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
4895 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5349 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4896 }, 5350 },
4897 { /* MCP55 Ethernet Controller */ 5351 { /* MCP55 Ethernet Controller */
4898 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 5352 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
4899 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5353 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4900 }, 5354 },
4901 { /* MCP61 Ethernet Controller */ 5355 { /* MCP61 Ethernet Controller */
4902 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 5356 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
4903 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5357 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4904 }, 5358 },
4905 { /* MCP61 Ethernet Controller */ 5359 { /* MCP61 Ethernet Controller */
4906 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 5360 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
4907 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5361 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4908 }, 5362 },
4909 { /* MCP61 Ethernet Controller */ 5363 { /* MCP61 Ethernet Controller */
4910 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 5364 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
4911 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5365 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4912 }, 5366 },
4913 { /* MCP61 Ethernet Controller */ 5367 { /* MCP61 Ethernet Controller */
4914 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 5368 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
4915 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5369 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4916 }, 5370 },
4917 { /* MCP65 Ethernet Controller */ 5371 { /* MCP65 Ethernet Controller */
4918 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 5372 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
4919 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5373 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4920 }, 5374 },
4921 { /* MCP65 Ethernet Controller */ 5375 { /* MCP65 Ethernet Controller */
4922 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 5376 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
4923 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5377 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4924 }, 5378 },
4925 { /* MCP65 Ethernet Controller */ 5379 { /* MCP65 Ethernet Controller */
4926 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 5380 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
4927 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5381 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4928 }, 5382 },
4929 { /* MCP65 Ethernet Controller */ 5383 { /* MCP65 Ethernet Controller */
4930 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 5384 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
4931 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5385 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4932 }, 5386 },
4933 { /* MCP67 Ethernet Controller */ 5387 { /* MCP67 Ethernet Controller */
4934 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 5388 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
4935 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5389 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4936 }, 5390 },
4937 { /* MCP67 Ethernet Controller */ 5391 { /* MCP67 Ethernet Controller */
4938 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 5392 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
4939 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5393 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4940 }, 5394 },
4941 { /* MCP67 Ethernet Controller */ 5395 { /* MCP67 Ethernet Controller */
4942 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 5396 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
4943 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5397 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4944 }, 5398 },
4945 { /* MCP67 Ethernet Controller */ 5399 { /* MCP67 Ethernet Controller */
4946 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 5400 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
4947 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5401 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4948 }, 5402 },
4949 {0,}, 5403 {0,},
4950}; 5404};
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 844c136e9920..7dc5185aa2c0 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -3034,7 +3034,7 @@ static int __init hp100_module_init(void)
3034 goto out2; 3034 goto out2;
3035#endif 3035#endif
3036#ifdef CONFIG_PCI 3036#ifdef CONFIG_PCI
3037 err = pci_module_init(&hp100_pci_driver); 3037 err = pci_register_driver(&hp100_pci_driver);
3038 if (err && err != -ENODEV) 3038 if (err && err != -ENODEV)
3039 goto out3; 3039 goto out3;
3040#endif 3040#endif
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index f4aba4355b19..cf30a1059ce0 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -61,9 +61,7 @@
61#include <net/pkt_sched.h> 61#include <net/pkt_sched.h>
62#include <linux/list.h> 62#include <linux/list.h>
63#include <linux/reboot.h> 63#include <linux/reboot.h>
64#ifdef NETIF_F_TSO
65#include <net/checksum.h> 64#include <net/checksum.h>
66#endif
67 65
68#include <linux/ethtool.h> 66#include <linux/ethtool.h>
69#include <linux/if_vlan.h> 67#include <linux/if_vlan.h>
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 82c044d6e08a..d6628bd9590a 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -82,10 +82,8 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
82 {"tx_restart_queue", IXGB_STAT(restart_queue) }, 82 {"tx_restart_queue", IXGB_STAT(restart_queue) },
83 {"rx_long_length_errors", IXGB_STAT(stats.roc)}, 83 {"rx_long_length_errors", IXGB_STAT(stats.roc)},
84 {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, 84 {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
85#ifdef NETIF_F_TSO
86 {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)}, 85 {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
87 {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)}, 86 {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
88#endif
89 {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)}, 87 {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
90 {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)}, 88 {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
91 {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)}, 89 {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
@@ -240,7 +238,6 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
240 return 0; 238 return 0;
241} 239}
242 240
243#ifdef NETIF_F_TSO
244static int 241static int
245ixgb_set_tso(struct net_device *netdev, uint32_t data) 242ixgb_set_tso(struct net_device *netdev, uint32_t data)
246{ 243{
@@ -250,7 +247,6 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
250 netdev->features &= ~NETIF_F_TSO; 247 netdev->features &= ~NETIF_F_TSO;
251 return 0; 248 return 0;
252} 249}
253#endif /* NETIF_F_TSO */
254 250
255static uint32_t 251static uint32_t
256ixgb_get_msglevel(struct net_device *netdev) 252ixgb_get_msglevel(struct net_device *netdev)
@@ -722,10 +718,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
722 .set_sg = ethtool_op_set_sg, 718 .set_sg = ethtool_op_set_sg,
723 .get_msglevel = ixgb_get_msglevel, 719 .get_msglevel = ixgb_get_msglevel,
724 .set_msglevel = ixgb_set_msglevel, 720 .set_msglevel = ixgb_set_msglevel,
725#ifdef NETIF_F_TSO
726 .get_tso = ethtool_op_get_tso, 721 .get_tso = ethtool_op_get_tso,
727 .set_tso = ixgb_set_tso, 722 .set_tso = ixgb_set_tso,
728#endif
729 .get_strings = ixgb_get_strings, 723 .get_strings = ixgb_get_strings,
730 .phys_id = ixgb_phys_id, 724 .phys_id = ixgb_phys_id,
731 .get_stats_count = ixgb_get_stats_count, 725 .get_stats_count = ixgb_get_stats_count,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index a083a9189230..51bd7e8ff0d6 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -456,9 +456,7 @@ ixgb_probe(struct pci_dev *pdev,
456 NETIF_F_HW_VLAN_TX | 456 NETIF_F_HW_VLAN_TX |
457 NETIF_F_HW_VLAN_RX | 457 NETIF_F_HW_VLAN_RX |
458 NETIF_F_HW_VLAN_FILTER; 458 NETIF_F_HW_VLAN_FILTER;
459#ifdef NETIF_F_TSO
460 netdev->features |= NETIF_F_TSO; 459 netdev->features |= NETIF_F_TSO;
461#endif
462#ifdef NETIF_F_LLTX 460#ifdef NETIF_F_LLTX
463 netdev->features |= NETIF_F_LLTX; 461 netdev->features |= NETIF_F_LLTX;
464#endif 462#endif
@@ -1176,7 +1174,6 @@ ixgb_watchdog(unsigned long data)
1176static int 1174static int
1177ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) 1175ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1178{ 1176{
1179#ifdef NETIF_F_TSO
1180 struct ixgb_context_desc *context_desc; 1177 struct ixgb_context_desc *context_desc;
1181 unsigned int i; 1178 unsigned int i;
1182 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1179 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
@@ -1233,7 +1230,6 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1233 1230
1234 return 1; 1231 return 1;
1235 } 1232 }
1236#endif
1237 1233
1238 return 0; 1234 return 0;
1239} 1235}
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 25b559b5d5ed..5eb7a3536f29 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -1046,6 +1046,14 @@ static int __devinit macb_probe(struct platform_device *pdev)
1046 1046
1047 spin_lock_init(&bp->lock); 1047 spin_lock_init(&bp->lock);
1048 1048
1049#if defined(CONFIG_ARCH_AT91)
1050 bp->pclk = clk_get(&pdev->dev, "macb_clk");
1051 if (IS_ERR(bp->pclk)) {
1052 dev_err(&pdev->dev, "failed to get macb_clk\n");
1053 goto err_out_free_dev;
1054 }
1055 clk_enable(bp->pclk);
1056#else
1049 bp->pclk = clk_get(&pdev->dev, "pclk"); 1057 bp->pclk = clk_get(&pdev->dev, "pclk");
1050 if (IS_ERR(bp->pclk)) { 1058 if (IS_ERR(bp->pclk)) {
1051 dev_err(&pdev->dev, "failed to get pclk\n"); 1059 dev_err(&pdev->dev, "failed to get pclk\n");
@@ -1059,6 +1067,7 @@ static int __devinit macb_probe(struct platform_device *pdev)
1059 1067
1060 clk_enable(bp->pclk); 1068 clk_enable(bp->pclk);
1061 clk_enable(bp->hclk); 1069 clk_enable(bp->hclk);
1070#endif
1062 1071
1063 bp->regs = ioremap(regs->start, regs->end - regs->start + 1); 1072 bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
1064 if (!bp->regs) { 1073 if (!bp->regs) {
@@ -1119,9 +1128,17 @@ static int __devinit macb_probe(struct platform_device *pdev)
1119 1128
1120 pdata = pdev->dev.platform_data; 1129 pdata = pdev->dev.platform_data;
1121 if (pdata && pdata->is_rmii) 1130 if (pdata && pdata->is_rmii)
1131#if defined(CONFIG_ARCH_AT91)
1132 macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
1133#else
1122 macb_writel(bp, USRIO, 0); 1134 macb_writel(bp, USRIO, 0);
1135#endif
1123 else 1136 else
1137#if defined(CONFIG_ARCH_AT91)
1138 macb_writel(bp, USRIO, MACB_BIT(CLKEN));
1139#else
1124 macb_writel(bp, USRIO, MACB_BIT(MII)); 1140 macb_writel(bp, USRIO, MACB_BIT(MII));
1141#endif
1125 1142
1126 bp->tx_pending = DEF_TX_RING_PENDING; 1143 bp->tx_pending = DEF_TX_RING_PENDING;
1127 1144
@@ -1148,9 +1165,11 @@ err_out_free_irq:
1148err_out_iounmap: 1165err_out_iounmap:
1149 iounmap(bp->regs); 1166 iounmap(bp->regs);
1150err_out_disable_clocks: 1167err_out_disable_clocks:
1168#ifndef CONFIG_ARCH_AT91
1151 clk_disable(bp->hclk); 1169 clk_disable(bp->hclk);
1152 clk_disable(bp->pclk);
1153 clk_put(bp->hclk); 1170 clk_put(bp->hclk);
1171#endif
1172 clk_disable(bp->pclk);
1154err_out_put_pclk: 1173err_out_put_pclk:
1155 clk_put(bp->pclk); 1174 clk_put(bp->pclk);
1156err_out_free_dev: 1175err_out_free_dev:
@@ -1173,9 +1192,11 @@ static int __devexit macb_remove(struct platform_device *pdev)
1173 unregister_netdev(dev); 1192 unregister_netdev(dev);
1174 free_irq(dev->irq, dev); 1193 free_irq(dev->irq, dev);
1175 iounmap(bp->regs); 1194 iounmap(bp->regs);
1195#ifndef CONFIG_ARCH_AT91
1176 clk_disable(bp->hclk); 1196 clk_disable(bp->hclk);
1177 clk_disable(bp->pclk);
1178 clk_put(bp->hclk); 1197 clk_put(bp->hclk);
1198#endif
1199 clk_disable(bp->pclk);
1179 clk_put(bp->pclk); 1200 clk_put(bp->pclk);
1180 free_netdev(dev); 1201 free_netdev(dev);
1181 platform_set_drvdata(pdev, NULL); 1202 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/macb.h b/drivers/net/macb.h
index 27bf0ae0f0bb..b3bb2182edd1 100644
--- a/drivers/net/macb.h
+++ b/drivers/net/macb.h
@@ -200,7 +200,7 @@
200#define MACB_SOF_OFFSET 30 200#define MACB_SOF_OFFSET 30
201#define MACB_SOF_SIZE 2 201#define MACB_SOF_SIZE 2
202 202
203/* Bitfields in USRIO */ 203/* Bitfields in USRIO (AVR32) */
204#define MACB_MII_OFFSET 0 204#define MACB_MII_OFFSET 0
205#define MACB_MII_SIZE 1 205#define MACB_MII_SIZE 1
206#define MACB_EAM_OFFSET 1 206#define MACB_EAM_OFFSET 1
@@ -210,6 +210,12 @@
210#define MACB_TX_PAUSE_ZERO_OFFSET 3 210#define MACB_TX_PAUSE_ZERO_OFFSET 3
211#define MACB_TX_PAUSE_ZERO_SIZE 1 211#define MACB_TX_PAUSE_ZERO_SIZE 1
212 212
213/* Bitfields in USRIO (AT91) */
214#define MACB_RMII_OFFSET 0
215#define MACB_RMII_SIZE 1
216#define MACB_CLKEN_OFFSET 1
217#define MACB_CLKEN_SIZE 1
218
213/* Bitfields in WOL */ 219/* Bitfields in WOL */
214#define MACB_IP_OFFSET 0 220#define MACB_IP_OFFSET 0
215#define MACB_IP_SIZE 16 221#define MACB_IP_SIZE 16
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 2907cfb12ada..9ec24f0d5d68 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/crc32.h> 16#include <linux/crc32.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/bitrev.h>
18#include <asm/prom.h> 19#include <asm/prom.h>
19#include <asm/dbdma.h> 20#include <asm/dbdma.h>
20#include <asm/io.h> 21#include <asm/io.h>
@@ -74,7 +75,6 @@ struct mace_data {
74#define PRIV_BYTES (sizeof(struct mace_data) \ 75#define PRIV_BYTES (sizeof(struct mace_data) \
75 + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd)) 76 + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
76 77
77static int bitrev(int);
78static int mace_open(struct net_device *dev); 78static int mace_open(struct net_device *dev);
79static int mace_close(struct net_device *dev); 79static int mace_close(struct net_device *dev);
80static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); 80static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
@@ -96,18 +96,6 @@ static void __mace_set_address(struct net_device *dev, void *addr);
96 */ 96 */
97static unsigned char *dummy_buf; 97static unsigned char *dummy_buf;
98 98
99/* Bit-reverse one byte of an ethernet hardware address. */
100static inline int
101bitrev(int b)
102{
103 int d = 0, i;
104
105 for (i = 0; i < 8; ++i, b >>= 1)
106 d = (d << 1) | (b & 1);
107 return d;
108}
109
110
111static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match) 99static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
112{ 100{
113 struct device_node *mace = macio_get_of_node(mdev); 101 struct device_node *mace = macio_get_of_node(mdev);
@@ -173,7 +161,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
173 161
174 rev = addr[0] == 0 && addr[1] == 0xA0; 162 rev = addr[0] == 0 && addr[1] == 0xA0;
175 for (j = 0; j < 6; ++j) { 163 for (j = 0; j < 6; ++j) {
176 dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j]; 164 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
177 } 165 }
178 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | 166 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
179 in_8(&mp->mace->chipid_lo); 167 in_8(&mp->mace->chipid_lo);
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 464e4a6f3d5f..5d541e873041 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/crc32.h> 24#include <linux/crc32.h>
25#include <linux/bitrev.h>
25#include <asm/io.h> 26#include <asm/io.h>
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
27#include <asm/irq.h> 28#include <asm/irq.h>
@@ -81,19 +82,6 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id);
81static irqreturn_t mace_dma_intr(int irq, void *dev_id); 82static irqreturn_t mace_dma_intr(int irq, void *dev_id);
82static void mace_tx_timeout(struct net_device *dev); 83static void mace_tx_timeout(struct net_device *dev);
83 84
84/* Bit-reverse one byte of an ethernet hardware address. */
85
86static int bitrev(int b)
87{
88 int d = 0, i;
89
90 for (i = 0; i < 8; ++i, b >>= 1) {
91 d = (d << 1) | (b & 1);
92 }
93
94 return d;
95}
96
97/* 85/*
98 * Load a receive DMA channel with a base address and ring length 86 * Load a receive DMA channel with a base address and ring length
99 */ 87 */
@@ -219,12 +207,12 @@ struct net_device *mace_probe(int unit)
219 addr = (void *)MACE_PROM; 207 addr = (void *)MACE_PROM;
220 208
221 for (j = 0; j < 6; ++j) { 209 for (j = 0; j < 6; ++j) {
222 u8 v=bitrev(addr[j<<4]); 210 u8 v = bitrev8(addr[j<<4]);
223 checksum ^= v; 211 checksum ^= v;
224 dev->dev_addr[j] = v; 212 dev->dev_addr[j] = v;
225 } 213 }
226 for (; j < 8; ++j) { 214 for (; j < 8; ++j) {
227 checksum ^= bitrev(addr[j<<4]); 215 checksum ^= bitrev8(addr[j<<4]);
228 } 216 }
229 217
230 if (checksum != 0xFF) { 218 if (checksum != 0xFF) {
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 393d995f1919..24f6050fbf33 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -121,16 +121,12 @@ enum macsonic_type {
121 * For reversing the PROM address 121 * For reversing the PROM address
122 */ 122 */
123 123
124static unsigned char nibbletab[] = {0, 8, 4, 12, 2, 10, 6, 14,
125 1, 9, 5, 13, 3, 11, 7, 15};
126
127static inline void bit_reverse_addr(unsigned char addr[6]) 124static inline void bit_reverse_addr(unsigned char addr[6])
128{ 125{
129 int i; 126 int i;
130 127
131 for(i = 0; i < 6; i++) 128 for(i = 0; i < 6; i++)
132 addr[i] = ((nibbletab[addr[i] & 0xf] << 4) | 129 addr[i] = bitrev8(addr[i]);
133 nibbletab[(addr[i] >> 4) &0xf]);
134} 130}
135 131
136int __init macsonic_init(struct net_device* dev) 132int __init macsonic_init(struct net_device* dev)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 61cbd4a60446..030924fb1ab3 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1412,10 +1412,8 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
1412 .set_tx_csum = ethtool_op_set_tx_hw_csum, 1412 .set_tx_csum = ethtool_op_set_tx_hw_csum,
1413 .get_sg = ethtool_op_get_sg, 1413 .get_sg = ethtool_op_get_sg,
1414 .set_sg = ethtool_op_set_sg, 1414 .set_sg = ethtool_op_set_sg,
1415#ifdef NETIF_F_TSO
1416 .get_tso = ethtool_op_get_tso, 1415 .get_tso = ethtool_op_get_tso,
1417 .set_tso = ethtool_op_set_tso, 1416 .set_tso = ethtool_op_set_tso,
1418#endif
1419 .get_strings = myri10ge_get_strings, 1417 .get_strings = myri10ge_get_strings,
1420 .get_stats_count = myri10ge_get_stats_count, 1418 .get_stats_count = myri10ge_get_stats_count,
1421 .get_ethtool_stats = myri10ge_get_ethtool_stats, 1419 .get_ethtool_stats = myri10ge_get_ethtool_stats,
@@ -1975,13 +1973,11 @@ again:
1975 mss = 0; 1973 mss = 0;
1976 max_segments = MXGEFW_MAX_SEND_DESC; 1974 max_segments = MXGEFW_MAX_SEND_DESC;
1977 1975
1978#ifdef NETIF_F_TSO
1979 if (skb->len > (dev->mtu + ETH_HLEN)) { 1976 if (skb->len > (dev->mtu + ETH_HLEN)) {
1980 mss = skb_shinfo(skb)->gso_size; 1977 mss = skb_shinfo(skb)->gso_size;
1981 if (mss != 0) 1978 if (mss != 0)
1982 max_segments = MYRI10GE_MAX_SEND_DESC_TSO; 1979 max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
1983 } 1980 }
1984#endif /*NETIF_F_TSO */
1985 1981
1986 if ((unlikely(avail < max_segments))) { 1982 if ((unlikely(avail < max_segments))) {
1987 /* we are out of transmit resources */ 1983 /* we are out of transmit resources */
@@ -2013,7 +2009,6 @@ again:
2013 2009
2014 cum_len = 0; 2010 cum_len = 0;
2015 2011
2016#ifdef NETIF_F_TSO
2017 if (mss) { /* TSO */ 2012 if (mss) { /* TSO */
2018 /* this removes any CKSUM flag from before */ 2013 /* this removes any CKSUM flag from before */
2019 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST); 2014 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
@@ -2029,7 +2024,6 @@ again:
2029 * the checksum by parsing the header. */ 2024 * the checksum by parsing the header. */
2030 pseudo_hdr_offset = mss; 2025 pseudo_hdr_offset = mss;
2031 } else 2026 } else
2032#endif /*NETIF_F_TSO */
2033 /* Mark small packets, and pad out tiny packets */ 2027 /* Mark small packets, and pad out tiny packets */
2034 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) { 2028 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
2035 flags |= MXGEFW_FLAGS_SMALL; 2029 flags |= MXGEFW_FLAGS_SMALL;
@@ -2097,7 +2091,6 @@ again:
2097 seglen = len; 2091 seglen = len;
2098 flags_next = flags & ~MXGEFW_FLAGS_FIRST; 2092 flags_next = flags & ~MXGEFW_FLAGS_FIRST;
2099 cum_len_next = cum_len + seglen; 2093 cum_len_next = cum_len + seglen;
2100#ifdef NETIF_F_TSO
2101 if (mss) { /* TSO */ 2094 if (mss) { /* TSO */
2102 (req - rdma_count)->rdma_count = rdma_count + 1; 2095 (req - rdma_count)->rdma_count = rdma_count + 1;
2103 2096
@@ -2124,7 +2117,6 @@ again:
2124 (small * MXGEFW_FLAGS_SMALL); 2117 (small * MXGEFW_FLAGS_SMALL);
2125 } 2118 }
2126 } 2119 }
2127#endif /* NETIF_F_TSO */
2128 req->addr_high = high_swapped; 2120 req->addr_high = high_swapped;
2129 req->addr_low = htonl(low); 2121 req->addr_low = htonl(low);
2130 req->pseudo_hdr_offset = htons(pseudo_hdr_offset); 2122 req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
@@ -2161,14 +2153,12 @@ again:
2161 } 2153 }
2162 2154
2163 (req - rdma_count)->rdma_count = rdma_count; 2155 (req - rdma_count)->rdma_count = rdma_count;
2164#ifdef NETIF_F_TSO
2165 if (mss) 2156 if (mss)
2166 do { 2157 do {
2167 req--; 2158 req--;
2168 req->flags |= MXGEFW_FLAGS_TSO_LAST; 2159 req->flags |= MXGEFW_FLAGS_TSO_LAST;
2169 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | 2160 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
2170 MXGEFW_FLAGS_FIRST))); 2161 MXGEFW_FLAGS_FIRST)));
2171#endif
2172 idx = ((count - 1) + tx->req) & tx->mask; 2162 idx = ((count - 1) + tx->req) & tx->mask;
2173 tx->info[idx].last = 1; 2163 tx->info[idx].last = 1;
2174 if (tx->wc_fifo == NULL) 2164 if (tx->wc_fifo == NULL)
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index e8598b809228..3f3896e98879 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -63,11 +63,14 @@
63 63
64#include "netxen_nic_hw.h" 64#include "netxen_nic_hw.h"
65 65
66#define NETXEN_NIC_BUILD_NO "2"
67#define _NETXEN_NIC_LINUX_MAJOR 3 66#define _NETXEN_NIC_LINUX_MAJOR 3
68#define _NETXEN_NIC_LINUX_MINOR 3 67#define _NETXEN_NIC_LINUX_MINOR 3
69#define _NETXEN_NIC_LINUX_SUBVERSION 3 68#define _NETXEN_NIC_LINUX_SUBVERSION 3
70#define NETXEN_NIC_LINUX_VERSIONID "3.3.3" "-" NETXEN_NIC_BUILD_NO 69#define NETXEN_NIC_LINUX_VERSIONID "3.3.3"
70
71#define NUM_FLASH_SECTORS (64)
72#define FLASH_SECTOR_SIZE (64 * 1024)
73#define FLASH_TOTAL_SIZE (NUM_FLASH_SECTORS * FLASH_SECTOR_SIZE)
71 74
72#define RCV_DESC_RINGSIZE \ 75#define RCV_DESC_RINGSIZE \
73 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count) 76 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
@@ -85,6 +88,7 @@
85#define NETXEN_RCV_PRODUCER_OFFSET 0 88#define NETXEN_RCV_PRODUCER_OFFSET 0
86#define NETXEN_RCV_PEG_DB_ID 2 89#define NETXEN_RCV_PEG_DB_ID 2
87#define NETXEN_HOST_DUMMY_DMA_SIZE 1024 90#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
91#define FLASH_SUCCESS 0
88 92
89#define ADDR_IN_WINDOW1(off) \ 93#define ADDR_IN_WINDOW1(off) \
90 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 94 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
@@ -1028,6 +1032,15 @@ void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
1028void netxen_load_firmware(struct netxen_adapter *adapter); 1032void netxen_load_firmware(struct netxen_adapter *adapter);
1029int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); 1033int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
1030int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); 1034int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
1035int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
1036 u8 *bytes, size_t size);
1037int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
1038 u8 *bytes, size_t size);
1039int netxen_flash_unlock(struct netxen_adapter *adapter);
1040int netxen_backup_crbinit(struct netxen_adapter *adapter);
1041int netxen_flash_erase_secondary(struct netxen_adapter *adapter);
1042int netxen_flash_erase_primary(struct netxen_adapter *adapter);
1043
1031int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data); 1044int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data);
1032int netxen_rom_se(struct netxen_adapter *adapter, int addr); 1045int netxen_rom_se(struct netxen_adapter *adapter, int addr);
1033int netxen_do_rom_se(struct netxen_adapter *adapter, int addr); 1046int netxen_do_rom_se(struct netxen_adapter *adapter, int addr);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index c381d77a7336..cc0efe213e01 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/delay.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include <linux/pci.h> 37#include <linux/pci.h>
37#include <asm/io.h> 38#include <asm/io.h>
@@ -94,17 +95,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
94 95
95static int netxen_nic_get_eeprom_len(struct net_device *dev) 96static int netxen_nic_get_eeprom_len(struct net_device *dev)
96{ 97{
97 struct netxen_port *port = netdev_priv(dev); 98 return FLASH_TOTAL_SIZE;
98 struct netxen_adapter *adapter = port->adapter;
99 int n;
100
101 if ((netxen_rom_fast_read(adapter, 0, &n) == 0)
102 && (n & NETXEN_ROM_ROUNDUP)) {
103 n &= ~NETXEN_ROM_ROUNDUP;
104 if (n < NETXEN_MAX_EEPROM_LEN)
105 return n;
106 }
107 return 0;
108} 99}
109 100
110static void 101static void
@@ -440,18 +431,92 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
440 struct netxen_port *port = netdev_priv(dev); 431 struct netxen_port *port = netdev_priv(dev);
441 struct netxen_adapter *adapter = port->adapter; 432 struct netxen_adapter *adapter = port->adapter;
442 int offset; 433 int offset;
434 int ret;
443 435
444 if (eeprom->len == 0) 436 if (eeprom->len == 0)
445 return -EINVAL; 437 return -EINVAL;
446 438
447 eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16); 439 eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16);
448 for (offset = 0; offset < eeprom->len; offset++) 440 offset = eeprom->offset;
449 if (netxen_rom_fast_read 441
450 (adapter, (8 * offset) + 8, (int *)eeprom->data) == -1) 442 ret = netxen_rom_fast_read_words(adapter, offset, bytes,
451 return -EIO; 443 eeprom->len);
444 if (ret < 0)
445 return ret;
446
452 return 0; 447 return 0;
453} 448}
454 449
450static int
451netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
452 u8 * bytes)
453{
454 struct netxen_port *port = netdev_priv(dev);
455 struct netxen_adapter *adapter = port->adapter;
456 int offset = eeprom->offset;
457 static int flash_start;
458 static int ready_to_flash;
459 int ret;
460
461 if (flash_start == 0) {
462 ret = netxen_flash_unlock(adapter);
463 if (ret < 0) {
464 printk(KERN_ERR "%s: Flash unlock failed.\n",
465 netxen_nic_driver_name);
466 return ret;
467 }
468 printk(KERN_INFO "%s: flash unlocked. \n",
469 netxen_nic_driver_name);
470 ret = netxen_flash_erase_secondary(adapter);
471 if (ret != FLASH_SUCCESS) {
472 printk(KERN_ERR "%s: Flash erase failed.\n",
473 netxen_nic_driver_name);
474 return ret;
475 }
476 printk(KERN_INFO "%s: secondary flash erased successfully.\n",
477 netxen_nic_driver_name);
478 flash_start = 1;
479 return 0;
480 }
481
482 if (offset == BOOTLD_START) {
483 ret = netxen_flash_erase_primary(adapter);
484 if (ret != FLASH_SUCCESS) {
485 printk(KERN_ERR "%s: Flash erase failed.\n",
486 netxen_nic_driver_name);
487 return ret;
488 }
489
490 ret = netxen_rom_se(adapter, USER_START);
491 if (ret != FLASH_SUCCESS)
492 return ret;
493 ret = netxen_rom_se(adapter, FIXED_START);
494 if (ret != FLASH_SUCCESS)
495 return ret;
496
497 printk(KERN_INFO "%s: primary flash erased successfully\n",
498 netxen_nic_driver_name);
499
500 ret = netxen_backup_crbinit(adapter);
501 if (ret != FLASH_SUCCESS) {
502 printk(KERN_ERR "%s: CRBinit backup failed.\n",
503 netxen_nic_driver_name);
504 return ret;
505 }
506 printk(KERN_INFO "%s: CRBinit backup done.\n",
507 netxen_nic_driver_name);
508 ready_to_flash = 1;
509 }
510
511 if (!ready_to_flash) {
512 printk(KERN_ERR "%s: Invalid write sequence, returning...\n",
513 netxen_nic_driver_name);
514 return -EINVAL;
515 }
516
517 return netxen_rom_fast_write_words(adapter, offset, bytes, eeprom->len);
518}
519
455static void 520static void
456netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) 521netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
457{ 522{
@@ -721,6 +786,7 @@ struct ethtool_ops netxen_nic_ethtool_ops = {
721 .get_link = netxen_nic_get_link, 786 .get_link = netxen_nic_get_link,
722 .get_eeprom_len = netxen_nic_get_eeprom_len, 787 .get_eeprom_len = netxen_nic_get_eeprom_len,
723 .get_eeprom = netxen_nic_get_eeprom, 788 .get_eeprom = netxen_nic_get_eeprom,
789 .set_eeprom = netxen_nic_set_eeprom,
724 .get_ringparam = netxen_nic_get_ringparam, 790 .get_ringparam = netxen_nic_get_ringparam,
725 .get_pauseparam = netxen_nic_get_pauseparam, 791 .get_pauseparam = netxen_nic_get_pauseparam,
726 .set_pauseparam = netxen_nic_set_pauseparam, 792 .set_pauseparam = netxen_nic_set_pauseparam,
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 973af96337a9..f7bb8c90537c 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -110,6 +110,7 @@ static void crb_addr_transform_setup(void)
110 crb_addr_transform(CAM); 110 crb_addr_transform(CAM);
111 crb_addr_transform(C2C1); 111 crb_addr_transform(C2C1);
112 crb_addr_transform(C2C0); 112 crb_addr_transform(C2C0);
113 crb_addr_transform(SMB);
113} 114}
114 115
115int netxen_init_firmware(struct netxen_adapter *adapter) 116int netxen_init_firmware(struct netxen_adapter *adapter)
@@ -276,6 +277,7 @@ unsigned long netxen_decode_crb_addr(unsigned long addr)
276 277
277static long rom_max_timeout = 10000; 278static long rom_max_timeout = 10000;
278static long rom_lock_timeout = 1000000; 279static long rom_lock_timeout = 1000000;
280static long rom_write_timeout = 700;
279 281
280static inline int rom_lock(struct netxen_adapter *adapter) 282static inline int rom_lock(struct netxen_adapter *adapter)
281{ 283{
@@ -404,7 +406,7 @@ do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
404{ 406{
405 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); 407 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
406 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); 408 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
407 udelay(100); /* prevent bursting on CRB */ 409 udelay(70); /* prevent bursting on CRB */
408 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 410 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
409 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); 411 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
410 if (netxen_wait_rom_done(adapter)) { 412 if (netxen_wait_rom_done(adapter)) {
@@ -413,13 +415,46 @@ do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
413 } 415 }
414 /* reset abyte_cnt and dummy_byte_cnt */ 416 /* reset abyte_cnt and dummy_byte_cnt */
415 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); 417 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
416 udelay(100); /* prevent bursting on CRB */ 418 udelay(70); /* prevent bursting on CRB */
417 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 419 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
418 420
419 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA); 421 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
420 return 0; 422 return 0;
421} 423}
422 424
425static inline int
426do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
427 u8 *bytes, size_t size)
428{
429 int addridx;
430 int ret = 0;
431
432 for (addridx = addr; addridx < (addr + size); addridx += 4) {
433 ret = do_rom_fast_read(adapter, addridx, (int *)bytes);
434 if (ret != 0)
435 break;
436 bytes += 4;
437 }
438
439 return ret;
440}
441
442int
443netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
444 u8 *bytes, size_t size)
445{
446 int ret;
447
448 ret = rom_lock(adapter);
449 if (ret < 0)
450 return ret;
451
452 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
453
454 netxen_rom_unlock(adapter);
455 return ret;
456}
457
423int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) 458int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
424{ 459{
425 int ret; 460 int ret;
@@ -443,6 +478,152 @@ int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data)
443 netxen_rom_unlock(adapter); 478 netxen_rom_unlock(adapter);
444 return ret; 479 return ret;
445} 480}
481
482static inline int do_rom_fast_write_words(struct netxen_adapter *adapter,
483 int addr, u8 *bytes, size_t size)
484{
485 int addridx = addr;
486 int ret = 0;
487
488 while (addridx < (addr + size)) {
489 int last_attempt = 0;
490 int timeout = 0;
491 int data;
492
493 data = *(u32*)bytes;
494
495 ret = do_rom_fast_write(adapter, addridx, data);
496 if (ret < 0)
497 return ret;
498
499 while(1) {
500 int data1;
501
502 do_rom_fast_read(adapter, addridx, &data1);
503 if (data1 == data)
504 break;
505
506 if (timeout++ >= rom_write_timeout) {
507 if (last_attempt++ < 4) {
508 ret = do_rom_fast_write(adapter,
509 addridx, data);
510 if (ret < 0)
511 return ret;
512 }
513 else {
514 printk(KERN_INFO "Data write did not "
515 "succeed at address 0x%x\n", addridx);
516 break;
517 }
518 }
519 }
520
521 bytes += 4;
522 addridx += 4;
523 }
524
525 return ret;
526}
527
528int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
529 u8 *bytes, size_t size)
530{
531 int ret = 0;
532
533 ret = rom_lock(adapter);
534 if (ret < 0)
535 return ret;
536
537 ret = do_rom_fast_write_words(adapter, addr, bytes, size);
538 netxen_rom_unlock(adapter);
539
540 return ret;
541}
542
543int netxen_rom_wrsr(struct netxen_adapter *adapter, int data)
544{
545 int ret;
546
547 ret = netxen_rom_wren(adapter);
548 if (ret < 0)
549 return ret;
550
551 netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_ROM_WDATA, data);
552 netxen_crb_writelit_adapter(adapter,
553 NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0x1);
554
555 ret = netxen_wait_rom_done(adapter);
556 if (ret < 0)
557 return ret;
558
559 return netxen_rom_wip_poll(adapter);
560}
561
562int netxen_rom_rdsr(struct netxen_adapter *adapter)
563{
564 int ret;
565
566 ret = rom_lock(adapter);
567 if (ret < 0)
568 return ret;
569
570 ret = netxen_do_rom_rdsr(adapter);
571 netxen_rom_unlock(adapter);
572 return ret;
573}
574
575int netxen_backup_crbinit(struct netxen_adapter *adapter)
576{
577 int ret = FLASH_SUCCESS;
578 int val;
579 char *buffer = kmalloc(FLASH_SECTOR_SIZE, GFP_KERNEL);
580
581 if (!buffer)
582 return -ENOMEM;
583 /* unlock sector 63 */
584 val = netxen_rom_rdsr(adapter);
585 val = val & 0xe3;
586 ret = netxen_rom_wrsr(adapter, val);
587 if (ret != FLASH_SUCCESS)
588 goto out_kfree;
589
590 ret = netxen_rom_wip_poll(adapter);
591 if (ret != FLASH_SUCCESS)
592 goto out_kfree;
593
594 /* copy sector 0 to sector 63 */
595 ret = netxen_rom_fast_read_words(adapter, CRBINIT_START,
596 buffer, FLASH_SECTOR_SIZE);
597 if (ret != FLASH_SUCCESS)
598 goto out_kfree;
599
600 ret = netxen_rom_fast_write_words(adapter, FIXED_START,
601 buffer, FLASH_SECTOR_SIZE);
602 if (ret != FLASH_SUCCESS)
603 goto out_kfree;
604
605 /* lock sector 63 */
606 val = netxen_rom_rdsr(adapter);
607 if (!(val & 0x8)) {
608 val |= (0x1 << 2);
609 /* lock sector 63 */
610 if (netxen_rom_wrsr(adapter, val) == 0) {
611 ret = netxen_rom_wip_poll(adapter);
612 if (ret != FLASH_SUCCESS)
613 goto out_kfree;
614
615 /* lock SR writes */
616 ret = netxen_rom_wip_poll(adapter);
617 if (ret != FLASH_SUCCESS)
618 goto out_kfree;
619 }
620 }
621
622out_kfree:
623 kfree(buffer);
624 return ret;
625}
626
446int netxen_do_rom_se(struct netxen_adapter *adapter, int addr) 627int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
447{ 628{
448 netxen_rom_wren(adapter); 629 netxen_rom_wren(adapter);
@@ -457,6 +638,27 @@ int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
457 return netxen_rom_wip_poll(adapter); 638 return netxen_rom_wip_poll(adapter);
458} 639}
459 640
641void check_erased_flash(struct netxen_adapter *adapter, int addr)
642{
643 int i;
644 int val;
645 int count = 0, erased_errors = 0;
646 int range;
647
648 range = (addr == USER_START) ? FIXED_START : addr + FLASH_SECTOR_SIZE;
649
650 for (i = addr; i < range; i += 4) {
651 netxen_rom_fast_read(adapter, i, &val);
652 if (val != 0xffffffff)
653 erased_errors++;
654 count++;
655 }
656
657 if (erased_errors)
658 printk(KERN_INFO "0x%x out of 0x%x words fail to be erased "
659 "for sector address: %x\n", erased_errors, count, addr);
660}
661
460int netxen_rom_se(struct netxen_adapter *adapter, int addr) 662int netxen_rom_se(struct netxen_adapter *adapter, int addr)
461{ 663{
462 int ret = 0; 664 int ret = 0;
@@ -465,6 +667,68 @@ int netxen_rom_se(struct netxen_adapter *adapter, int addr)
465 667 }
466 668 ret = netxen_do_rom_se(adapter, addr);
467 669 netxen_rom_unlock(adapter);
670 msleep(30);
671 check_erased_flash(adapter, addr);
672
673 return ret;
674}
675
676int
677netxen_flash_erase_sections(struct netxen_adapter *adapter, int start, int end)
678{
679 int ret = FLASH_SUCCESS;
680 int i;
681
682 for (i = start; i < end; i++) {
683 ret = netxen_rom_se(adapter, i * FLASH_SECTOR_SIZE);
684 if (ret)
685 break;
686 ret = netxen_rom_wip_poll(adapter);
687 if (ret < 0)
688 return ret;
689 }
690
691 return ret;
692}
693
694int
695netxen_flash_erase_secondary(struct netxen_adapter *adapter)
696{
697 int ret = FLASH_SUCCESS;
698 int start, end;
699
700 start = SECONDARY_START / FLASH_SECTOR_SIZE;
701 end = USER_START / FLASH_SECTOR_SIZE;
702 ret = netxen_flash_erase_sections(adapter, start, end);
703
704 return ret;
705}
706
707int
708netxen_flash_erase_primary(struct netxen_adapter *adapter)
709{
710 int ret = FLASH_SUCCESS;
711 int start, end;
712
713 start = PRIMARY_START / FLASH_SECTOR_SIZE;
714 end = SECONDARY_START / FLASH_SECTOR_SIZE;
715 ret = netxen_flash_erase_sections(adapter, start, end);
716
717 return ret;
718}
719
720int netxen_flash_unlock(struct netxen_adapter *adapter)
721{
722 int ret = 0;
723
724 ret = netxen_rom_wrsr(adapter, 0);
725 if (ret < 0)
726 return ret;
727
728 ret = netxen_rom_wren(adapter);
729 if (ret < 0)
730 return ret;
731
468 732 return ret;
469 733 }
470 734
@@ -543,9 +807,13 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
543 807 }
544 808 for (i = 0; i < n; i++) {
545 809
546 off =
547 netxen_decode_crb_addr((unsigned long)buf[i].addr) +
548 NETXEN_PCI_CRBSPACE;
810 off = netxen_decode_crb_addr((unsigned long)buf[i].addr);
811 if (off == NETXEN_ADDR_ERROR) {
812 printk(KERN_ERR"CRB init value out of range %lx\n",
813 buf[i].addr);
814 continue;
815 }
816 off += NETXEN_PCI_CRBSPACE;
549 817 /* skipping cold reboot MAGIC */
550 818 if (off == NETXEN_CAM_RAM(0x1fc))
551 819 continue;
@@ -662,6 +930,7 @@ void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
662 930 int loops = 0;
663 931
664 932 if (!pegtune_val) {
933 val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
665 934 while (val != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
666 935 udelay(100);
667 936 schedule();
diff --git a/drivers/net/oaknet.c b/drivers/net/oaknet.c
deleted file mode 100644
index 702e3e95612a..000000000000
--- a/drivers/net/oaknet.c
+++ /dev/null
@@ -1,666 +0,0 @@
1/*
2 *
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Module name: oaknet.c
6 *
7 * Description:
8 * Driver for the National Semiconductor DP83902AV Ethernet controller
9 * on-board the IBM PowerPC "Oak" evaluation board. Adapted from the
10 * various other 8390 drivers written by Donald Becker and Paul Gortmaker.
11 *
12 * Additional inspiration from the "tcd8390.c" driver from TiVo, Inc.
13 * and "enetLib.c" from IBM.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/errno.h>
19#include <linux/delay.h>
20#include <linux/netdevice.h>
21#include <linux/etherdevice.h>
22#include <linux/init.h>
23#include <linux/jiffies.h>
24
25#include <asm/board.h>
26#include <asm/io.h>
27
28#include "8390.h"
29
30
31/* Preprocessor Defines */
32
33#if !defined(TRUE) || TRUE != 1
34#define TRUE 1
35#endif
36
37#if !defined(FALSE) || FALSE != 0
38#define FALSE 0
39#endif
40
41#define OAKNET_START_PG 0x20 /* First page of TX buffer */
42#define OAKNET_STOP_PG 0x40 /* Last page +1 of RX ring */
43
44#define OAKNET_WAIT (2 * HZ / 100) /* 20 ms */
45
46/* Experimenting with some fixes for a broken driver... */
47
48#define OAKNET_DISINT
49#define OAKNET_HEADCHECK
50#define OAKNET_RWFIX
51
52
53/* Global Variables */
54
55static const char *name = "National DP83902AV";
56
57static struct net_device *oaknet_devs;
58
59
60/* Function Prototypes */
61
62static int oaknet_open(struct net_device *dev);
63static int oaknet_close(struct net_device *dev);
64
65static void oaknet_reset_8390(struct net_device *dev);
66static void oaknet_get_8390_hdr(struct net_device *dev,
67 struct e8390_pkt_hdr *hdr, int ring_page);
68static void oaknet_block_input(struct net_device *dev, int count,
69 struct sk_buff *skb, int ring_offset);
70static void oaknet_block_output(struct net_device *dev, int count,
71 const unsigned char *buf, int start_page);
72
73static void oaknet_dma_error(struct net_device *dev, const char *name);
74
75
76/*
77 * int oaknet_init()
78 *
79 * Description:
80 * This routine performs all the necessary platform-specific initiali-
81 * zation and set-up for the IBM "Oak" evaluation board's National
82 * Semiconductor DP83902AV "ST-NIC" Ethernet controller.
83 *
84 * Input(s):
85 * N/A
86 *
87 * Output(s):
88 * N/A
89 *
90 * Returns:
91 * 0 if OK, otherwise system error number on error.
92 *
93 */
94static int __init oaknet_init(void)
95{
96 register int i;
97 int reg0, regd;
98 int ret = -ENOMEM;
99 struct net_device *dev;
100#if 0
101 unsigned long ioaddr = OAKNET_IO_BASE;
102#else
103 unsigned long ioaddr = ioremap(OAKNET_IO_BASE, OAKNET_IO_SIZE);
104#endif
105 bd_t *bip = (bd_t *)__res;
106
107 if (!ioaddr)
108 return -ENOMEM;
109
110 dev = alloc_ei_netdev();
111 if (!dev)
112 goto out_unmap;
113
114 ret = -EBUSY;
115 if (!request_region(OAKNET_IO_BASE, OAKNET_IO_SIZE, name))
116 goto out_dev;
117
118 /* Quick register check to see if the device is really there. */
119
120 ret = -ENODEV;
121 if ((reg0 = ei_ibp(ioaddr)) == 0xFF)
122 goto out_region;
123
124 /*
125 * That worked. Now a more thorough check, using the multicast
126 * address registers, that the device is definitely out there
127 * and semi-functional.
128 */
129
130 ei_obp(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
131 regd = ei_ibp(ioaddr + 0x0D);
132 ei_obp(0xFF, ioaddr + 0x0D);
133 ei_obp(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
134 ei_ibp(ioaddr + EN0_COUNTER0);
135
136 /* It's no good. Fix things back up and leave. */
137
138 ret = -ENODEV;
139 if (ei_ibp(ioaddr + EN0_COUNTER0) != 0) {
140 ei_obp(reg0, ioaddr);
141 ei_obp(regd, ioaddr + 0x0D);
142 goto out_region;
143 }
144
145 SET_MODULE_OWNER(dev);
146
147 /*
148 * This controller is on an embedded board, so the base address
149 * and interrupt assignments are pre-assigned and unchangeable.
150 */
151
152 dev->base_addr = ioaddr;
153 dev->irq = OAKNET_INT;
154
155 /*
156 * Disable all chip interrupts for now and ACK all pending
157 * interrupts.
158 */
159
160 ei_obp(0x0, ioaddr + EN0_IMR);
161 ei_obp(0xFF, ioaddr + EN0_ISR);
162
163 /* Attempt to get the interrupt line */
164
165 ret = -EAGAIN;
166 if (request_irq(dev->irq, ei_interrupt, 0, name, dev)) {
167 printk("%s: unable to request interrupt %d.\n",
168 name, dev->irq);
169 goto out_region;
170 }
171
172 /* Tell the world about what and where we've found. */
173
174 printk("%s: %s at", dev->name, name);
175 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
176 dev->dev_addr[i] = bip->bi_enetaddr[i];
177 printk("%c%.2x", (i ? ':' : ' '), dev->dev_addr[i]);
178 }
179 printk(", found at %#lx, using IRQ %d.\n", dev->base_addr, dev->irq);
180
181 /* Set up some required driver fields and then we're done. */
182
183 ei_status.name = name;
184 ei_status.word16 = FALSE;
185 ei_status.tx_start_page = OAKNET_START_PG;
186 ei_status.rx_start_page = OAKNET_START_PG + TX_PAGES;
187 ei_status.stop_page = OAKNET_STOP_PG;
188
189 ei_status.reset_8390 = &oaknet_reset_8390;
190 ei_status.block_input = &oaknet_block_input;
191 ei_status.block_output = &oaknet_block_output;
192 ei_status.get_8390_hdr = &oaknet_get_8390_hdr;
193
194 dev->open = oaknet_open;
195 dev->stop = oaknet_close;
196#ifdef CONFIG_NET_POLL_CONTROLLER
197 dev->poll_controller = ei_poll;
198#endif
199
200 NS8390_init(dev, FALSE);
201 ret = register_netdev(dev);
202 if (ret)
203 goto out_irq;
204
205 oaknet_devs = dev;
206 return 0;
207
208out_irq:
209 free_irq(dev->irq, dev);
210out_region:
211 release_region(OAKNET_IO_BASE, OAKNET_IO_SIZE);
212out_dev:
213 free_netdev(dev);
214out_unmap:
215 iounmap(ioaddr);
216 return ret;
217}
218
219/*
220 * static int oaknet_open()
221 *
222 * Description:
223 * This routine is a modest wrapper around ei_open, the 8390-generic,
224 * driver open routine. This just increments the module usage count
225 * and passes along the status from ei_open.
226 *
227 * Input(s):
228 * *dev - Pointer to the device structure for this driver.
229 *
230 * Output(s):
231 * *dev - Pointer to the device structure for this driver, potentially
232 * modified by ei_open.
233 *
234 * Returns:
235 * 0 if OK, otherwise < 0 on error.
236 *
237 */
238static int
239oaknet_open(struct net_device *dev)
240{
241 int status = ei_open(dev);
242 return (status);
243}
244
245/*
246 * static int oaknet_close()
247 *
248 * Description:
249 * This routine is a modest wrapper around ei_close, the 8390-generic,
250 * driver close routine. This just decrements the module usage count
251 * and passes along the status from ei_close.
252 *
253 * Input(s):
254 * *dev - Pointer to the device structure for this driver.
255 *
256 * Output(s):
257 * *dev - Pointer to the device structure for this driver, potentially
258 * modified by ei_close.
259 *
260 * Returns:
261 * 0 if OK, otherwise < 0 on error.
262 *
263 */
264static int
265oaknet_close(struct net_device *dev)
266{
267 int status = ei_close(dev);
268 return (status);
269}
270
271/*
272 * static void oaknet_reset_8390()
273 *
274 * Description:
275 * This routine resets the DP83902 chip.
276 *
277 * Input(s):
278 * *dev - Pointer to the device structure for this driver.
279 *
280 * Output(s):
281 * N/A
282 *
283 * Returns:
284 * N/A
285 *
286 */
287static void
288oaknet_reset_8390(struct net_device *dev)
289{
290 int base = E8390_BASE;
291
292 /*
293 * We have no provision for resetting the controller as is done
294 * in other drivers, such as "ne.c". However, the following
295 * seems to work well enough in the TiVo driver.
296 */
297
298 printk("Resetting %s...\n", dev->name);
299 ei_obp(E8390_STOP | E8390_NODMA | E8390_PAGE0, base + E8390_CMD);
300 ei_status.txing = 0;
301 ei_status.dmaing = 0;
302}
303
304/*
305 * static void oaknet_get_8390_hdr()
306 *
307 * Description:
308 * This routine grabs the 8390-specific header. It's similar to the
309 * block input routine, but we don't need to be concerned with ring wrap
310 * as the header will be at the start of a page, so we optimize accordingly.
311 *
312 * Input(s):
313 * *dev - Pointer to the device structure for this driver.
314 * *hdr - Pointer to storage for the 8390-specific packet header.
315 * ring_page - ?
316 *
317 * Output(s):
318 * *hdr - Pointer to the 8390-specific packet header for the just-
319 * received frame.
320 *
321 * Returns:
322 * N/A
323 *
324 */
325static void
326oaknet_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
327 int ring_page)
328{
329 int base = dev->base_addr;
330
331 /*
332 * This should NOT happen. If it does, it is the LAST thing you'll
333 * see.
334 */
335
336 if (ei_status.dmaing) {
337 oaknet_dma_error(dev, "oaknet_get_8390_hdr");
338 return;
339 }
340
341 ei_status.dmaing |= 0x01;
342 outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, base + OAKNET_CMD);
343 outb_p(sizeof(struct e8390_pkt_hdr), base + EN0_RCNTLO);
344 outb_p(0, base + EN0_RCNTHI);
345 outb_p(0, base + EN0_RSARLO); /* On page boundary */
346 outb_p(ring_page, base + EN0_RSARHI);
347 outb_p(E8390_RREAD + E8390_START, base + OAKNET_CMD);
348
349 if (ei_status.word16)
350 insw(base + OAKNET_DATA, hdr,
351 sizeof(struct e8390_pkt_hdr) >> 1);
352 else
353 insb(base + OAKNET_DATA, hdr,
354 sizeof(struct e8390_pkt_hdr));
355
356 /* Byte-swap the packet byte count */
357
358 hdr->count = le16_to_cpu(hdr->count);
359
360 outb_p(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */
361 ei_status.dmaing &= ~0x01;
362}
363
364/*
365 * XXX - Document me.
366 */
367static void
368oaknet_block_input(struct net_device *dev, int count, struct sk_buff *skb,
369 int ring_offset)
370{
371 int base = OAKNET_BASE;
372 char *buf = skb->data;
373
374 /*
375 * This should NOT happen. If it does, it is the LAST thing you'll
376 * see.
377 */
378
379 if (ei_status.dmaing) {
380 oaknet_dma_error(dev, "oaknet_block_input");
381 return;
382 }
383
384#ifdef OAKNET_DISINT
385 save_flags(flags);
386 cli();
387#endif
388
389 ei_status.dmaing |= 0x01;
390 ei_obp(E8390_NODMA + E8390_PAGE0 + E8390_START, base + E8390_CMD);
391 ei_obp(count & 0xff, base + EN0_RCNTLO);
392 ei_obp(count >> 8, base + EN0_RCNTHI);
393 ei_obp(ring_offset & 0xff, base + EN0_RSARLO);
394 ei_obp(ring_offset >> 8, base + EN0_RSARHI);
395 ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
396 if (ei_status.word16) {
397 ei_isw(base + E8390_DATA, buf, count >> 1);
398 if (count & 0x01) {
399 buf[count - 1] = ei_ib(base + E8390_DATA);
400#ifdef OAKNET_HEADCHECK
401 bytes++;
402#endif
403 }
404 } else {
405 ei_isb(base + E8390_DATA, buf, count);
406 }
407#ifdef OAKNET_HEADCHECK
408 /*
409 * This was for the ALPHA version only, but enough people have
410 * been encountering problems so it is still here. If you see
411 * this message you either 1) have a slightly incompatible clone
412 * or 2) have noise/speed problems with your bus.
413 */
414
415 /* DMA termination address check... */
416 {
417 int addr, tries = 20;
418 do {
419 /* DON'T check for 'ei_ibp(EN0_ISR) & ENISR_RDC' here
420 -- it's broken for Rx on some cards! */
421 int high = ei_ibp(base + EN0_RSARHI);
422 int low = ei_ibp(base + EN0_RSARLO);
423 addr = (high << 8) + low;
424 if (((ring_offset + bytes) & 0xff) == low)
425 break;
426 } while (--tries > 0);
427 if (tries <= 0)
428 printk("%s: RX transfer address mismatch,"
429 "%#4.4x (expected) vs. %#4.4x (actual).\n",
430 dev->name, ring_offset + bytes, addr);
431 }
432#endif
433 ei_obp(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */
434 ei_status.dmaing &= ~0x01;
435
436#ifdef OAKNET_DISINT
437 restore_flags(flags);
438#endif
439}
440
441/*
442 * static void oaknet_block_output()
443 *
444 * Description:
445 * This routine...
446 *
447 * Input(s):
448 * *dev - Pointer to the device structure for this driver.
449 * count - Number of bytes to be transferred.
450 * *buf -
451 * start_page -
452 *
453 * Output(s):
454 * N/A
455 *
456 * Returns:
457 * N/A
458 *
459 */
460static void
461oaknet_block_output(struct net_device *dev, int count,
462 const unsigned char *buf, int start_page)
463{
464 int base = E8390_BASE;
465#if 0
466 int bug;
467#endif
468 unsigned long start;
469#ifdef OAKNET_DISINT
470 unsigned long flags;
471#endif
472#ifdef OAKNET_HEADCHECK
473 int retries = 0;
474#endif
475
476 /* Round the count up for word writes. */
477
478 if (ei_status.word16 && (count & 0x1))
479 count++;
480
481 /*
482 * This should NOT happen. If it does, it is the LAST thing you'll
483 * see.
484 */
485
486 if (ei_status.dmaing) {
487 oaknet_dma_error(dev, "oaknet_block_output");
488 return;
489 }
490
491#ifdef OAKNET_DISINT
492 save_flags(flags);
493 cli();
494#endif
495
496 ei_status.dmaing |= 0x01;
497
498 /* Make sure we are in page 0. */
499
500 ei_obp(E8390_PAGE0 + E8390_START + E8390_NODMA, base + E8390_CMD);
501
502#ifdef OAKNET_HEADCHECK
503retry:
504#endif
505
506#if 0
507 /*
508 * The 83902 documentation states that the processor needs to
509 * do a "dummy read" before doing the remote write to work
510 * around a chip bug they don't feel like fixing.
511 */
512
513 bug = 0;
514 while (1) {
515 unsigned int rdhi;
516 unsigned int rdlo;
517
518 /* Now the normal output. */
519 ei_obp(ENISR_RDC, base + EN0_ISR);
520 ei_obp(count & 0xff, base + EN0_RCNTLO);
521 ei_obp(count >> 8, base + EN0_RCNTHI);
522 ei_obp(0x00, base + EN0_RSARLO);
523 ei_obp(start_page, base + EN0_RSARHI);
524
525 if (bug++)
526 break;
527
528 /* Perform the dummy read */
529 rdhi = ei_ibp(base + EN0_CRDAHI);
530 rdlo = ei_ibp(base + EN0_CRDALO);
531 ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
532
533 while (1) {
534 unsigned int nrdhi;
535 unsigned int nrdlo;
536 nrdhi = ei_ibp(base + EN0_CRDAHI);
537 nrdlo = ei_ibp(base + EN0_CRDALO);
538 if ((rdhi != nrdhi) || (rdlo != nrdlo))
539 break;
540 }
541 }
542#else
543#ifdef OAKNET_RWFIX
544 /*
545 * Handle the read-before-write bug the same way as the
546 * Crynwr packet driver -- the Nat'l Semi. method doesn't work.
547 * Actually this doesn't always work either, but if you have
548 * problems with your 83902 this is better than nothing!
549 */
550
551 ei_obp(0x42, base + EN0_RCNTLO);
552 ei_obp(0x00, base + EN0_RCNTHI);
553 ei_obp(0x42, base + EN0_RSARLO);
554 ei_obp(0x00, base + EN0_RSARHI);
555 ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
556 /* Make certain that the dummy read has occurred. */
557 udelay(6);
558#endif
559
560 ei_obp(ENISR_RDC, base + EN0_ISR);
561
562 /* Now the normal output. */
563 ei_obp(count & 0xff, base + EN0_RCNTLO);
564 ei_obp(count >> 8, base + EN0_RCNTHI);
565 ei_obp(0x00, base + EN0_RSARLO);
566 ei_obp(start_page, base + EN0_RSARHI);
567#endif /* 0/1 */
568
569 ei_obp(E8390_RWRITE + E8390_START, base + E8390_CMD);
570 if (ei_status.word16) {
571 ei_osw(E8390_BASE + E8390_DATA, buf, count >> 1);
572 } else {
573 ei_osb(E8390_BASE + E8390_DATA, buf, count);
574 }
575
576#ifdef OAKNET_DISINT
577 restore_flags(flags);
578#endif
579
580 start = jiffies;
581
582#ifdef OAKNET_HEADCHECK
583 /*
584 * This was for the ALPHA version only, but enough people have
585 * been encountering problems so it is still here.
586 */
587
588 {
589 /* DMA termination address check... */
590 int addr, tries = 20;
591 do {
592 int high = ei_ibp(base + EN0_RSARHI);
593 int low = ei_ibp(base + EN0_RSARLO);
594 addr = (high << 8) + low;
595 if ((start_page << 8) + count == addr)
596 break;
597 } while (--tries > 0);
598
599 if (tries <= 0) {
600 printk("%s: Tx packet transfer address mismatch,"
601 "%#4.4x (expected) vs. %#4.4x (actual).\n",
602 dev->name, (start_page << 8) + count, addr);
603 if (retries++ == 0)
604 goto retry;
605 }
606 }
607#endif
608
609 while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) {
610 if (time_after(jiffies, start + OAKNET_WAIT)) {
611 printk("%s: timeout waiting for Tx RDC.\n", dev->name);
612 oaknet_reset_8390(dev);
613 NS8390_init(dev, TRUE);
614 break;
615 }
616 }
617
618 ei_obp(ENISR_RDC, base + EN0_ISR); /* Ack intr. */
619 ei_status.dmaing &= ~0x01;
620}
621
622/*
623 * static void oaknet_dma_error()
624 *
625 * Description:
626 * This routine prints out a last-ditch informative message to the console
627 * indicating that a DMA error occurred. If you see this, it's the last
628 * thing you'll see.
629 *
630 * Input(s):
631 * *dev - Pointer to the device structure for this driver.
632 * *name - Informative text (e.g. function name) indicating where the
633 * DMA error occurred.
634 *
635 * Output(s):
636 * N/A
637 *
638 * Returns:
639 * N/A
640 *
641 */
642static void
643oaknet_dma_error(struct net_device *dev, const char *name)
644{
645 printk(KERN_EMERG "%s: DMAing conflict in %s."
646 "[DMAstat:%d][irqlock:%d][intr:%ld]\n",
647 dev->name, name, ei_status.dmaing, ei_status.irqlock,
648 dev->interrupt);
649}
650
651/*
652 * Oak Ethernet module unload interface.
653 */
654static void __exit oaknet_cleanup_module (void)
655{
656 /* Convert to loop once driver supports multiple devices. */
657 unregister_netdev(oaknet_devs);
658 free_irq(oaknet_devs->irq, oaknet_devs);
659 release_region(oaknet_devs->base_addr, OAKNET_IO_SIZE);
660 iounmap((void __iomem *)oaknet_devs->base_addr);
661 free_netdev(oaknet_devs);
662}
663
664module_init(oaknet_init);
665module_exit(oaknet_cleanup_module);
666MODULE_LICENSE("GPL");
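The block-output comments in the removed file describe the 8390 remote-DMA handshake: program the byte count and start address, start the remote write, push the data, then spin on EN0_ISR until ENISR_RDC is set or a timeout expires. A condensed sketch of just that completion wait, reusing the register names the file already used; the helper name and the -ETIMEDOUT return are illustrative, not part of the original driver:

/* Sketch only; mirrors the RDC wait at the end of oaknet_block_output(). */
static int wait_remote_dma_done(struct net_device *dev, int base)
{
	unsigned long deadline = jiffies + OAKNET_WAIT;		/* ~20 ms */

	while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) {
		if (time_after(jiffies, deadline)) {
			printk("%s: timeout waiting for Tx RDC.\n", dev->name);
			return -ETIMEDOUT;	/* caller resets the 8390 */
		}
	}
	ei_obp(ENISR_RDC, base + EN0_ISR);	/* ack the remote-DMA interrupt */
	return 0;
}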
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
new file mode 100644
index 000000000000..d670ac74824f
--- /dev/null
+++ b/drivers/net/pasemi_mac.c
@@ -0,0 +1,1019 @@
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <asm/dma-mapping.h>
29#include <linux/in.h>
30#include <linux/skbuff.h>
31
32#include <linux/ip.h>
33#include <linux/tcp.h>
34#include <net/checksum.h>
35
36#include "pasemi_mac.h"
37
38
39/* TODO list
40 *
41 * - Get rid of pci_{read,write}_config(), map registers with ioremap
42 * for performance
43 * - PHY support
44 * - Multicast support
45 * - Large MTU support
46 * - Other performance improvements
47 */
48
49
50/* Must be a power of two */
51#define RX_RING_SIZE 512
52#define TX_RING_SIZE 512
53
54#define TX_DESC(mac, num) ((mac)->tx->desc[(num) & (TX_RING_SIZE-1)])
55#define TX_DESC_INFO(mac, num) ((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)])
56#define RX_DESC(mac, num) ((mac)->rx->desc[(num) & (RX_RING_SIZE-1)])
57#define RX_DESC_INFO(mac, num) ((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)])
58#define RX_BUFF(mac, num) ((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])
59
60#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
61
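The masking in the TX_DESC()/RX_DESC() accessors above only works because the ring sizes are powers of two: an index that simply keeps counting up can be reduced with a bitwise AND against SIZE - 1 instead of a modulo, and unsigned subtraction of the two counters still gives the number of in-flight entries across wrap-around. A standalone sketch of that convention (not driver code):

/* Standalone illustration of the power-of-two ring convention. */
#include <assert.h>

#define RING_SIZE	512			/* must be a power of two */
#define RING_IDX(i)	((i) & (RING_SIZE - 1))

static unsigned int ring_in_flight(unsigned int next_to_use,
				   unsigned int next_to_clean)
{
	/* Unsigned wrap-around keeps this correct even after 2^32 packets. */
	return next_to_use - next_to_clean;
}

int main(void)
{
	assert(RING_IDX(513) == 1);		/* 513 % 512 */
	assert(ring_in_flight(5, 3) == 2);
	return 0;
}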
62/* XXXOJN these should come out of the device tree some day */
63#define PAS_DMA_CAP_BASE 0xe00d0040
64#define PAS_DMA_CAP_SIZE 0x100
65#define PAS_DMA_COM_BASE 0xe00d0100
66#define PAS_DMA_COM_SIZE 0x100
67
68static struct pasdma_status *dma_status;
69
70static int pasemi_get_mac_addr(struct pasemi_mac *mac)
71{
72 struct pci_dev *pdev = mac->pdev;
73 struct device_node *dn = pci_device_to_OF_node(pdev);
74 const u8 *maddr;
75 u8 addr[6];
76
77 if (!dn) {
78 dev_dbg(&pdev->dev,
79 "No device node for mac, not configuring\n");
80 return -ENOENT;
81 }
82
83 maddr = get_property(dn, "mac-address", NULL);
84 if (maddr == NULL) {
85 dev_warn(&pdev->dev,
86 "no mac address in device tree, not configuring\n");
87 return -ENOENT;
88 }
89
90 if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
91 &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
92 dev_warn(&pdev->dev,
93 "can't parse mac address, not configuring\n");
94 return -EINVAL;
95 }
96
97 memcpy(mac->mac_addr, addr, sizeof(addr));
98 return 0;
99}
100
101static int pasemi_mac_setup_rx_resources(struct net_device *dev)
102{
103 struct pasemi_mac_rxring *ring;
104 struct pasemi_mac *mac = netdev_priv(dev);
105 int chan_id = mac->dma_rxch;
106
107 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
108
109 if (!ring)
110 goto out_ring;
111
112 spin_lock_init(&ring->lock);
113
114 ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
115 RX_RING_SIZE, GFP_KERNEL);
116
117 if (!ring->desc_info)
118 goto out_desc_info;
119
120 /* Allocate descriptors */
121 ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
122 RX_RING_SIZE *
123 sizeof(struct pas_dma_xct_descr),
124 &ring->dma, GFP_KERNEL);
125
126 if (!ring->desc)
127 goto out_desc;
128
129 memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
130
131 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
132 RX_RING_SIZE * sizeof(u64),
133 &ring->buf_dma, GFP_KERNEL);
134 if (!ring->buffers)
135 goto out_buffers;
136
137 memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
138
139 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
140 PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
141
142 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
143 PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
144 PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2));
145
146 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
147 PAS_DMA_RXCHAN_CFG_HBU(1));
148
149 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
150 PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
151
152 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
153 PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
154 PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));
155
156 ring->next_to_fill = 0;
157 ring->next_to_clean = 0;
158
159 snprintf(ring->irq_name, sizeof(ring->irq_name),
160 "%s rx", dev->name);
161 mac->rx = ring;
162
163 return 0;
164
165out_buffers:
166 dma_free_coherent(&mac->dma_pdev->dev,
167 RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
168 mac->rx->desc, mac->rx->dma);
169out_desc:
170 kfree(ring->desc_info);
171out_desc_info:
172 kfree(ring);
173out_ring:
174 return -ENOMEM;
175}
176
177
178static int pasemi_mac_setup_tx_resources(struct net_device *dev)
179{
180 struct pasemi_mac *mac = netdev_priv(dev);
181 u32 val;
182 int chan_id = mac->dma_txch;
183 struct pasemi_mac_txring *ring;
184
185 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
186 if (!ring)
187 goto out_ring;
188
189 spin_lock_init(&ring->lock);
190
191 ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
192 TX_RING_SIZE, GFP_KERNEL);
193 if (!ring->desc_info)
194 goto out_desc_info;
195
196 /* Allocate descriptors */
197 ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
198 TX_RING_SIZE *
199 sizeof(struct pas_dma_xct_descr),
200 &ring->dma, GFP_KERNEL);
201 if (!ring->desc)
202 goto out_desc;
203
204 memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
205
206 pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
207 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
208 val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
209 val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
210
211 pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);
212
213 pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
214 PAS_DMA_TXCHAN_CFG_TY_IFACE |
215 PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
216 PAS_DMA_TXCHAN_CFG_UP |
217 PAS_DMA_TXCHAN_CFG_WT(2));
218
219 ring->next_to_use = 0;
220 ring->next_to_clean = 0;
221
222 snprintf(ring->irq_name, sizeof(ring->irq_name),
223 "%s tx", dev->name);
224 mac->tx = ring;
225
226 return 0;
227
228out_desc:
229 kfree(ring->desc_info);
230out_desc_info:
231 kfree(ring);
232out_ring:
233 return -ENOMEM;
234}
235
236static void pasemi_mac_free_tx_resources(struct net_device *dev)
237{
238 struct pasemi_mac *mac = netdev_priv(dev);
239 unsigned int i;
240 struct pasemi_mac_buffer *info;
241 struct pas_dma_xct_descr *dp;
242
243 for (i = 0; i < TX_RING_SIZE; i++) {
244 info = &TX_DESC_INFO(mac, i);
245 dp = &TX_DESC(mac, i);
246 if (info->dma) {
247 if (info->skb) {
248 pci_unmap_single(mac->dma_pdev,
249 info->dma,
250 info->skb->len,
251 PCI_DMA_TODEVICE);
252 dev_kfree_skb_any(info->skb);
253 }
254 info->dma = 0;
255 info->skb = NULL;
256 dp->mactx = 0;
257 dp->ptr = 0;
258 }
259 }
260
261 dma_free_coherent(&mac->dma_pdev->dev,
262 TX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
263 mac->tx->desc, mac->tx->dma);
264
265 kfree(mac->tx->desc_info);
266 kfree(mac->tx);
267 mac->tx = NULL;
268}
269
270static void pasemi_mac_free_rx_resources(struct net_device *dev)
271{
272 struct pasemi_mac *mac = netdev_priv(dev);
273 unsigned int i;
274 struct pasemi_mac_buffer *info;
275 struct pas_dma_xct_descr *dp;
276
277 for (i = 0; i < RX_RING_SIZE; i++) {
278 info = &RX_DESC_INFO(mac, i);
279 dp = &RX_DESC(mac, i);
280 if (info->dma) {
281 if (info->skb) {
282 pci_unmap_single(mac->dma_pdev,
283 info->dma,
284 info->skb->len,
285 PCI_DMA_FROMDEVICE);
286 dev_kfree_skb_any(info->skb);
287 }
288 info->dma = 0;
289 info->skb = NULL;
290 dp->macrx = 0;
291 dp->ptr = 0;
292 }
293 }
294
295 dma_free_coherent(&mac->dma_pdev->dev,
296 RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
297 mac->rx->desc, mac->rx->dma);
298
299 dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
300 mac->rx->buffers, mac->rx->buf_dma);
301
302 kfree(mac->rx->desc_info);
303 kfree(mac->rx);
304 mac->rx = NULL;
305}
306
307static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
308{
309 struct pasemi_mac *mac = netdev_priv(dev);
310 unsigned int i;
311 int start = mac->rx->next_to_fill;
312 unsigned int count;
313
314 count = (mac->rx->next_to_clean + RX_RING_SIZE -
315 mac->rx->next_to_fill) & (RX_RING_SIZE - 1);
316
317 /* Check to see if we're doing first-time setup */
318 if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
319 count = RX_RING_SIZE;
320
321 if (count <= 0)
322 return;
323
324 for (i = start; i < start + count; i++) {
325 struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
326 u64 *buff = &RX_BUFF(mac, i);
327 struct sk_buff *skb;
328 dma_addr_t dma;
329
330 skb = dev_alloc_skb(BUF_SIZE);
331
332 if (!skb) {
333 count = i - start;
334 break;
335 }
336
337 skb->dev = dev;
338
339 dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
340 PCI_DMA_FROMDEVICE);
341
342 if (dma_mapping_error(dma)) {
343 dev_kfree_skb_irq(info->skb);
344 count = i - start;
345 break;
346 }
347
348 info->skb = skb;
349 info->dma = dma;
350 *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
351 }
352
353 wmb();
354
355 pci_write_config_dword(mac->dma_pdev,
356 PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
357 count);
358 pci_write_config_dword(mac->dma_pdev,
359 PAS_DMA_RXINT_INCR(mac->dma_if),
360 count);
361
362 mac->rx->next_to_fill += count;
363}
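One subtlety in the replenish routine above: with both next_to_clean and next_to_fill at zero, the masked difference evaluates to zero even though every slot is free, which is exactly why the first-time-setup check forces count to RX_RING_SIZE. A small sketch of the computation (illustrative, not driver code):

/* Free RX slots as computed above; illustrative copy of the expression. */
#define RX_RING_SIZE 512

static unsigned int rx_free_slots(unsigned int next_to_clean,
				  unsigned int next_to_fill)
{
	return (next_to_clean + RX_RING_SIZE - next_to_fill) &
		(RX_RING_SIZE - 1);
}
/* rx_free_slots(0, 0) == 0, hence the explicit count = RX_RING_SIZE above. */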
364
365static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
366{
367 unsigned int i;
368 int start, count;
369
370 spin_lock(&mac->rx->lock);
371
372 start = mac->rx->next_to_clean;
373 count = 0;
374
375 for (i = start; i < (start + RX_RING_SIZE) && count < limit; i++) {
376 struct pas_dma_xct_descr *dp;
377 struct pasemi_mac_buffer *info;
378 struct sk_buff *skb;
379 unsigned int j, len;
380 dma_addr_t dma;
381
382 rmb();
383
384 dp = &RX_DESC(mac, i);
385
386 if (!(dp->macrx & XCT_MACRX_O))
387 break;
388
389 count++;
390
391 info = NULL;
392
393 /* We have to scan for our skb since there's no way
394 * to back-map them from the descriptor, and if we
395 * have several receive channels then they might not
396 * show up in the same order as they were put on the
397 * interface ring.
398 */
399
400 dma = (dp->ptr & XCT_PTR_ADDR_M);
401 for (j = start; j < (start + RX_RING_SIZE); j++) {
402 info = &RX_DESC_INFO(mac, j);
403 if (info->dma == dma)
404 break;
405 }
406
407 BUG_ON(!info);
408 BUG_ON(info->dma != dma);
409
410 pci_unmap_single(mac->dma_pdev, info->dma, info->skb->len,
411 PCI_DMA_FROMDEVICE);
412
413 skb = info->skb;
414
415 len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
416
417 skb_put(skb, len);
418
419 skb->protocol = eth_type_trans(skb, mac->netdev);
420
421 if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
422 skb->ip_summed = CHECKSUM_COMPLETE;
423 skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >>
424 XCT_MACRX_CSUM_S;
425 } else
426 skb->ip_summed = CHECKSUM_NONE;
427
428 mac->stats.rx_bytes += len;
429 mac->stats.rx_packets++;
430
431 netif_receive_skb(skb);
432
433 info->dma = 0;
434 info->skb = NULL;
435 dp->ptr = 0;
436 dp->macrx = 0;
437 }
438
439 mac->rx->next_to_clean += count;
440 pasemi_mac_replenish_rx_ring(mac->netdev);
441
442 spin_unlock(&mac->rx->lock);
443
444 return count;
445}
446
447static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
448{
449 int i;
450 struct pasemi_mac_buffer *info;
451 struct pas_dma_xct_descr *dp;
452 int start, count;
453 int flags;
454
455 spin_lock_irqsave(&mac->tx->lock, flags);
456
457 start = mac->tx->next_to_clean;
458 count = 0;
459
460 for (i = start; i < mac->tx->next_to_use; i++) {
461 dp = &TX_DESC(mac, i);
462 if (!dp || (dp->mactx & XCT_MACTX_O))
463 break;
464
465 count++;
466
467 info = &TX_DESC_INFO(mac, i);
468
469 pci_unmap_single(mac->dma_pdev, info->dma,
470 info->skb->len, PCI_DMA_TODEVICE);
471 dev_kfree_skb_irq(info->skb);
472
473 info->skb = NULL;
474 info->dma = 0;
475 dp->mactx = 0;
476 dp->ptr = 0;
477 }
478 mac->tx->next_to_clean += count;
479 spin_unlock_irqrestore(&mac->tx->lock, flags);
480
481 return count;
482}
483
484
485static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
486{
487 struct net_device *dev = data;
488 struct pasemi_mac *mac = netdev_priv(dev);
489 unsigned int reg;
490
491 if (!(*mac->rx_status & PAS_STATUS_INT))
492 return IRQ_NONE;
493
494 netif_rx_schedule(dev);
495 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
496 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));
497
498 reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
499 PAS_IOB_DMA_RXCH_RESET_DINTC;
500 if (*mac->rx_status & PAS_STATUS_TIMER)
501 reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
502
503 pci_write_config_dword(mac->iob_pdev,
504 PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
505
506
507 return IRQ_HANDLED;
508}
509
510static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
511{
512 struct net_device *dev = data;
513 struct pasemi_mac *mac = netdev_priv(dev);
514 unsigned int reg;
515 int was_full;
516
517 was_full = mac->tx->next_to_clean - mac->tx->next_to_use == TX_RING_SIZE;
518
519 if (!(*mac->tx_status & PAS_STATUS_INT))
520 return IRQ_NONE;
521
522 pasemi_mac_clean_tx(mac);
523
524 reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
525 if (*mac->tx_status & PAS_STATUS_TIMER)
526 reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
527
528 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
529 reg);
530
531 if (was_full)
532 netif_wake_queue(dev);
533
534 return IRQ_HANDLED;
535}
536
537static int pasemi_mac_open(struct net_device *dev)
538{
539 struct pasemi_mac *mac = netdev_priv(dev);
540 unsigned int flags;
541 int ret;
542
543 /* enable rx section */
544 pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
545 PAS_DMA_COM_RXCMD_EN);
546
547 /* enable tx section */
548 pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
549 PAS_DMA_COM_TXCMD_EN);
550
551 flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
552 PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
553 PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
554
555 pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);
556
557 flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
558 PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
559
560 flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
561
562 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
563 PAS_IOB_DMA_RXCH_CFG_CNTTH(30));
564
565 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
566 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
567
568 pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
569
570 ret = pasemi_mac_setup_rx_resources(dev);
571 if (ret)
572 goto out_rx_resources;
573
574 ret = pasemi_mac_setup_tx_resources(dev);
575 if (ret)
576 goto out_tx_resources;
577
578 pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
579 PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
580 PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));
581
582 /* enable rx if */
583 pci_write_config_dword(mac->dma_pdev,
584 PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
585 PAS_DMA_RXINT_RCMDSTA_EN);
586
587 /* enable rx channel */
588 pci_write_config_dword(mac->dma_pdev,
589 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
590 PAS_DMA_RXCHAN_CCMDSTA_EN |
591 PAS_DMA_RXCHAN_CCMDSTA_DU);
592
593 /* enable tx channel */
594 pci_write_config_dword(mac->dma_pdev,
595 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
596 PAS_DMA_TXCHAN_TCMDSTA_EN);
597
598 pasemi_mac_replenish_rx_ring(dev);
599
600 netif_start_queue(dev);
601 netif_poll_enable(dev);
602
603 ret = request_irq(mac->dma_pdev->irq + mac->dma_txch,
604 &pasemi_mac_tx_intr, IRQF_DISABLED,
605 mac->tx->irq_name, dev);
606 if (ret) {
607 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
608 mac->dma_pdev->irq + mac->dma_txch, ret);
609 goto out_tx_int;
610 }
611
612 ret = request_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch,
613 &pasemi_mac_rx_intr, IRQF_DISABLED,
614 mac->rx->irq_name, dev);
615 if (ret) {
616 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
617 mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);
618 goto out_rx_int;
619 }
620
621 return 0;
622
623out_rx_int:
624 free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
625out_tx_int:
626 netif_poll_disable(dev);
627 netif_stop_queue(dev);
628 pasemi_mac_free_tx_resources(dev);
629out_tx_resources:
630 pasemi_mac_free_rx_resources(dev);
631out_rx_resources:
632
633 return ret;
634}
635
636#define MAX_RETRIES 5000
637
638static int pasemi_mac_close(struct net_device *dev)
639{
640 struct pasemi_mac *mac = netdev_priv(dev);
641 unsigned int stat;
642 int retries;
643
644 netif_stop_queue(dev);
645
646 /* Clean out any pending buffers */
647 pasemi_mac_clean_tx(mac);
648 pasemi_mac_clean_rx(mac, RX_RING_SIZE);
649
650 /* Disable interface */
651 pci_write_config_dword(mac->dma_pdev,
652 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
653 PAS_DMA_TXCHAN_TCMDSTA_ST);
654 pci_write_config_dword(mac->dma_pdev,
655 PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
656 PAS_DMA_RXINT_RCMDSTA_ST);
657 pci_write_config_dword(mac->dma_pdev,
658 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
659 PAS_DMA_RXCHAN_CCMDSTA_ST);
660
661 for (retries = 0; retries < MAX_RETRIES; retries++) {
662 pci_read_config_dword(mac->dma_pdev,
663 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
664 &stat);
665 if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
666 break;
667 cond_resched();
668 }
669
670 if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
671 dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
672 }
673
674 for (retries = 0; retries < MAX_RETRIES; retries++) {
675 pci_read_config_dword(mac->dma_pdev,
676 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
677 &stat);
678 if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)
679 break;
680 cond_resched();
681 }
682
683 if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
684 dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
685 }
686
687 for (retries = 0; retries < MAX_RETRIES; retries++) {
688 pci_read_config_dword(mac->dma_pdev,
689 PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
690 &stat);
691 if (stat & PAS_DMA_RXINT_RCMDSTA_ACT)
692 break;
693 cond_resched();
694 }
695
696 if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT)) {
697 dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
698 }
699
700 /* Then, disable the channel. This must be done separately from
701 * stopping, since you can't disable when active.
702 */
703
704 pci_write_config_dword(mac->dma_pdev,
705 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
706 pci_write_config_dword(mac->dma_pdev,
707 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
708 pci_write_config_dword(mac->dma_pdev,
709 PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
710
711 free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
712 free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);
713
714 /* Free resources */
715 pasemi_mac_free_rx_resources(dev);
716 pasemi_mac_free_tx_resources(dev);
717
718 return 0;
719}
720
721static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
722{
723 struct pasemi_mac *mac = netdev_priv(dev);
724 struct pasemi_mac_txring *txring;
725 struct pasemi_mac_buffer *info;
726 struct pas_dma_xct_descr *dp;
727 u64 dflags;
728 dma_addr_t map;
729 int flags;
730
731 dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
732
733 if (skb->ip_summed == CHECKSUM_PARTIAL) {
734 switch (skb->nh.iph->protocol) {
735 case IPPROTO_TCP:
736 dflags |= XCT_MACTX_CSUM_TCP;
737 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
738 dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
739 break;
740 case IPPROTO_UDP:
741 dflags |= XCT_MACTX_CSUM_UDP;
742 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
743 dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
744 break;
745 }
746 }
747
748 map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
749
750 if (dma_mapping_error(map))
751 return NETDEV_TX_BUSY;
752
753 txring = mac->tx;
754
755 spin_lock_irqsave(&txring->lock, flags);
756
757 if (txring->next_to_clean - txring->next_to_use == TX_RING_SIZE) {
758 spin_unlock_irqrestore(&txring->lock, flags);
759 pasemi_mac_clean_tx(mac);
760 spin_lock_irqsave(&txring->lock, flags);
761
762 if (txring->next_to_clean - txring->next_to_use ==
763 TX_RING_SIZE) {
764 /* Still no room -- stop the queue and wait for tx
765 * intr when there's room.
766 */
767 netif_stop_queue(dev);
768 goto out_err;
769 }
770 }
771
772
773 dp = &TX_DESC(mac, txring->next_to_use);
774 info = &TX_DESC_INFO(mac, txring->next_to_use);
775
776 dp->mactx = dflags | XCT_MACTX_LLEN(skb->len);
777 dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
778 info->dma = map;
779 info->skb = skb;
780
781 txring->next_to_use++;
782 mac->stats.tx_packets++;
783 mac->stats.tx_bytes += skb->len;
784
785 spin_unlock_irqrestore(&txring->lock, flags);
786
787 pci_write_config_dword(mac->dma_pdev,
788 PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
789
790 return NETDEV_TX_OK;
791
792out_err:
793 spin_unlock_irqrestore(&txring->lock, flags);
794 pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE);
795 return NETDEV_TX_BUSY;
796}
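For reference, the checksum-offload fields set in the transmit path above resolve to small constants in the common case: XCT_MACTX_IPH is passed the IPv4 header length in 32-bit words (skb->h.raw - skb->nh.raw, shifted right by two) and XCT_MACTX_IPO the byte offset of the IP header within the frame (skb->nh.raw - skb->data). A worked example for an untagged Ethernet frame with a 20-byte IPv4 header; the field semantics are inferred from the offsets the driver passes, so treat them as an assumption:

#include <stdio.h>

/* Worked example only; constants mirror a plain Ethernet + IPv4 frame. */
int main(void)
{
	unsigned int ip_offset    = 14;		/* skb->nh.raw - skb->data  */
	unsigned int ip_hdr_bytes = 20;		/* skb->h.raw - skb->nh.raw */

	printf("IPH argument: %u 32-bit words\n", ip_hdr_bytes >> 2);	/* 5  */
	printf("IPO argument: %u bytes into the frame\n", ip_offset);	/* 14 */
	return 0;
}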
797
798static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
799{
800 struct pasemi_mac *mac = netdev_priv(dev);
801
802 return &mac->stats;
803}
804
805static void pasemi_mac_set_rx_mode(struct net_device *dev)
806{
807 struct pasemi_mac *mac = netdev_priv(dev);
808 unsigned int flags;
809
810 pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
811
812 /* Set promiscuous */
813 if (dev->flags & IFF_PROMISC)
814 flags |= PAS_MAC_CFG_PCFG_PR;
815 else
816 flags &= ~PAS_MAC_CFG_PCFG_PR;
817
818 pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
819}
820
821
822static int pasemi_mac_poll(struct net_device *dev, int *budget)
823{
824 int pkts, limit = min(*budget, dev->quota);
825 struct pasemi_mac *mac = netdev_priv(dev);
826
827 pkts = pasemi_mac_clean_rx(mac, limit);
828
829 if (pkts < limit) {
830 /* all done, no more packets present */
831 netif_rx_complete(dev);
832
833 /* re-enable receive interrupts */
834 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
835 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
836 return 0;
837 } else {
838 /* used up our quantum, so reschedule */
839 dev->quota -= pkts;
840 *budget -= pkts;
841 return 1;
842 }
843}
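The poll routine above follows the old dev->poll contract: consume at most min(*budget, dev->quota) packets; if everything pending was handled, call netif_rx_complete(), re-arm the receive interrupt and return 0; otherwise charge the work against both dev->quota and *budget and return 1 so the core keeps polling. A commented skeleton of that contract; helper names other than the netif_* calls are placeholders:

/* Skeleton of the old dev->poll contract; rx_work()/rearm_rx_irq() are
 * placeholders standing in for pasemi_mac_clean_rx() and the IOB write. */
static int poll_skeleton(struct net_device *dev, int *budget)
{
	int limit = min(*budget, dev->quota);
	int done  = rx_work(dev, limit);

	if (done < limit) {
		netif_rx_complete(dev);		/* leave the poll list */
		rearm_rx_irq(dev);
		return 0;
	}

	dev->quota -= done;			/* more packets pending */
	*budget    -= done;
	return 1;				/* poll again */
}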
844
845static int __devinit
846pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
847{
848 static int index = 0;
849 struct net_device *dev;
850 struct pasemi_mac *mac;
851 int err;
852
853 err = pci_enable_device(pdev);
854 if (err)
855 return err;
856
857 dev = alloc_etherdev(sizeof(struct pasemi_mac));
858 if (dev == NULL) {
859 dev_err(&pdev->dev,
860 "pasemi_mac: Could not allocate ethernet device.\n");
861 err = -ENOMEM;
862 goto out_disable_device;
863 }
864
865 SET_MODULE_OWNER(dev);
866 pci_set_drvdata(pdev, dev);
867 SET_NETDEV_DEV(dev, &pdev->dev);
868
869 mac = netdev_priv(dev);
870
871 mac->pdev = pdev;
872 mac->netdev = dev;
873 mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
874
875 if (!mac->dma_pdev) {
876 dev_err(&pdev->dev, "Can't find DMA Controller\n");
877 err = -ENODEV;
878 goto out_free_netdev;
879 }
880
881 mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
882
883 if (!mac->iob_pdev) {
884 dev_err(&pdev->dev, "Can't find I/O Bridge\n");
885 err = -ENODEV;
886 goto out_put_dma_pdev;
887 }
888
889 /* These should come out of the device tree eventually */
890 mac->dma_txch = index;
891 mac->dma_rxch = index;
892
893 /* We probe GMAC before XAUI, but the DMA interfaces are
894 * in XAUI, GMAC order.
895 */
896 if (index < 4)
897 mac->dma_if = index + 2;
898 else
899 mac->dma_if = index - 4;
900 index++;
901
902 switch (pdev->device) {
903 case 0xa005:
904 mac->type = MAC_TYPE_GMAC;
905 break;
906 case 0xa006:
907 mac->type = MAC_TYPE_XAUI;
908 break;
909 default:
910 err = -ENODEV;
911 goto out;
912 }
913
914 /* get mac addr from device tree */
915 if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
916 err = -ENODEV;
917 goto out;
918 }
919 memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
920
921 dev->open = pasemi_mac_open;
922 dev->stop = pasemi_mac_close;
923 dev->hard_start_xmit = pasemi_mac_start_tx;
924 dev->get_stats = pasemi_mac_get_stats;
925 dev->set_multicast_list = pasemi_mac_set_rx_mode;
926 dev->weight = 64;
927 dev->poll = pasemi_mac_poll;
928 dev->features = NETIF_F_HW_CSUM;
929
930 /* The dma status structure is located in the I/O bridge, and
931 * is cache coherent.
932 */
933 if (!dma_status)
934 /* XXXOJN This should come from the device tree */
935 dma_status = __ioremap(0xfd800000, 0x1000, 0);
936
937 mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
938 mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
939
940 err = register_netdev(dev);
941
942 if (err) {
943 dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
944 err);
945 goto out;
946 } else
947 printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
948 "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
949 dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
950 mac->dma_if, mac->dma_txch, mac->dma_rxch,
951 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
952 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
953
954 return err;
955
956out:
957 pci_dev_put(mac->iob_pdev);
958out_put_dma_pdev:
959 pci_dev_put(mac->dma_pdev);
960out_free_netdev:
961 free_netdev(dev);
962out_disable_device:
963 pci_disable_device(pdev);
964 return err;
965
966}
967
968static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
969{
970 struct net_device *netdev = pci_get_drvdata(pdev);
971 struct pasemi_mac *mac;
972
973 if (!netdev)
974 return;
975
976 mac = netdev_priv(netdev);
977
978 unregister_netdev(netdev);
979
980 pci_disable_device(pdev);
981 pci_dev_put(mac->dma_pdev);
982 pci_dev_put(mac->iob_pdev);
983
984 pci_set_drvdata(pdev, NULL);
985 free_netdev(netdev);
986}
987
988static struct pci_device_id pasemi_mac_pci_tbl[] = {
989 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
990 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
991};
992
993MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
994
995static struct pci_driver pasemi_mac_driver = {
996 .name = "pasemi_mac",
997 .id_table = pasemi_mac_pci_tbl,
998 .probe = pasemi_mac_probe,
999 .remove = __devexit_p(pasemi_mac_remove),
1000};
1001
1002static void __exit pasemi_mac_cleanup_module(void)
1003{
1004 pci_unregister_driver(&pasemi_mac_driver);
1005 __iounmap(dma_status);
1006 dma_status = NULL;
1007}
1008
1009int pasemi_mac_init_module(void)
1010{
1011 return pci_register_driver(&pasemi_mac_driver);
1012}
1013
1014MODULE_LICENSE("GPL");
1015MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
1016MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
1017
1018module_init(pasemi_mac_init_module);
1019module_exit(pasemi_mac_cleanup_module);
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h
new file mode 100644
index 000000000000..c3e37e46a18a
--- /dev/null
+++ b/drivers/net/pasemi_mac.h
@@ -0,0 +1,460 @@
1/*
2 * Copyright (C) 2006 PA Semi, Inc
3 *
4 * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and
5 * hardware register layouts.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef PASEMI_MAC_H
22#define PASEMI_MAC_H
23
24#include <linux/ethtool.h>
25#include <linux/netdevice.h>
26#include <linux/spinlock.h>
27
28struct pasemi_mac_txring {
29 spinlock_t lock;
30 struct pas_dma_xct_descr *desc;
31 dma_addr_t dma;
32 unsigned int size;
33 unsigned int next_to_use;
34 unsigned int next_to_clean;
35 struct pasemi_mac_buffer *desc_info;
36 char irq_name[10]; /* "eth%d tx" */
37};
38
39struct pasemi_mac_rxring {
40 spinlock_t lock;
41 struct pas_dma_xct_descr *desc; /* RX channel descriptor ring */
42 dma_addr_t dma;
43 u64 *buffers; /* RX interface buffer ring */
44 dma_addr_t buf_dma;
45 unsigned int size;
46 unsigned int next_to_fill;
47 unsigned int next_to_clean;
48 struct pasemi_mac_buffer *desc_info;
49 char irq_name[10]; /* "eth%d rx" */
50};
51
52struct pasemi_mac {
53 struct net_device *netdev;
54 struct pci_dev *pdev;
55 struct pci_dev *dma_pdev;
56 struct pci_dev *iob_pdev;
57 struct net_device_stats stats;
58
59 /* Pointer to the cacheable per-channel status registers */
60 u64 *rx_status;
61 u64 *tx_status;
62
63 u8 type;
64#define MAC_TYPE_GMAC 1
65#define MAC_TYPE_XAUI 2
66 u32 dma_txch;
67 u32 dma_if;
68 u32 dma_rxch;
69
70 u8 mac_addr[6];
71
72 struct timer_list rxtimer;
73
74 struct pasemi_mac_txring *tx;
75 struct pasemi_mac_rxring *rx;
76};
77
78/* Software status descriptor (desc_info) */
79struct pasemi_mac_buffer {
80 struct sk_buff *skb;
81 dma_addr_t dma;
82};
83
84
85/* status register layout in IOB region, at 0xfb800000 */
86struct pasdma_status {
87 u64 rx_sta[64];
88 u64 tx_sta[20];
89};
90
91/* descriptor structure */
92struct pas_dma_xct_descr {
93 union {
94 u64 mactx;
95 u64 macrx;
96 };
97 union {
98 u64 ptr;
99 u64 rxb;
100 };
101};
102
103/* MAC CFG register offsets */
104
105enum {
106 PAS_MAC_CFG_PCFG = 0x80,
107 PAS_MAC_CFG_TXP = 0x98,
108 PAS_MAC_IPC_CHNL = 0x208,
109};
110
111/* MAC CFG register fields */
112#define PAS_MAC_CFG_PCFG_PE 0x80000000
113#define PAS_MAC_CFG_PCFG_CE 0x40000000
114#define PAS_MAC_CFG_PCFG_BU 0x20000000
115#define PAS_MAC_CFG_PCFG_TT 0x10000000
116#define PAS_MAC_CFG_PCFG_TSR_M 0x0c000000
117#define PAS_MAC_CFG_PCFG_TSR_10M 0x00000000
118#define PAS_MAC_CFG_PCFG_TSR_100M 0x04000000
119#define PAS_MAC_CFG_PCFG_TSR_1G 0x08000000
120#define PAS_MAC_CFG_PCFG_TSR_10G 0x0c000000
121#define PAS_MAC_CFG_PCFG_T24 0x02000000
122#define PAS_MAC_CFG_PCFG_PR 0x01000000
123#define PAS_MAC_CFG_PCFG_CRO_M 0x00ff0000
124#define PAS_MAC_CFG_PCFG_CRO_S 16
125#define PAS_MAC_CFG_PCFG_IPO_M 0x0000ff00
126#define PAS_MAC_CFG_PCFG_IPO_S 8
127#define PAS_MAC_CFG_PCFG_S1 0x00000080
128#define PAS_MAC_CFG_PCFG_IO_M 0x00000060
129#define PAS_MAC_CFG_PCFG_IO_MAC 0x00000000
130#define PAS_MAC_CFG_PCFG_IO_OFF 0x00000020
131#define PAS_MAC_CFG_PCFG_IO_IND_ETH 0x00000040
132#define PAS_MAC_CFG_PCFG_IO_IND_IP 0x00000060
133#define PAS_MAC_CFG_PCFG_LP 0x00000010
134#define PAS_MAC_CFG_PCFG_TS 0x00000008
135#define PAS_MAC_CFG_PCFG_HD 0x00000004
136#define PAS_MAC_CFG_PCFG_SPD_M 0x00000003
137#define PAS_MAC_CFG_PCFG_SPD_10M 0x00000000
138#define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001
139#define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002
140#define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003
141#define PAS_MAC_CFG_TXP_FCF 0x01000000
142#define PAS_MAC_CFG_TXP_FCE 0x00800000
143#define PAS_MAC_CFG_TXP_FC 0x00400000
144#define PAS_MAC_CFG_TXP_FPC_M 0x00300000
145#define PAS_MAC_CFG_TXP_FPC_S 20
146#define PAS_MAC_CFG_TXP_FPC(x) (((x) << PAS_MAC_CFG_TXP_FPC_S) & \
147 PAS_MAC_CFG_TXP_FPC_M)
148#define PAS_MAC_CFG_TXP_RT 0x00080000
149#define PAS_MAC_CFG_TXP_BL 0x00040000
150#define PAS_MAC_CFG_TXP_SL_M 0x00030000
151#define PAS_MAC_CFG_TXP_SL_S 16
152#define PAS_MAC_CFG_TXP_SL(x) (((x) << PAS_MAC_CFG_TXP_SL_S) & \
153 PAS_MAC_CFG_TXP_SL_M)
154#define PAS_MAC_CFG_TXP_COB_M 0x0000f000
155#define PAS_MAC_CFG_TXP_COB_S 12
156#define PAS_MAC_CFG_TXP_COB(x) (((x) << PAS_MAC_CFG_TXP_COB_S) & \
157 PAS_MAC_CFG_TXP_COB_M)
158#define PAS_MAC_CFG_TXP_TIFT_M 0x00000f00
159#define PAS_MAC_CFG_TXP_TIFT_S 8
160#define PAS_MAC_CFG_TXP_TIFT(x) (((x) << PAS_MAC_CFG_TXP_TIFT_S) & \
161 PAS_MAC_CFG_TXP_TIFT_M)
162#define PAS_MAC_CFG_TXP_TIFG_M 0x000000ff
163#define PAS_MAC_CFG_TXP_TIFG_S 0
164#define PAS_MAC_CFG_TXP_TIFG(x) (((x) << PAS_MAC_CFG_TXP_TIFG_S) & \
165 PAS_MAC_CFG_TXP_TIFG_M)
166
167#define PAS_MAC_IPC_CHNL_DCHNO_M 0x003f0000
168#define PAS_MAC_IPC_CHNL_DCHNO_S 16
169#define PAS_MAC_IPC_CHNL_DCHNO(x) (((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \
170 PAS_MAC_IPC_CHNL_DCHNO_M)
171#define PAS_MAC_IPC_CHNL_BCH_M 0x0000003f
172#define PAS_MAC_IPC_CHNL_BCH_S 0
173#define PAS_MAC_IPC_CHNL_BCH(x) (((x) << PAS_MAC_IPC_CHNL_BCH_S) & \
174 PAS_MAC_IPC_CHNL_BCH_M)
175
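Every multi-bit field in this header follows the same convention: a FIELD_M mask, a FIELD_S shift, and a FIELD(x) helper that shifts a value into place and masks it. Composing a register word is then a matter of OR-ing helpers together, as pasemi_mac_open() does for PAS_MAC_CFG_TXP, and reading a field back is the mask-and-shift inverse. A standalone sketch using one of the fields defined above (not driver code):

#include <stdio.h>

#define PAS_MAC_CFG_TXP_FPC_M	0x00300000
#define PAS_MAC_CFG_TXP_FPC_S	20
#define PAS_MAC_CFG_TXP_FPC(x)	(((x) << PAS_MAC_CFG_TXP_FPC_S) & \
				 PAS_MAC_CFG_TXP_FPC_M)

int main(void)
{
	unsigned int txp = PAS_MAC_CFG_TXP_FPC(3);
	unsigned int fpc = (txp & PAS_MAC_CFG_TXP_FPC_M) >> PAS_MAC_CFG_TXP_FPC_S;

	printf("TXP word 0x%08x, FPC field %u\n", txp, fpc);	/* 0x00300000, 3 */
	return 0;
}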
176/* All these registers live in the PCI configuration space for the DMA PCI
177 * device. Use the normal PCI config access functions for them.
178 */
179enum {
180 PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */
181 PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */
182 PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */
183 PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */
184};
185#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */
186#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */
187#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */
188#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */
189
190
191/* Per-interface and per-channel registers */
192#define _PAS_DMA_RXINT_STRIDE 0x20
193#define PAS_DMA_RXINT_RCMDSTA(i) (0x200+(i)*_PAS_DMA_RXINT_STRIDE)
194#define PAS_DMA_RXINT_RCMDSTA_EN 0x00000001
195#define PAS_DMA_RXINT_RCMDSTA_ST 0x00000002
196#define PAS_DMA_RXINT_RCMDSTA_OO 0x00000100
197#define PAS_DMA_RXINT_RCMDSTA_BP 0x00000200
198#define PAS_DMA_RXINT_RCMDSTA_DR 0x00000400
199#define PAS_DMA_RXINT_RCMDSTA_BT 0x00000800
200#define PAS_DMA_RXINT_RCMDSTA_TB 0x00001000
201#define PAS_DMA_RXINT_RCMDSTA_ACT 0x00010000
202#define PAS_DMA_RXINT_RCMDSTA_DROPS_M 0xfffe0000
203#define PAS_DMA_RXINT_RCMDSTA_DROPS_S 17
204#define PAS_DMA_RXINT_INCR(i) (0x210+(i)*_PAS_DMA_RXINT_STRIDE)
205#define PAS_DMA_RXINT_INCR_INCR_M 0x0000ffff
206#define PAS_DMA_RXINT_INCR_INCR_S 0
207#define PAS_DMA_RXINT_INCR_INCR(x) ((x) & 0x0000ffff)
208#define PAS_DMA_RXINT_BASEL(i) (0x218+(i)*_PAS_DMA_RXINT_STRIDE)
209#define PAS_DMA_RXINT_BASEL_BRBL(x) ((x) & ~0x3f)
210#define PAS_DMA_RXINT_BASEU(i) (0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
211#define PAS_DMA_RXINT_BASEU_BRBH(x) ((x) & 0xfff)
212#define PAS_DMA_RXINT_BASEU_SIZ_M 0x3fff0000 /* # of cache lines worth of buffer ring */
213#define PAS_DMA_RXINT_BASEU_SIZ_S 16 /* 0 = 16K */
214#define PAS_DMA_RXINT_BASEU_SIZ(x) (((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
215 PAS_DMA_RXINT_BASEU_SIZ_M)
216
217
218#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */
219#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */
220#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */
221#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */
222#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */
223#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */
224#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */
225#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */
226#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
227#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */
228#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */
229#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */
230#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
231#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */
232#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c
233#define PAS_DMA_TXCHAN_CFG_TATTR_S 2
234#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
235 PAS_DMA_TXCHAN_CFG_TATTR_M)
236#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0
237#define PAS_DMA_TXCHAN_CFG_WT_S 6
238#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
239 PAS_DMA_TXCHAN_CFG_WT_M)
240#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */
241#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */
242#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */
243#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
244#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
245#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
246#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0
247#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
248 PAS_DMA_TXCHAN_BASEL_BRBL_M)
249#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
250#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
251#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0
252#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
253 PAS_DMA_TXCHAN_BASEU_BRBH_M)
254/* # of cache lines worth of buffer ring */
255#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000
256#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
257#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
258 PAS_DMA_TXCHAN_BASEU_SIZ_M)
259
260#define _PAS_DMA_RXCHAN_STRIDE 0x20 /* Size per channel */
261#define _PAS_DMA_RXCHAN_CCMDSTA 0x800 /* Command / Status */
262#define _PAS_DMA_RXCHAN_CFG 0x804 /* Configuration */
263#define _PAS_DMA_RXCHAN_INCR 0x810 /* Descriptor increment */
264#define _PAS_DMA_RXCHAN_CNT 0x814 /* Descriptor count/offset */
265#define _PAS_DMA_RXCHAN_BASEL 0x818 /* Descriptor ring base (low) */
266#define _PAS_DMA_RXCHAN_BASEU 0x81c /* (high) */
267#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
268#define PAS_DMA_RXCHAN_CCMDSTA_EN 0x00000001 /* Enabled */
269#define PAS_DMA_RXCHAN_CCMDSTA_ST 0x00000002 /* Stop interface */
270#define PAS_DMA_RXCHAN_CCMDSTA_ACT 0x00010000 /* Active */
271#define PAS_DMA_RXCHAN_CCMDSTA_DU 0x00020000
272#define PAS_DMA_RXCHAN_CFG(c) (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
273#define PAS_DMA_RXCHAN_CFG_HBU_M 0x00000380
274#define PAS_DMA_RXCHAN_CFG_HBU_S 7
275#define PAS_DMA_RXCHAN_CFG_HBU(x) (((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
276 PAS_DMA_RXCHAN_CFG_HBU_M)
277#define PAS_DMA_RXCHAN_INCR(c) (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
278#define PAS_DMA_RXCHAN_BASEL(c) (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
279#define PAS_DMA_RXCHAN_BASEL_BRBL_M 0xffffffc0
280#define PAS_DMA_RXCHAN_BASEL_BRBL_S 0
281#define PAS_DMA_RXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
282 PAS_DMA_RXCHAN_BASEL_BRBL_M)
283#define PAS_DMA_RXCHAN_BASEU(c) (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
284#define PAS_DMA_RXCHAN_BASEU_BRBH_M 0x00000fff
285#define PAS_DMA_RXCHAN_BASEU_BRBH_S 0
286#define PAS_DMA_RXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
287 PAS_DMA_RXCHAN_BASEU_BRBH_M)
288/* # of cache lines worth of buffer ring */
289#define PAS_DMA_RXCHAN_BASEU_SIZ_M 0x3fff0000
290#define PAS_DMA_RXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
291#define PAS_DMA_RXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
292 PAS_DMA_RXCHAN_BASEU_SIZ_M)
293
294#define PAS_STATUS_PCNT_M 0x000000000000ffffull
295#define PAS_STATUS_PCNT_S 0
296#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull
297#define PAS_STATUS_DCNT_S 16
298#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull
299#define PAS_STATUS_BPCNT_S 32
300#define PAS_STATUS_TIMER 0x1000000000000000ull
301#define PAS_STATUS_ERROR 0x2000000000000000ull
302#define PAS_STATUS_SOFT 0x4000000000000000ull
303#define PAS_STATUS_INT 0x8000000000000000ull
304
305#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4)
306#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff
307#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0
308#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
309 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
310#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4)
311#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff
312#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0
313#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
314 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
315#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4)
316#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000
317#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff
318#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0
319#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
320 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
321#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4)
322#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000
323#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff
324#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0
325#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
326 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
327#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4)
328#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000
329#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 0
330#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
331 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
332#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020
333#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010
334#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008
335#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004
336#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002
337#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001
338#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4)
339#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000
340#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 0
341#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
342 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
343#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020
344#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010
345#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008
346#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004
347#define PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002
348#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001
349
350#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700
351#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff
352#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0
353#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
354 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
355
356/* Transmit descriptor fields */
357#define XCT_MACTX_T 0x8000000000000000ull
358#define XCT_MACTX_ST 0x4000000000000000ull
359#define XCT_MACTX_NORES 0x0000000000000000ull
360#define XCT_MACTX_8BRES 0x1000000000000000ull
361#define XCT_MACTX_24BRES 0x2000000000000000ull
362#define XCT_MACTX_40BRES 0x3000000000000000ull
363#define XCT_MACTX_I 0x0800000000000000ull
364#define XCT_MACTX_O 0x0400000000000000ull
365#define XCT_MACTX_E 0x0200000000000000ull
366#define XCT_MACTX_VLAN_M 0x0180000000000000ull
367#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull
368#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull
369#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull
370#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull
371#define XCT_MACTX_CRC_M 0x0060000000000000ull
372#define XCT_MACTX_CRC_NOP 0x0000000000000000ull
373#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull
374#define XCT_MACTX_CRC_PAD 0x0040000000000000ull
375#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull
376#define XCT_MACTX_SS 0x0010000000000000ull
377#define XCT_MACTX_LLEN_M 0x00007fff00000000ull
378#define XCT_MACTX_LLEN_S 32ull
379#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \
380 XCT_MACTX_LLEN_M)
381#define XCT_MACTX_IPH_M 0x00000000f8000000ull
382#define XCT_MACTX_IPH_S 27ull
383#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \
384 XCT_MACTX_IPH_M)
385#define XCT_MACTX_IPO_M 0x0000000007c00000ull
386#define XCT_MACTX_IPO_S 22ull
387#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \
388 XCT_MACTX_IPO_M)
389#define XCT_MACTX_CSUM_M 0x0000000000000060ull
390#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull
391#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull
392#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull
393#define XCT_MACTX_V6 0x0000000000000010ull
394#define XCT_MACTX_C 0x0000000000000004ull
395#define XCT_MACTX_AL2 0x0000000000000002ull
396
397/* Receive descriptor fields */
398#define XCT_MACRX_T 0x8000000000000000ull
399#define XCT_MACRX_ST 0x4000000000000000ull
400#define XCT_MACRX_NORES 0x0000000000000000ull
401#define XCT_MACRX_8BRES 0x1000000000000000ull
402#define XCT_MACRX_24BRES 0x2000000000000000ull
403#define XCT_MACRX_40BRES 0x3000000000000000ull
404#define XCT_MACRX_O 0x0400000000000000ull
405#define XCT_MACRX_E 0x0200000000000000ull
406#define XCT_MACRX_FF 0x0100000000000000ull
407#define XCT_MACRX_PF 0x0080000000000000ull
408#define XCT_MACRX_OB 0x0040000000000000ull
409#define XCT_MACRX_OD 0x0020000000000000ull
410#define XCT_MACRX_FS 0x0010000000000000ull
411#define XCT_MACRX_NB_M 0x000fc00000000000ull
412#define XCT_MACRX_NB_S 46ULL
413#define XCT_MACRX_NB(x) ((((long)(x)) << XCT_MACRX_NB_S) & \
414 XCT_MACRX_NB_M)
415#define XCT_MACRX_LLEN_M 0x00003fff00000000ull
416#define XCT_MACRX_LLEN_S 32ULL
417#define XCT_MACRX_LLEN(x) ((((long)(x)) << XCT_MACRX_LLEN_S) & \
418 XCT_MACRX_LLEN_M)
419#define XCT_MACRX_CRC 0x0000000080000000ull
420#define XCT_MACRX_LEN_M 0x0000000060000000ull
421#define XCT_MACRX_LEN_TOOSHORT 0x0000000020000000ull
422#define XCT_MACRX_LEN_BELOWMIN 0x0000000040000000ull
423#define XCT_MACRX_LEN_TRUNC 0x0000000060000000ull
424#define XCT_MACRX_CAST_M 0x0000000018000000ull
425#define XCT_MACRX_CAST_UNI 0x0000000000000000ull
426#define XCT_MACRX_CAST_MULTI 0x0000000008000000ull
427#define XCT_MACRX_CAST_BROAD 0x0000000010000000ull
428#define XCT_MACRX_CAST_PAUSE 0x0000000018000000ull
429#define XCT_MACRX_VLC_M 0x0000000006000000ull
430#define XCT_MACRX_FM 0x0000000001000000ull
431#define XCT_MACRX_HTY_M 0x0000000000c00000ull
432#define XCT_MACRX_HTY_IPV4_OK 0x0000000000000000ull
433#define XCT_MACRX_HTY_IPV6 0x0000000000400000ull
434#define XCT_MACRX_HTY_IPV4_BAD 0x0000000000800000ull
435#define XCT_MACRX_HTY_NONIP 0x0000000000c00000ull
436#define XCT_MACRX_IPP_M 0x00000000003f0000ull
437#define XCT_MACRX_IPP_S 16
438#define XCT_MACRX_CSUM_M 0x000000000000ffffull
439#define XCT_MACRX_CSUM_S 0
440
441#define XCT_PTR_T 0x8000000000000000ull
442#define XCT_PTR_LEN_M 0x7ffff00000000000ull
443#define XCT_PTR_LEN_S 44
444#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \
445 XCT_PTR_LEN_M)
446#define XCT_PTR_ADDR_M 0x00000fffffffffffull
447#define XCT_PTR_ADDR_S 0
448#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \
449 XCT_PTR_ADDR_M)
450
451/* Receive interface buffer fields */
452#define XCT_RXB_LEN_M 0x0ffff00000000000ull
453#define XCT_RXB_LEN_S 44
454#define XCT_RXB_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & XCT_PTR_LEN_M)
455#define XCT_RXB_ADDR_M 0x00000fffffffffffull
456#define XCT_RXB_ADDR_S 0
457#define XCT_RXB_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & XCT_PTR_ADDR_M)
458
459
460#endif /* PASEMI_MAC_H */
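For orientation, a sketch of how the XCT_MACTX_* field builders above are meant to be combined into a transmit-descriptor control word. The variable names are hypothetical and the field meanings are inferred from the macro names only; this is not code from the driver:

	u64 mactx = XCT_MACTX_CRC_INSERT	/* MAC appends the frame CRC */
		  | XCT_MACTX_CSUM_TCP		/* request TCP checksum generation */
		  | XCT_MACTX_LLEN(len)		/* frame length field (bits 46:32) */
		  | XCT_MACTX_IPH(ip_hlen)	/* IP header length field (assumed) */
		  | XCT_MACTX_IPO(ip_off);	/* IP header offset field (assumed) */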
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 8844c20eac2d..2429b274f0b0 100644..100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -22,6 +22,7 @@
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/ioport.h> 23#include <linux/ioport.h>
24#include <linux/ip.h> 24#include <linux/ip.h>
25#include <linux/in.h>
25#include <linux/if_arp.h> 26#include <linux/if_arp.h>
26#include <linux/if_ether.h> 27#include <linux/if_ether.h>
27#include <linux/netdevice.h> 28#include <linux/netdevice.h>
@@ -63,6 +64,7 @@ MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
63 64
64static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = { 65static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, 66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
67 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
66 /* required last entry */ 68 /* required last entry */
67 {0,} 69 {0,}
68}; 70};
@@ -1475,6 +1477,10 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
1475 2) << 7)) 1477 2) << 7))
1476 return -1; 1478 return -1;
1477 1479
1480 if (qdev->device_id == QL3032_DEVICE_ID)
1481 ql_write_page0_reg(qdev,
1482 &port_regs->macMIIMgmtControlReg, 0x0f00000);
1483
1478 /* Divide 125MHz clock by 28 to meet PHY timing requirements */ 1484 /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1479 reg = MAC_MII_CONTROL_CLK_SEL_DIV28; 1485 reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1480 1486
@@ -1706,18 +1712,42 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1706 struct ob_mac_iocb_rsp *mac_rsp) 1712 struct ob_mac_iocb_rsp *mac_rsp)
1707{ 1713{
1708 struct ql_tx_buf_cb *tx_cb; 1714 struct ql_tx_buf_cb *tx_cb;
1715 int i;
1709 1716
1710 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1717 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1711 pci_unmap_single(qdev->pdev, 1718 pci_unmap_single(qdev->pdev,
1712 pci_unmap_addr(tx_cb, mapaddr), 1719 pci_unmap_addr(&tx_cb->map[0], mapaddr),
1713 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE); 1720 pci_unmap_len(&tx_cb->map[0], maplen),
1714 dev_kfree_skb_irq(tx_cb->skb); 1721 PCI_DMA_TODEVICE);
1722 tx_cb->seg_count--;
1723 if (tx_cb->seg_count) {
1724 for (i = 1; i < tx_cb->seg_count; i++) {
1725 pci_unmap_page(qdev->pdev,
1726 pci_unmap_addr(&tx_cb->map[i],
1727 mapaddr),
1728 pci_unmap_len(&tx_cb->map[i], maplen),
1729 PCI_DMA_TODEVICE);
1730 }
1731 }
1715 qdev->stats.tx_packets++; 1732 qdev->stats.tx_packets++;
1716 qdev->stats.tx_bytes += tx_cb->skb->len; 1733 qdev->stats.tx_bytes += tx_cb->skb->len;
1734 dev_kfree_skb_irq(tx_cb->skb);
1717 tx_cb->skb = NULL; 1735 tx_cb->skb = NULL;
1718 atomic_inc(&qdev->tx_count); 1736 atomic_inc(&qdev->tx_count);
1719} 1737}
1720 1738
1739/*
1740 * The difference between 3022 and 3032 for inbound completions:
1741 * 3022 uses two buffers per completion. The first buffer contains
1742 * (some) header info, the second the remainder of the headers plus
1743 * the data. For this chip we reserve some space at the top of the
1744 * receive buffer so that the header info in buffer one can be
 1745 * prepended to buffer two. Buffer two is then sent up while
1746 * buffer one is returned to the hardware to be reused.
 1747 * 3032 receives all of its data and headers in one buffer for a
1748 * simpler process. 3032 also supports checksum verification as
1749 * can be seen in ql_process_macip_rx_intr().
1750 */
1721static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, 1751static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1722 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) 1752 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1723{ 1753{
@@ -1740,14 +1770,17 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1740 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; 1770 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1741 qdev->small_buf_release_cnt++; 1771 qdev->small_buf_release_cnt++;
1742 1772
1743 /* start of first buffer */ 1773 if (qdev->device_id == QL3022_DEVICE_ID) {
1744 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1774 /* start of first buffer (3022 only) */
1745 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; 1775 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1746 qdev->lrg_buf_release_cnt++; 1776 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1747 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) 1777 qdev->lrg_buf_release_cnt++;
1748 qdev->lrg_buf_index = 0; 1778 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
1749 curr_ial_ptr++; /* 64-bit pointers require two incs. */ 1779 qdev->lrg_buf_index = 0;
1750 curr_ial_ptr++; 1780 }
1781 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1782 curr_ial_ptr++;
1783 }
1751 1784
1752 /* start of second buffer */ 1785 /* start of second buffer */
1753 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1786 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1778,7 +1811,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1778 qdev->ndev->last_rx = jiffies; 1811 qdev->ndev->last_rx = jiffies;
1779 lrg_buf_cb2->skb = NULL; 1812 lrg_buf_cb2->skb = NULL;
1780 1813
1781 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 1814 if (qdev->device_id == QL3022_DEVICE_ID)
1815 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1782 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 1816 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1783} 1817}
1784 1818
@@ -1790,7 +1824,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1790 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 1824 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1791 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 1825 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1792 u32 *curr_ial_ptr; 1826 u32 *curr_ial_ptr;
1793 struct sk_buff *skb1, *skb2; 1827 struct sk_buff *skb1 = NULL, *skb2;
1794 struct net_device *ndev = qdev->ndev; 1828 struct net_device *ndev = qdev->ndev;
1795 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); 1829 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
1796 u16 size = 0; 1830 u16 size = 0;
@@ -1806,16 +1840,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1806 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; 1840 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1807 qdev->small_buf_release_cnt++; 1841 qdev->small_buf_release_cnt++;
1808 1842
1809 /* start of first buffer */ 1843 if (qdev->device_id == QL3022_DEVICE_ID) {
1810 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1844 /* start of first buffer on 3022 */
1811 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; 1845 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1812 1846 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1813 qdev->lrg_buf_release_cnt++; 1847 qdev->lrg_buf_release_cnt++;
1814 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) 1848 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1815 qdev->lrg_buf_index = 0; 1849 qdev->lrg_buf_index = 0;
1816 skb1 = lrg_buf_cb1->skb; 1850 skb1 = lrg_buf_cb1->skb;
1817 curr_ial_ptr++; /* 64-bit pointers require two incs. */ 1851 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1818 curr_ial_ptr++; 1852 curr_ial_ptr++;
1853 size = ETH_HLEN;
1854 if (*((u16 *) skb1->data) != 0xFFFF)
1855 size += VLAN_ETH_HLEN - ETH_HLEN;
1856 }
1819 1857
1820 /* start of second buffer */ 1858 /* start of second buffer */
1821 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1859 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1825,18 +1863,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1825 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) 1863 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1826 qdev->lrg_buf_index = 0; 1864 qdev->lrg_buf_index = 0;
1827 1865
1828 qdev->stats.rx_packets++;
1829 qdev->stats.rx_bytes += length;
1830
1831 /*
1832 * Copy the ethhdr from first buffer to second. This
1833 * is necessary for IP completions.
1834 */
1835 if (*((u16 *) skb1->data) != 0xFFFF)
1836 size = VLAN_ETH_HLEN;
1837 else
1838 size = ETH_HLEN;
1839
1840 skb_put(skb2, length); /* Just the second buffer length here. */ 1866 skb_put(skb2, length); /* Just the second buffer length here. */
1841 pci_unmap_single(qdev->pdev, 1867 pci_unmap_single(qdev->pdev,
1842 pci_unmap_addr(lrg_buf_cb2, mapaddr), 1868 pci_unmap_addr(lrg_buf_cb2, mapaddr),
@@ -1844,16 +1870,40 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1844 PCI_DMA_FROMDEVICE); 1870 PCI_DMA_FROMDEVICE);
1845 prefetch(skb2->data); 1871 prefetch(skb2->data);
1846 1872
1847 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1848 skb2->dev = qdev->ndev;
1849 skb2->ip_summed = CHECKSUM_NONE; 1873 skb2->ip_summed = CHECKSUM_NONE;
1874 if (qdev->device_id == QL3022_DEVICE_ID) {
1875 /*
1876 * Copy the ethhdr from first buffer to second. This
1877 * is necessary for 3022 IP completions.
1878 */
1879 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1880 } else {
1881 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
1882 if (checksum &
1883 (IB_IP_IOCB_RSP_3032_ICE |
1884 IB_IP_IOCB_RSP_3032_CE |
1885 IB_IP_IOCB_RSP_3032_NUC)) {
1886 printk(KERN_ERR
1887 "%s: Bad checksum for this %s packet, checksum = %x.\n",
1888 __func__,
1889 ((checksum &
1890 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
1891 "UDP"),checksum);
1892 } else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
1893 skb2->ip_summed = CHECKSUM_UNNECESSARY;
1894 }
1895 }
1896 skb2->dev = qdev->ndev;
1850 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 1897 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
1851 1898
1852 netif_receive_skb(skb2); 1899 netif_receive_skb(skb2);
1900 qdev->stats.rx_packets++;
1901 qdev->stats.rx_bytes += length;
1853 ndev->last_rx = jiffies; 1902 ndev->last_rx = jiffies;
1854 lrg_buf_cb2->skb = NULL; 1903 lrg_buf_cb2->skb = NULL;
1855 1904
1856 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 1905 if (qdev->device_id == QL3022_DEVICE_ID)
1906 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1857 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 1907 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1858} 1908}
1859 1909
@@ -1880,12 +1930,14 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1880 break; 1930 break;
1881 1931
1882 case OPCODE_IB_MAC_IOCB: 1932 case OPCODE_IB_MAC_IOCB:
1933 case OPCODE_IB_3032_MAC_IOCB:
1883 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 1934 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
1884 net_rsp); 1935 net_rsp);
1885 (*rx_cleaned)++; 1936 (*rx_cleaned)++;
1886 break; 1937 break;
1887 1938
1888 case OPCODE_IB_IP_IOCB: 1939 case OPCODE_IB_IP_IOCB:
1940 case OPCODE_IB_3032_IP_IOCB:
1889 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 1941 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
1890 net_rsp); 1942 net_rsp);
1891 (*rx_cleaned)++; 1943 (*rx_cleaned)++;
@@ -2032,13 +2084,96 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2032 return IRQ_RETVAL(handled); 2084 return IRQ_RETVAL(handled);
2033} 2085}
2034 2086
2087/*
2088 * Get the total number of segments needed for the
2089 * given number of fragments. This is necessary because
2090 * outbound address lists (OAL) will be used when more than
2091 * two frags are given. Each address list has 5 addr/len
 2092 * pairs. The 5th pair in each OAL is used to point to
 2093 * the next OAL if more frags are coming.
2094 * That is why the frags:segment count ratio is not linear.
2095 */
2096static int ql_get_seg_count(unsigned short frags)
2097{
2098 switch(frags) {
2099 case 0: return 1; /* just the skb->data seg */
2100 case 1: return 2; /* skb->data + 1 frag */
2101 case 2: return 3; /* skb->data + 2 frags */
 2102 case 3: return 5; /* skb->data + 1 frag + 1 OAL containing 2 frags */
2103 case 4: return 6;
2104 case 5: return 7;
2105 case 6: return 8;
2106 case 7: return 10;
2107 case 8: return 11;
2108 case 9: return 12;
2109 case 10: return 13;
2110 case 11: return 15;
2111 case 12: return 16;
2112 case 13: return 17;
2113 case 14: return 18;
2114 case 15: return 20;
2115 case 16: return 21;
2116 case 17: return 22;
2117 case 18: return 23;
2118 }
2119 return -1;
2120}
2121
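The switch above follows a simple rule: the IOCB's third addr/len pair and every OAL's fifth pair are consumed by chaining, so one extra segment is needed for every four fragments beyond the second. A stand-alone restatement of that rule (illustrative only, not driver code), which reproduces the table for 0..18 fragments:

	/* Hypothetical closed form equivalent to ql_get_seg_count() above. */
	static int seg_count_closed_form(unsigned short frags)
	{
		if (frags <= 2)
			return frags + 1;		/* skb->data + frags, no OAL */
		return frags + 2 + (frags - 3) / 4;	/* + continuation entries */
	}
	/* e.g. frags = 7: 7 + 2 + 1 = 10, matching the table above. */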
2122static void ql_hw_csum_setup(struct sk_buff *skb,
2123 struct ob_mac_iocb_req *mac_iocb_ptr)
2124{
2125 struct ethhdr *eth;
2126 struct iphdr *ip = NULL;
2127 u8 offset = ETH_HLEN;
2128
2129 eth = (struct ethhdr *)(skb->data);
2130
2131 if (eth->h_proto == __constant_htons(ETH_P_IP)) {
2132 ip = (struct iphdr *)&skb->data[ETH_HLEN];
2133 } else if (eth->h_proto == htons(ETH_P_8021Q) &&
2134 ((struct vlan_ethhdr *)skb->data)->
2135 h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
2136 ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
2137 offset = VLAN_ETH_HLEN;
2138 }
2139
2140 if (ip) {
2141 if (ip->protocol == IPPROTO_TCP) {
2142 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
2143 mac_iocb_ptr->ip_hdr_off = offset;
2144 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2145 } else if (ip->protocol == IPPROTO_UDP) {
2146 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
2147 mac_iocb_ptr->ip_hdr_off = offset;
2148 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2149 }
2150 }
2151}
2152
2153/*
2154 * The difference between 3022 and 3032 sends:
2155 * 3022 only supports a simple single segment transmission.
2156 * 3032 supports checksumming and scatter/gather lists (fragments).
2157 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2158 * in the IOCB plus a chain of outbound address lists (OAL) that
2159 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
 2160 * will be used to point to an OAL when more ALP entries are required.
2161 * The IOCB is always the top of the chain followed by one or more
2162 * OALs (when necessary).
2163 */
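Given that layout, the chain entries land at fixed segment indices: the IOCB's third pair is segment 2 and each OAL's fifth pair falls five segments later, which is where the seg == 2/7/12/17 tests in ql3xxx_send() below come from. A hedged restatement (illustrative only, not driver code):

	/* A continuation entry is needed at segment index 2 + 5*k (k = 0..3),
	 * and only when further segments follow, i.e. seg_cnt > seg + 1. */
	static int is_continuation_slot(int seg, int seg_cnt)
	{
		return seg >= 2 && (seg - 2) % 5 == 0 && seg_cnt > seg + 1;
	}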
2035static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) 2164static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2036{ 2165{
2037 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2166 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2038 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2167 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2039 struct ql_tx_buf_cb *tx_cb; 2168 struct ql_tx_buf_cb *tx_cb;
2169 u32 tot_len = skb->len;
2170 struct oal *oal;
2171 struct oal_entry *oal_entry;
2172 int len;
2040 struct ob_mac_iocb_req *mac_iocb_ptr; 2173 struct ob_mac_iocb_req *mac_iocb_ptr;
2041 u64 map; 2174 u64 map;
2175 int seg_cnt, seg = 0;
2176 int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2042 2177
2043 if (unlikely(atomic_read(&qdev->tx_count) < 2)) { 2178 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2044 if (!netif_queue_stopped(ndev)) 2179 if (!netif_queue_stopped(ndev))
@@ -2046,21 +2181,79 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2046 return NETDEV_TX_BUSY; 2181 return NETDEV_TX_BUSY;
2047 } 2182 }
2048 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; 2183 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2184 seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
2185 if(seg_cnt == -1) {
2186 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
2187 return NETDEV_TX_OK;
2188
2189 }
2049 mac_iocb_ptr = tx_cb->queue_entry; 2190 mac_iocb_ptr = tx_cb->queue_entry;
2050 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2191 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2051 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2192 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2052 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2193 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2053 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2194 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2054 mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len); 2195 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2055 tx_cb->skb = skb; 2196 tx_cb->skb = skb;
2056 map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 2197 if (skb->ip_summed == CHECKSUM_PARTIAL)
2057 mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map)); 2198 ql_hw_csum_setup(skb, mac_iocb_ptr);
2058 mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map)); 2199 len = skb_headlen(skb);
2059 mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E); 2200 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2060 pci_unmap_addr_set(tx_cb, mapaddr, map); 2201 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2061 pci_unmap_len_set(tx_cb, maplen, skb->len); 2202 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2062 atomic_dec(&qdev->tx_count); 2203 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2204 oal_entry->len = cpu_to_le32(len);
2205 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2206 pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
2207 seg++;
2208
2209 if (!skb_shinfo(skb)->nr_frags) {
2210 /* Terminate the last segment. */
2211 oal_entry->len =
2212 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2213 } else {
2214 int i;
2215 oal = tx_cb->oal;
2216 for (i=0; i<frag_cnt; i++,seg++) {
2217 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2218 oal_entry++;
2219 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2220 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
2221 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2222 (seg == 17 && seg_cnt > 18)) {
2223 /* Continuation entry points to outbound address list. */
2224 map = pci_map_single(qdev->pdev, oal,
2225 sizeof(struct oal),
2226 PCI_DMA_TODEVICE);
2227 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2228 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2229 oal_entry->len =
2230 cpu_to_le32(sizeof(struct oal) |
2231 OAL_CONT_ENTRY);
2232 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2233 map);
2234 pci_unmap_len_set(&tx_cb->map[seg], maplen,
2235 len);
2236 oal_entry = (struct oal_entry *)oal;
2237 oal++;
2238 seg++;
2239 }
2063 2240
2241 map =
2242 pci_map_page(qdev->pdev, frag->page,
2243 frag->page_offset, frag->size,
2244 PCI_DMA_TODEVICE);
2245 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2246 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2247 oal_entry->len = cpu_to_le32(frag->size);
2248 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2249 pci_unmap_len_set(&tx_cb->map[seg], maplen,
2250 frag->size);
2251 }
2252 /* Terminate the last segment. */
2253 oal_entry->len =
2254 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2255 }
2256 wmb();
2064 qdev->req_producer_index++; 2257 qdev->req_producer_index++;
2065 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2258 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2066 qdev->req_producer_index = 0; 2259 qdev->req_producer_index = 0;
@@ -2074,8 +2267,10 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2074 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", 2267 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2075 ndev->name, qdev->req_producer_index, skb->len); 2268 ndev->name, qdev->req_producer_index, skb->len);
2076 2269
2270 atomic_dec(&qdev->tx_count);
2077 return NETDEV_TX_OK; 2271 return NETDEV_TX_OK;
2078} 2272}
2273
2079static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2274static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2080{ 2275{
2081 qdev->req_q_size = 2276 qdev->req_q_size =
@@ -2359,7 +2554,22 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2359 return 0; 2554 return 0;
2360} 2555}
2361 2556
2362static void ql_create_send_free_list(struct ql3_adapter *qdev) 2557static void ql_free_send_free_list(struct ql3_adapter *qdev)
2558{
2559 struct ql_tx_buf_cb *tx_cb;
2560 int i;
2561
2562 tx_cb = &qdev->tx_buf[0];
2563 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2564 if (tx_cb->oal) {
2565 kfree(tx_cb->oal);
2566 tx_cb->oal = NULL;
2567 }
2568 tx_cb++;
2569 }
2570}
2571
2572static int ql_create_send_free_list(struct ql3_adapter *qdev)
2363{ 2573{
2364 struct ql_tx_buf_cb *tx_cb; 2574 struct ql_tx_buf_cb *tx_cb;
2365 int i; 2575 int i;
@@ -2368,11 +2578,16 @@ static void ql_create_send_free_list(struct ql3_adapter *qdev)
2368 2578
2369 /* Create free list of transmit buffers */ 2579 /* Create free list of transmit buffers */
2370 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2580 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2581
2371 tx_cb = &qdev->tx_buf[i]; 2582 tx_cb = &qdev->tx_buf[i];
2372 tx_cb->skb = NULL; 2583 tx_cb->skb = NULL;
2373 tx_cb->queue_entry = req_q_curr; 2584 tx_cb->queue_entry = req_q_curr;
2374 req_q_curr++; 2585 req_q_curr++;
2586 tx_cb->oal = kmalloc(512, GFP_KERNEL);
2587 if (tx_cb->oal == NULL)
2588 return -1;
2375 } 2589 }
2590 return 0;
2376} 2591}
2377 2592
2378static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2593static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
@@ -2447,12 +2662,14 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2447 2662
2448 /* Initialize the large buffer queue. */ 2663 /* Initialize the large buffer queue. */
2449 ql_init_large_buffers(qdev); 2664 ql_init_large_buffers(qdev);
2450 ql_create_send_free_list(qdev); 2665 if (ql_create_send_free_list(qdev))
2666 goto err_free_list;
2451 2667
2452 qdev->rsp_current = qdev->rsp_q_virt_addr; 2668 qdev->rsp_current = qdev->rsp_q_virt_addr;
2453 2669
2454 return 0; 2670 return 0;
2455 2671err_free_list:
2672 ql_free_send_free_list(qdev);
2456err_small_buffers: 2673err_small_buffers:
2457 ql_free_buffer_queues(qdev); 2674 ql_free_buffer_queues(qdev);
2458err_buffer_queues: 2675err_buffer_queues:
@@ -2468,6 +2685,7 @@ err_req_rsp:
2468 2685
2469static void ql_free_mem_resources(struct ql3_adapter *qdev) 2686static void ql_free_mem_resources(struct ql3_adapter *qdev)
2470{ 2687{
2688 ql_free_send_free_list(qdev);
2471 ql_free_large_buffers(qdev); 2689 ql_free_large_buffers(qdev);
2472 ql_free_small_buffers(qdev); 2690 ql_free_small_buffers(qdev);
2473 ql_free_buffer_queues(qdev); 2691 ql_free_buffer_queues(qdev);
@@ -2766,11 +2984,20 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
2766 } 2984 }
2767 2985
2768 /* Enable Ethernet Function */ 2986 /* Enable Ethernet Function */
2769 value = 2987 if (qdev->device_id == QL3032_DEVICE_ID) {
2770 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | 2988 value =
2771 PORT_CONTROL_HH); 2989 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
2772 ql_write_page0_reg(qdev, &port_regs->portControl, 2990 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
2773 ((value << 16) | value)); 2991 ql_write_page0_reg(qdev, &port_regs->functionControl,
2992 ((value << 16) | value));
2993 } else {
2994 value =
2995 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
2996 PORT_CONTROL_HH);
2997 ql_write_page0_reg(qdev, &port_regs->portControl,
2998 ((value << 16) | value));
2999 }
3000
2774 3001
2775out: 3002out:
2776 return status; 3003 return status;
@@ -2917,8 +3144,10 @@ static void ql_display_dev_info(struct net_device *ndev)
2917 struct pci_dev *pdev = qdev->pdev; 3144 struct pci_dev *pdev = qdev->pdev;
2918 3145
2919 printk(KERN_INFO PFX 3146 printk(KERN_INFO PFX
2920 "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n", 3147 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
2921 DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot); 3148 DRV_NAME, qdev->index, qdev->chip_rev_id,
3149 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3150 qdev->pci_slot);
2922 printk(KERN_INFO PFX 3151 printk(KERN_INFO PFX
2923 "%s Interface.\n", 3152 "%s Interface.\n",
2924 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER"); 3153 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
@@ -3212,15 +3441,22 @@ static void ql_reset_work(struct work_struct *work)
3212 * Loop through the active list and return the skb. 3441 * Loop through the active list and return the skb.
3213 */ 3442 */
3214 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3443 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3444 int j;
3215 tx_cb = &qdev->tx_buf[i]; 3445 tx_cb = &qdev->tx_buf[i];
3216 if (tx_cb->skb) { 3446 if (tx_cb->skb) {
3217
3218 printk(KERN_DEBUG PFX 3447 printk(KERN_DEBUG PFX
3219 "%s: Freeing lost SKB.\n", 3448 "%s: Freeing lost SKB.\n",
3220 qdev->ndev->name); 3449 qdev->ndev->name);
3221 pci_unmap_single(qdev->pdev, 3450 pci_unmap_single(qdev->pdev,
3222 pci_unmap_addr(tx_cb, mapaddr), 3451 pci_unmap_addr(&tx_cb->map[0], mapaddr),
3223 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE); 3452 pci_unmap_len(&tx_cb->map[0], maplen),
3453 PCI_DMA_TODEVICE);
3454 for(j=1;j<tx_cb->seg_count;j++) {
3455 pci_unmap_page(qdev->pdev,
3456 pci_unmap_addr(&tx_cb->map[j],mapaddr),
3457 pci_unmap_len(&tx_cb->map[j],maplen),
3458 PCI_DMA_TODEVICE);
3459 }
3224 dev_kfree_skb(tx_cb->skb); 3460 dev_kfree_skb(tx_cb->skb);
3225 tx_cb->skb = NULL; 3461 tx_cb->skb = NULL;
3226 } 3462 }
@@ -3379,21 +3615,24 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3379 SET_MODULE_OWNER(ndev); 3615 SET_MODULE_OWNER(ndev);
3380 SET_NETDEV_DEV(ndev, &pdev->dev); 3616 SET_NETDEV_DEV(ndev, &pdev->dev);
3381 3617
3382 if (pci_using_dac)
3383 ndev->features |= NETIF_F_HIGHDMA;
3384
3385 pci_set_drvdata(pdev, ndev); 3618 pci_set_drvdata(pdev, ndev);
3386 3619
3387 qdev = netdev_priv(ndev); 3620 qdev = netdev_priv(ndev);
3388 qdev->index = cards_found; 3621 qdev->index = cards_found;
3389 qdev->ndev = ndev; 3622 qdev->ndev = ndev;
3390 qdev->pdev = pdev; 3623 qdev->pdev = pdev;
3624 qdev->device_id = pci_entry->device;
3391 qdev->port_link_state = LS_DOWN; 3625 qdev->port_link_state = LS_DOWN;
3392 if (msi) 3626 if (msi)
3393 qdev->msi = 1; 3627 qdev->msi = 1;
3394 3628
3395 qdev->msg_enable = netif_msg_init(debug, default_msg); 3629 qdev->msg_enable = netif_msg_init(debug, default_msg);
3396 3630
3631 if (pci_using_dac)
3632 ndev->features |= NETIF_F_HIGHDMA;
3633 if (qdev->device_id == QL3032_DEVICE_ID)
3634 ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);
3635
3397 qdev->mem_map_registers = 3636 qdev->mem_map_registers =
3398 ioremap_nocache(pci_resource_start(pdev, 1), 3637 ioremap_nocache(pci_resource_start(pdev, 1),
3399 pci_resource_len(qdev->pdev, 1)); 3638 pci_resource_len(qdev->pdev, 1));
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index ea94de7fd071..b2d76ea68827 100644..100755
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -21,7 +21,9 @@
21 21
22#define OPCODE_UPDATE_NCB_IOCB 0xF0 22#define OPCODE_UPDATE_NCB_IOCB 0xF0
23#define OPCODE_IB_MAC_IOCB 0xF9 23#define OPCODE_IB_MAC_IOCB 0xF9
24#define OPCODE_IB_3032_MAC_IOCB 0x09
24#define OPCODE_IB_IP_IOCB 0xFA 25#define OPCODE_IB_IP_IOCB 0xFA
26#define OPCODE_IB_3032_IP_IOCB 0x0A
25#define OPCODE_IB_TCP_IOCB 0xFB 27#define OPCODE_IB_TCP_IOCB 0xFB
26#define OPCODE_DUMP_PROTO_IOCB 0xFE 28#define OPCODE_DUMP_PROTO_IOCB 0xFE
27#define OPCODE_BUFFER_ALERT_IOCB 0xFB 29#define OPCODE_BUFFER_ALERT_IOCB 0xFB
@@ -37,18 +39,23 @@
37struct ob_mac_iocb_req { 39struct ob_mac_iocb_req {
38 u8 opcode; 40 u8 opcode;
39 u8 flags; 41 u8 flags;
40#define OB_MAC_IOCB_REQ_MA 0xC0 42#define OB_MAC_IOCB_REQ_MA 0xe0
41#define OB_MAC_IOCB_REQ_F 0x20 43#define OB_MAC_IOCB_REQ_F 0x10
42#define OB_MAC_IOCB_REQ_X 0x10 44#define OB_MAC_IOCB_REQ_X 0x08
43#define OB_MAC_IOCB_REQ_D 0x02 45#define OB_MAC_IOCB_REQ_D 0x02
44#define OB_MAC_IOCB_REQ_I 0x01 46#define OB_MAC_IOCB_REQ_I 0x01
45 __le16 reserved0; 47 u8 flags1;
48#define OB_3032MAC_IOCB_REQ_IC 0x04
49#define OB_3032MAC_IOCB_REQ_TC 0x02
50#define OB_3032MAC_IOCB_REQ_UC 0x01
51 u8 reserved0;
46 52
47 __le32 transaction_id; 53 __le32 transaction_id;
48 __le16 data_len; 54 __le16 data_len;
49 __le16 reserved1; 55 u8 ip_hdr_off;
56 u8 ip_hdr_len;
57 __le32 reserved1;
50 __le32 reserved2; 58 __le32 reserved2;
51 __le32 reserved3;
52 __le32 buf_addr0_low; 59 __le32 buf_addr0_low;
53 __le32 buf_addr0_high; 60 __le32 buf_addr0_high;
54 __le32 buf_0_len; 61 __le32 buf_0_len;
@@ -58,8 +65,8 @@ struct ob_mac_iocb_req {
58 __le32 buf_addr2_low; 65 __le32 buf_addr2_low;
59 __le32 buf_addr2_high; 66 __le32 buf_addr2_high;
60 __le32 buf_2_len; 67 __le32 buf_2_len;
68 __le32 reserved3;
61 __le32 reserved4; 69 __le32 reserved4;
62 __le32 reserved5;
63}; 70};
64/* 71/*
65 * The following constants define control bits for buffer 72 * The following constants define control bits for buffer
@@ -74,6 +81,7 @@ struct ob_mac_iocb_rsp {
74 u8 opcode; 81 u8 opcode;
75 u8 flags; 82 u8 flags;
76#define OB_MAC_IOCB_RSP_P 0x08 83#define OB_MAC_IOCB_RSP_P 0x08
84#define OB_MAC_IOCB_RSP_L 0x04
77#define OB_MAC_IOCB_RSP_S 0x02 85#define OB_MAC_IOCB_RSP_S 0x02
78#define OB_MAC_IOCB_RSP_I 0x01 86#define OB_MAC_IOCB_RSP_I 0x01
79 87
@@ -85,6 +93,7 @@ struct ob_mac_iocb_rsp {
85 93
86struct ib_mac_iocb_rsp { 94struct ib_mac_iocb_rsp {
87 u8 opcode; 95 u8 opcode;
96#define IB_MAC_IOCB_RSP_V 0x80
88 u8 flags; 97 u8 flags;
89#define IB_MAC_IOCB_RSP_S 0x80 98#define IB_MAC_IOCB_RSP_S 0x80
90#define IB_MAC_IOCB_RSP_H1 0x40 99#define IB_MAC_IOCB_RSP_H1 0x40
@@ -138,6 +147,7 @@ struct ob_ip_iocb_req {
138struct ob_ip_iocb_rsp { 147struct ob_ip_iocb_rsp {
139 u8 opcode; 148 u8 opcode;
140 u8 flags; 149 u8 flags;
150#define OB_MAC_IOCB_RSP_H 0x10
141#define OB_MAC_IOCB_RSP_E 0x08 151#define OB_MAC_IOCB_RSP_E 0x08
142#define OB_MAC_IOCB_RSP_L 0x04 152#define OB_MAC_IOCB_RSP_L 0x04
143#define OB_MAC_IOCB_RSP_S 0x02 153#define OB_MAC_IOCB_RSP_S 0x02
@@ -220,6 +230,10 @@ struct ob_tcp_iocb_rsp {
220 230
221struct ib_ip_iocb_rsp { 231struct ib_ip_iocb_rsp {
222 u8 opcode; 232 u8 opcode;
233#define IB_IP_IOCB_RSP_3032_V 0x80
234#define IB_IP_IOCB_RSP_3032_O 0x40
235#define IB_IP_IOCB_RSP_3032_I 0x20
236#define IB_IP_IOCB_RSP_3032_R 0x10
223 u8 flags; 237 u8 flags;
224#define IB_IP_IOCB_RSP_S 0x80 238#define IB_IP_IOCB_RSP_S 0x80
225#define IB_IP_IOCB_RSP_H1 0x40 239#define IB_IP_IOCB_RSP_H1 0x40
@@ -230,6 +244,12 @@ struct ib_ip_iocb_rsp {
230 244
231 __le16 length; 245 __le16 length;
232 __le16 checksum; 246 __le16 checksum;
247#define IB_IP_IOCB_RSP_3032_ICE 0x01
248#define IB_IP_IOCB_RSP_3032_CE 0x02
249#define IB_IP_IOCB_RSP_3032_NUC 0x04
250#define IB_IP_IOCB_RSP_3032_UDP 0x08
251#define IB_IP_IOCB_RSP_3032_TCP 0x10
252#define IB_IP_IOCB_RSP_3032_IPE 0x20
233 __le16 reserved; 253 __le16 reserved;
234#define IB_IP_IOCB_RSP_R 0x01 254#define IB_IP_IOCB_RSP_R 0x01
235 __le32 ial_low; 255 __le32 ial_low;
@@ -524,6 +544,21 @@ enum {
524 IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, 544 IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
525 IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, 545 IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
526 IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, 546 IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
547 IP_ADDR_INDEX_REG_6 = 0x0008,
548 IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
549 IP_ADDR_INDEX_REG_E = 0x0040,
550};
551enum {
552 QL3032_PORT_CONTROL_DS = 0x0001,
553 QL3032_PORT_CONTROL_HH = 0x0002,
554 QL3032_PORT_CONTROL_EIv6 = 0x0004,
555 QL3032_PORT_CONTROL_EIv4 = 0x0008,
556 QL3032_PORT_CONTROL_ET = 0x0010,
557 QL3032_PORT_CONTROL_EF = 0x0020,
558 QL3032_PORT_CONTROL_DRM = 0x0040,
559 QL3032_PORT_CONTROL_RLB = 0x0080,
560 QL3032_PORT_CONTROL_RCB = 0x0100,
561 QL3032_PORT_CONTROL_KIE = 0x0200,
527}; 562};
528 563
529enum { 564enum {
@@ -657,7 +692,8 @@ struct ql3xxx_port_registers {
657 u32 internalRamWDataReg; 692 u32 internalRamWDataReg;
658 u32 reclaimedBufferAddrRegLow; 693 u32 reclaimedBufferAddrRegLow;
659 u32 reclaimedBufferAddrRegHigh; 694 u32 reclaimedBufferAddrRegHigh;
660 u32 reserved[2]; 695 u32 tcpConfiguration;
696 u32 functionControl;
661 u32 fpgaRevID; 697 u32 fpgaRevID;
662 u32 localRamAddr; 698 u32 localRamAddr;
663 u32 localRamDataAutoIncr; 699 u32 localRamDataAutoIncr;
@@ -963,6 +999,7 @@ struct eeprom_data {
963 999
964#define QL3XXX_VENDOR_ID 0x1077 1000#define QL3XXX_VENDOR_ID 0x1077
965#define QL3022_DEVICE_ID 0x3022 1001#define QL3022_DEVICE_ID 0x3022
1002#define QL3032_DEVICE_ID 0x3032
966 1003
967/* MTU & Frame Size stuff */ 1004/* MTU & Frame Size stuff */
968#define NORMAL_MTU_SIZE ETH_DATA_LEN 1005#define NORMAL_MTU_SIZE ETH_DATA_LEN
@@ -1038,11 +1075,41 @@ struct ql_rcv_buf_cb {
1038 int index; 1075 int index;
1039}; 1076};
1040 1077
1078/*
1079 * Original IOCB has 3 sg entries:
 1080 * first points to skb->data area
1081 * second points to first frag
1082 * third points to next oal.
1083 * OAL has 5 entries:
1084 * 1 thru 4 point to frags
1085 * fifth points to next oal.
1086 */
1087#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
1088
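For orientation (illustrative arithmetic, assuming MAX_SKB_FRAGS is 18, its usual value with 4 KB pages): the IOCB covers skb->data plus one fragment, leaving 17 fragments for OALs; at four fragments per OAL plus a chain entry, that is ceil(17/4) = 5 OALs, and the macro above agrees: (18 - 1)/4 + 1 = 5.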
1089struct oal_entry {
1090 u32 dma_lo;
1091 u32 dma_hi;
1092 u32 len;
1093#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
1094#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
1095 u32 reserved;
1096};
1097
1098struct oal {
1099 struct oal_entry oal_entry[5];
1100};
1101
1102struct map_list {
1103 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1104 DECLARE_PCI_UNMAP_LEN(maplen);
1105};
1106
1041struct ql_tx_buf_cb { 1107struct ql_tx_buf_cb {
1042 struct sk_buff *skb; 1108 struct sk_buff *skb;
1043 struct ob_mac_iocb_req *queue_entry ; 1109 struct ob_mac_iocb_req *queue_entry ;
1044 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1110 int seg_count;
1045 DECLARE_PCI_UNMAP_LEN(maplen); 1111 struct oal *oal;
1112 struct map_list map[MAX_SKB_FRAGS+1];
1046}; 1113};
1047 1114
1048/* definitions for type field */ 1115/* definitions for type field */
@@ -1189,6 +1256,7 @@ struct ql3_adapter {
1189 struct delayed_work reset_work; 1256 struct delayed_work reset_work;
1190 struct delayed_work tx_timeout_work; 1257 struct delayed_work tx_timeout_work;
1191 u32 max_frame_size; 1258 u32 max_frame_size;
1259 u32 device_id;
1192}; 1260};
1193 1261
1194#endif /* _QLA3XXX_H_ */ 1262#endif /* _QLA3XXX_H_ */
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index a914fef44309..0e345cbc2bf9 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -15,7 +15,7 @@
15 15
16#define TBD 0 16#define TBD 0
17 17
18typedef struct _XENA_dev_config { 18struct XENA_dev_config {
19/* Convention: mHAL_XXX is mask, vHAL_XXX is value */ 19/* Convention: mHAL_XXX is mask, vHAL_XXX is value */
20 20
21/* General Control-Status Registers */ 21/* General Control-Status Registers */
@@ -300,6 +300,7 @@ typedef struct _XENA_dev_config {
300 u64 gpio_control; 300 u64 gpio_control;
301#define GPIO_CTRL_GPIO_0 BIT(8) 301#define GPIO_CTRL_GPIO_0 BIT(8)
302 u64 misc_control; 302 u64 misc_control;
303#define FAULT_BEHAVIOUR BIT(0)
303#define EXT_REQ_EN BIT(1) 304#define EXT_REQ_EN BIT(1)
304#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3) 305#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
305 306
@@ -851,9 +852,9 @@ typedef struct _XENA_dev_config {
851#define SPI_CONTROL_DONE BIT(6) 852#define SPI_CONTROL_DONE BIT(6)
852 u64 spi_data; 853 u64 spi_data;
853#define SPI_DATA_WRITE(data,len) vBIT(data,0,len) 854#define SPI_DATA_WRITE(data,len) vBIT(data,0,len)
854} XENA_dev_config_t; 855};
855 856
856#define XENA_REG_SPACE sizeof(XENA_dev_config_t) 857#define XENA_REG_SPACE sizeof(struct XENA_dev_config)
857#define XENA_EEPROM_SPACE (0x01 << 11) 858#define XENA_EEPROM_SPACE (0x01 << 11)
858 859
859#endif /* _REGS_H */ 860#endif /* _REGS_H */
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 1dd66b8ea0fa..639fbc0f16f3 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -77,7 +77,7 @@
77#include "s2io.h" 77#include "s2io.h"
78#include "s2io-regs.h" 78#include "s2io-regs.h"
79 79
80#define DRV_VERSION "2.0.15.2" 80#define DRV_VERSION "2.0.16.1"
81 81
82/* S2io Driver name & version. */ 82/* S2io Driver name & version. */
83static char s2io_driver_name[] = "Neterion"; 83static char s2io_driver_name[] = "Neterion";
@@ -86,7 +86,7 @@ static char s2io_driver_version[] = DRV_VERSION;
86static int rxd_size[4] = {32,48,48,64}; 86static int rxd_size[4] = {32,48,48,64};
87static int rxd_count[4] = {127,85,85,63}; 87static int rxd_count[4] = {127,85,85,63};
88 88
89static inline int RXD_IS_UP2DT(RxD_t *rxdp) 89static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
90{ 90{
91 int ret; 91 int ret;
92 92
@@ -111,9 +111,9 @@ static inline int RXD_IS_UP2DT(RxD_t *rxdp)
111#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status)) 111#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
112#define PANIC 1 112#define PANIC 1
113#define LOW 2 113#define LOW 2
114static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring) 114static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
115{ 115{
116 mac_info_t *mac_control; 116 struct mac_info *mac_control;
117 117
118 mac_control = &sp->mac_control; 118 mac_control = &sp->mac_control;
119 if (rxb_size <= rxd_count[sp->rxd_mode]) 119 if (rxb_size <= rxd_count[sp->rxd_mode])
@@ -286,7 +286,7 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
286static void s2io_vlan_rx_register(struct net_device *dev, 286static void s2io_vlan_rx_register(struct net_device *dev,
287 struct vlan_group *grp) 287 struct vlan_group *grp)
288{ 288{
289 nic_t *nic = dev->priv; 289 struct s2io_nic *nic = dev->priv;
290 unsigned long flags; 290 unsigned long flags;
291 291
292 spin_lock_irqsave(&nic->tx_lock, flags); 292 spin_lock_irqsave(&nic->tx_lock, flags);
@@ -297,7 +297,7 @@ static void s2io_vlan_rx_register(struct net_device *dev,
297/* Unregister the vlan */ 297/* Unregister the vlan */
298static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 298static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
299{ 299{
300 nic_t *nic = dev->priv; 300 struct s2io_nic *nic = dev->priv;
301 unsigned long flags; 301 unsigned long flags;
302 302
303 spin_lock_irqsave(&nic->tx_lock, flags); 303 spin_lock_irqsave(&nic->tx_lock, flags);
@@ -401,9 +401,10 @@ S2IO_PARM_INT(lro, 0);
401 * aggregation happens until we hit max IP pkt size(64K) 401 * aggregation happens until we hit max IP pkt size(64K)
402 */ 402 */
403S2IO_PARM_INT(lro_max_pkts, 0xFFFF); 403S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
404#ifndef CONFIG_S2IO_NAPI
405S2IO_PARM_INT(indicate_max_pkts, 0); 404S2IO_PARM_INT(indicate_max_pkts, 0);
406#endif 405
406S2IO_PARM_INT(napi, 1);
407S2IO_PARM_INT(ufo, 0);
407 408
408static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 409static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
409 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 410 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
@@ -457,14 +458,14 @@ static int init_shared_mem(struct s2io_nic *nic)
457 u32 size; 458 u32 size;
458 void *tmp_v_addr, *tmp_v_addr_next; 459 void *tmp_v_addr, *tmp_v_addr_next;
459 dma_addr_t tmp_p_addr, tmp_p_addr_next; 460 dma_addr_t tmp_p_addr, tmp_p_addr_next;
460 RxD_block_t *pre_rxd_blk = NULL; 461 struct RxD_block *pre_rxd_blk = NULL;
461 int i, j, blk_cnt, rx_sz, tx_sz; 462 int i, j, blk_cnt;
462 int lst_size, lst_per_page; 463 int lst_size, lst_per_page;
463 struct net_device *dev = nic->dev; 464 struct net_device *dev = nic->dev;
464 unsigned long tmp; 465 unsigned long tmp;
465 buffAdd_t *ba; 466 struct buffAdd *ba;
466 467
467 mac_info_t *mac_control; 468 struct mac_info *mac_control;
468 struct config_param *config; 469 struct config_param *config;
469 470
470 mac_control = &nic->mac_control; 471 mac_control = &nic->mac_control;
@@ -482,13 +483,12 @@ static int init_shared_mem(struct s2io_nic *nic)
482 return -EINVAL; 483 return -EINVAL;
483 } 484 }
484 485
485 lst_size = (sizeof(TxD_t) * config->max_txds); 486 lst_size = (sizeof(struct TxD) * config->max_txds);
486 tx_sz = lst_size * size;
487 lst_per_page = PAGE_SIZE / lst_size; 487 lst_per_page = PAGE_SIZE / lst_size;
488 488
489 for (i = 0; i < config->tx_fifo_num; i++) { 489 for (i = 0; i < config->tx_fifo_num; i++) {
490 int fifo_len = config->tx_cfg[i].fifo_len; 490 int fifo_len = config->tx_cfg[i].fifo_len;
491 int list_holder_size = fifo_len * sizeof(list_info_hold_t); 491 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
492 mac_control->fifos[i].list_info = kmalloc(list_holder_size, 492 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
493 GFP_KERNEL); 493 GFP_KERNEL);
494 if (!mac_control->fifos[i].list_info) { 494 if (!mac_control->fifos[i].list_info) {
@@ -579,10 +579,9 @@ static int init_shared_mem(struct s2io_nic *nic)
579 mac_control->rings[i].block_count; 579 mac_control->rings[i].block_count;
580 } 580 }
581 if (nic->rxd_mode == RXD_MODE_1) 581 if (nic->rxd_mode == RXD_MODE_1)
582 size = (size * (sizeof(RxD1_t))); 582 size = (size * (sizeof(struct RxD1)));
583 else 583 else
584 size = (size * (sizeof(RxD3_t))); 584 size = (size * (sizeof(struct RxD3)));
585 rx_sz = size;
586 585
587 for (i = 0; i < config->rx_ring_num; i++) { 586 for (i = 0; i < config->rx_ring_num; i++) {
588 mac_control->rings[i].rx_curr_get_info.block_index = 0; 587 mac_control->rings[i].rx_curr_get_info.block_index = 0;
@@ -600,7 +599,7 @@ static int init_shared_mem(struct s2io_nic *nic)
600 (rxd_count[nic->rxd_mode] + 1); 599 (rxd_count[nic->rxd_mode] + 1);
601 /* Allocating all the Rx blocks */ 600 /* Allocating all the Rx blocks */
602 for (j = 0; j < blk_cnt; j++) { 601 for (j = 0; j < blk_cnt; j++) {
603 rx_block_info_t *rx_blocks; 602 struct rx_block_info *rx_blocks;
604 int l; 603 int l;
605 604
606 rx_blocks = &mac_control->rings[i].rx_blocks[j]; 605 rx_blocks = &mac_control->rings[i].rx_blocks[j];
@@ -620,9 +619,11 @@ static int init_shared_mem(struct s2io_nic *nic)
620 memset(tmp_v_addr, 0, size); 619 memset(tmp_v_addr, 0, size);
621 rx_blocks->block_virt_addr = tmp_v_addr; 620 rx_blocks->block_virt_addr = tmp_v_addr;
622 rx_blocks->block_dma_addr = tmp_p_addr; 621 rx_blocks->block_dma_addr = tmp_p_addr;
623 rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)* 622 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
624 rxd_count[nic->rxd_mode], 623 rxd_count[nic->rxd_mode],
625 GFP_KERNEL); 624 GFP_KERNEL);
625 if (!rx_blocks->rxds)
626 return -ENOMEM;
626 for (l=0; l<rxd_count[nic->rxd_mode];l++) { 627 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
627 rx_blocks->rxds[l].virt_addr = 628 rx_blocks->rxds[l].virt_addr =
628 rx_blocks->block_virt_addr + 629 rx_blocks->block_virt_addr +
@@ -645,7 +646,7 @@ static int init_shared_mem(struct s2io_nic *nic)
645 mac_control->rings[i].rx_blocks[(j + 1) % 646 mac_control->rings[i].rx_blocks[(j + 1) %
646 blk_cnt].block_dma_addr; 647 blk_cnt].block_dma_addr;
647 648
648 pre_rxd_blk = (RxD_block_t *) tmp_v_addr; 649 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
649 pre_rxd_blk->reserved_2_pNext_RxD_block = 650 pre_rxd_blk->reserved_2_pNext_RxD_block =
650 (unsigned long) tmp_v_addr_next; 651 (unsigned long) tmp_v_addr_next;
651 pre_rxd_blk->pNext_RxD_Blk_physical = 652 pre_rxd_blk->pNext_RxD_Blk_physical =
@@ -661,14 +662,14 @@ static int init_shared_mem(struct s2io_nic *nic)
661 blk_cnt = config->rx_cfg[i].num_rxd / 662 blk_cnt = config->rx_cfg[i].num_rxd /
662 (rxd_count[nic->rxd_mode]+ 1); 663 (rxd_count[nic->rxd_mode]+ 1);
663 mac_control->rings[i].ba = 664 mac_control->rings[i].ba =
664 kmalloc((sizeof(buffAdd_t *) * blk_cnt), 665 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
665 GFP_KERNEL); 666 GFP_KERNEL);
666 if (!mac_control->rings[i].ba) 667 if (!mac_control->rings[i].ba)
667 return -ENOMEM; 668 return -ENOMEM;
668 for (j = 0; j < blk_cnt; j++) { 669 for (j = 0; j < blk_cnt; j++) {
669 int k = 0; 670 int k = 0;
670 mac_control->rings[i].ba[j] = 671 mac_control->rings[i].ba[j] =
671 kmalloc((sizeof(buffAdd_t) * 672 kmalloc((sizeof(struct buffAdd) *
672 (rxd_count[nic->rxd_mode] + 1)), 673 (rxd_count[nic->rxd_mode] + 1)),
673 GFP_KERNEL); 674 GFP_KERNEL);
674 if (!mac_control->rings[i].ba[j]) 675 if (!mac_control->rings[i].ba[j])
@@ -700,7 +701,7 @@ static int init_shared_mem(struct s2io_nic *nic)
700 } 701 }
701 702
702 /* Allocation and initialization of Statistics block */ 703 /* Allocation and initialization of Statistics block */
703 size = sizeof(StatInfo_t); 704 size = sizeof(struct stat_block);
704 mac_control->stats_mem = pci_alloc_consistent 705 mac_control->stats_mem = pci_alloc_consistent
705 (nic->pdev, size, &mac_control->stats_mem_phy); 706 (nic->pdev, size, &mac_control->stats_mem_phy);
706 707
@@ -715,7 +716,7 @@ static int init_shared_mem(struct s2io_nic *nic)
715 mac_control->stats_mem_sz = size; 716 mac_control->stats_mem_sz = size;
716 717
717 tmp_v_addr = mac_control->stats_mem; 718 tmp_v_addr = mac_control->stats_mem;
718 mac_control->stats_info = (StatInfo_t *) tmp_v_addr; 719 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
719 memset(tmp_v_addr, 0, size); 720 memset(tmp_v_addr, 0, size);
720 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name, 721 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
721 (unsigned long long) tmp_p_addr); 722 (unsigned long long) tmp_p_addr);
@@ -735,7 +736,7 @@ static void free_shared_mem(struct s2io_nic *nic)
735 int i, j, blk_cnt, size; 736 int i, j, blk_cnt, size;
736 void *tmp_v_addr; 737 void *tmp_v_addr;
737 dma_addr_t tmp_p_addr; 738 dma_addr_t tmp_p_addr;
738 mac_info_t *mac_control; 739 struct mac_info *mac_control;
739 struct config_param *config; 740 struct config_param *config;
740 int lst_size, lst_per_page; 741 int lst_size, lst_per_page;
741 struct net_device *dev = nic->dev; 742 struct net_device *dev = nic->dev;
@@ -746,7 +747,7 @@ static void free_shared_mem(struct s2io_nic *nic)
746 mac_control = &nic->mac_control; 747 mac_control = &nic->mac_control;
747 config = &nic->config; 748 config = &nic->config;
748 749
749 lst_size = (sizeof(TxD_t) * config->max_txds); 750 lst_size = (sizeof(struct TxD) * config->max_txds);
750 lst_per_page = PAGE_SIZE / lst_size; 751 lst_per_page = PAGE_SIZE / lst_size;
751 752
752 for (i = 0; i < config->tx_fifo_num; i++) { 753 for (i = 0; i < config->tx_fifo_num; i++) {
@@ -809,7 +810,7 @@ static void free_shared_mem(struct s2io_nic *nic)
809 if (!mac_control->rings[i].ba[j]) 810 if (!mac_control->rings[i].ba[j])
810 continue; 811 continue;
811 while (k != rxd_count[nic->rxd_mode]) { 812 while (k != rxd_count[nic->rxd_mode]) {
812 buffAdd_t *ba = 813 struct buffAdd *ba =
813 &mac_control->rings[i].ba[j][k]; 814 &mac_control->rings[i].ba[j][k];
814 kfree(ba->ba_0_org); 815 kfree(ba->ba_0_org);
815 kfree(ba->ba_1_org); 816 kfree(ba->ba_1_org);
@@ -835,9 +836,9 @@ static void free_shared_mem(struct s2io_nic *nic)
835 * s2io_verify_pci_mode - 836 * s2io_verify_pci_mode -
836 */ 837 */
837 838
838static int s2io_verify_pci_mode(nic_t *nic) 839static int s2io_verify_pci_mode(struct s2io_nic *nic)
839{ 840{
840 XENA_dev_config_t __iomem *bar0 = nic->bar0; 841 struct XENA_dev_config __iomem *bar0 = nic->bar0;
841 register u64 val64 = 0; 842 register u64 val64 = 0;
842 int mode; 843 int mode;
843 844
@@ -868,9 +869,9 @@ static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
868/** 869/**
869 * s2io_print_pci_mode - 870 * s2io_print_pci_mode -
870 */ 871 */
871static int s2io_print_pci_mode(nic_t *nic) 872static int s2io_print_pci_mode(struct s2io_nic *nic)
872{ 873{
873 XENA_dev_config_t __iomem *bar0 = nic->bar0; 874 struct XENA_dev_config __iomem *bar0 = nic->bar0;
874 register u64 val64 = 0; 875 register u64 val64 = 0;
875 int mode; 876 int mode;
876 struct config_param *config = &nic->config; 877 struct config_param *config = &nic->config;
@@ -938,13 +939,13 @@ static int s2io_print_pci_mode(nic_t *nic)
938 939
939static int init_nic(struct s2io_nic *nic) 940static int init_nic(struct s2io_nic *nic)
940{ 941{
941 XENA_dev_config_t __iomem *bar0 = nic->bar0; 942 struct XENA_dev_config __iomem *bar0 = nic->bar0;
942 struct net_device *dev = nic->dev; 943 struct net_device *dev = nic->dev;
943 register u64 val64 = 0; 944 register u64 val64 = 0;
944 void __iomem *add; 945 void __iomem *add;
945 u32 time; 946 u32 time;
946 int i, j; 947 int i, j;
947 mac_info_t *mac_control; 948 struct mac_info *mac_control;
948 struct config_param *config; 949 struct config_param *config;
949 int dtx_cnt = 0; 950 int dtx_cnt = 0;
950 unsigned long long mem_share; 951 unsigned long long mem_share;
@@ -1414,7 +1415,7 @@ static int init_nic(struct s2io_nic *nic)
1414 1415
1415 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1416 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1416 TTI_DATA2_MEM_TX_UFC_B(0x20) | 1417 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1417 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80); 1418 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1418 writeq(val64, &bar0->tti_data2_mem); 1419 writeq(val64, &bar0->tti_data2_mem);
1419 1420
1420 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD; 1421 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
@@ -1610,7 +1611,8 @@ static int init_nic(struct s2io_nic *nic)
1610 * that does not start on an ADB to reduce disconnects. 1611 * that does not start on an ADB to reduce disconnects.
1611 */ 1612 */
1612 if (nic->device_type == XFRAME_II_DEVICE) { 1613 if (nic->device_type == XFRAME_II_DEVICE) {
1613 val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3); 1614 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1615 MISC_LINK_STABILITY_PRD(3);
1614 writeq(val64, &bar0->misc_control); 1616 writeq(val64, &bar0->misc_control);
1615 val64 = readq(&bar0->pic_control2); 1617 val64 = readq(&bar0->pic_control2);
1616 val64 &= ~(BIT(13)|BIT(14)|BIT(15)); 1618 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
@@ -1626,7 +1628,7 @@ static int init_nic(struct s2io_nic *nic)
1626#define LINK_UP_DOWN_INTERRUPT 1 1628#define LINK_UP_DOWN_INTERRUPT 1
1627#define MAC_RMAC_ERR_TIMER 2 1629#define MAC_RMAC_ERR_TIMER 2
1628 1630
1629static int s2io_link_fault_indication(nic_t *nic) 1631static int s2io_link_fault_indication(struct s2io_nic *nic)
1630{ 1632{
1631 if (nic->intr_type != INTA) 1633 if (nic->intr_type != INTA)
1632 return MAC_RMAC_ERR_TIMER; 1634 return MAC_RMAC_ERR_TIMER;
@@ -1649,14 +1651,14 @@ static int s2io_link_fault_indication(nic_t *nic)
1649 1651
1650static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) 1652static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1651{ 1653{
1652 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1654 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1653 register u64 val64 = 0, temp64 = 0; 1655 register u64 val64 = 0, temp64 = 0;
1654 1656
1655 /* Top level interrupt classification */ 1657 /* Top level interrupt classification */
1656 /* PIC Interrupts */ 1658 /* PIC Interrupts */
1657 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) { 1659 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1658 /* Enable PIC Intrs in the general intr mask register */ 1660 /* Enable PIC Intrs in the general intr mask register */
1659 val64 = TXPIC_INT_M | PIC_RX_INT_M; 1661 val64 = TXPIC_INT_M;
1660 if (flag == ENABLE_INTRS) { 1662 if (flag == ENABLE_INTRS) {
1661 temp64 = readq(&bar0->general_int_mask); 1663 temp64 = readq(&bar0->general_int_mask);
1662 temp64 &= ~((u64) val64); 1664 temp64 &= ~((u64) val64);
@@ -1694,70 +1696,6 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1694 } 1696 }
1695 } 1697 }
1696 1698
1697 /* DMA Interrupts */
1698 /* Enabling/Disabling Tx DMA interrupts */
1699 if (mask & TX_DMA_INTR) {
1700 /* Enable TxDMA Intrs in the general intr mask register */
1701 val64 = TXDMA_INT_M;
1702 if (flag == ENABLE_INTRS) {
1703 temp64 = readq(&bar0->general_int_mask);
1704 temp64 &= ~((u64) val64);
1705 writeq(temp64, &bar0->general_int_mask);
1706 /*
1707 * Keep all interrupts other than PFC interrupt
1708 * and PCC interrupt disabled in DMA level.
1709 */
1710 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1711 TXDMA_PCC_INT_M);
1712 writeq(val64, &bar0->txdma_int_mask);
1713 /*
1714 * Enable only the MISC error 1 interrupt in PFC block
1715 */
1716 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1717 writeq(val64, &bar0->pfc_err_mask);
1718 /*
1719 * Enable only the FB_ECC error interrupt in PCC block
1720 */
1721 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1722 writeq(val64, &bar0->pcc_err_mask);
1723 } else if (flag == DISABLE_INTRS) {
1724 /*
1725 * Disable TxDMA Intrs in the general intr mask
1726 * register
1727 */
1728 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1729 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1730 temp64 = readq(&bar0->general_int_mask);
1731 val64 |= temp64;
1732 writeq(val64, &bar0->general_int_mask);
1733 }
1734 }
1735
1736 /* Enabling/Disabling Rx DMA interrupts */
1737 if (mask & RX_DMA_INTR) {
1738 /* Enable RxDMA Intrs in the general intr mask register */
1739 val64 = RXDMA_INT_M;
1740 if (flag == ENABLE_INTRS) {
1741 temp64 = readq(&bar0->general_int_mask);
1742 temp64 &= ~((u64) val64);
1743 writeq(temp64, &bar0->general_int_mask);
1744 /*
1745 * All RxDMA block interrupts are disabled for now
1746 * TODO
1747 */
1748 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1749 } else if (flag == DISABLE_INTRS) {
1750 /*
1751 * Disable RxDMA Intrs in the general intr mask
1752 * register
1753 */
1754 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1755 temp64 = readq(&bar0->general_int_mask);
1756 val64 |= temp64;
1757 writeq(val64, &bar0->general_int_mask);
1758 }
1759 }
1760
1761 /* MAC Interrupts */ 1699 /* MAC Interrupts */
1762 /* Enabling/Disabling MAC interrupts */ 1700 /* Enabling/Disabling MAC interrupts */
1763 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) { 1701 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
@@ -1784,53 +1722,6 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1784 } 1722 }
1785 } 1723 }
1786 1724
1787 /* XGXS Interrupts */
1788 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1789 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1790 if (flag == ENABLE_INTRS) {
1791 temp64 = readq(&bar0->general_int_mask);
1792 temp64 &= ~((u64) val64);
1793 writeq(temp64, &bar0->general_int_mask);
1794 /*
1795 * All XGXS block error interrupts are disabled for now
1796 * TODO
1797 */
1798 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1799 } else if (flag == DISABLE_INTRS) {
1800 /*
1801 * Disable MC Intrs in the general intr mask register
1802 */
1803 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1804 temp64 = readq(&bar0->general_int_mask);
1805 val64 |= temp64;
1806 writeq(val64, &bar0->general_int_mask);
1807 }
1808 }
1809
1810 /* Memory Controller(MC) interrupts */
1811 if (mask & MC_INTR) {
1812 val64 = MC_INT_M;
1813 if (flag == ENABLE_INTRS) {
1814 temp64 = readq(&bar0->general_int_mask);
1815 temp64 &= ~((u64) val64);
1816 writeq(temp64, &bar0->general_int_mask);
1817 /*
1818 * Enable all MC Intrs.
1819 */
1820 writeq(0x0, &bar0->mc_int_mask);
1821 writeq(0x0, &bar0->mc_err_mask);
1822 } else if (flag == DISABLE_INTRS) {
1823 /*
1824 * Disable MC Intrs in the general intr mask register
1825 */
1826 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1827 temp64 = readq(&bar0->general_int_mask);
1828 val64 |= temp64;
1829 writeq(val64, &bar0->general_int_mask);
1830 }
1831 }
1832
1833
1834 /* Tx traffic interrupts */ 1725 /* Tx traffic interrupts */
1835 if (mask & TX_TRAFFIC_INTR) { 1726 if (mask & TX_TRAFFIC_INTR) {
1836 val64 = TXTRAFFIC_INT_M; 1727 val64 = TXTRAFFIC_INT_M;
@@ -1877,41 +1768,36 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1877 } 1768 }
1878} 1769}
1879 1770
1880static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc) 1771/**
 1772 * verify_pcc_quiescent - Checks for PCC quiescent state
 1773 * Return: 1 If PCC is quiescent
 1774 * 0 If PCC is not quiescent
1775 */
1776static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1881{ 1777{
1882 int ret = 0; 1778 int ret = 0, herc;
1779 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1780 u64 val64 = readq(&bar0->adapter_status);
1781
1782 herc = (sp->device_type == XFRAME_II_DEVICE);
1883 1783
1884 if (flag == FALSE) { 1784 if (flag == FALSE) {
1885 if ((!herc && (rev_id >= 4)) || herc) { 1785 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1886 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) && 1786 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1887 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1888 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1889 ret = 1; 1787 ret = 1;
1890 } 1788 } else {
1891 }else { 1789 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1892 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1893 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1894 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1895 ret = 1; 1790 ret = 1;
1896 }
1897 } 1791 }
1898 } else { 1792 } else {
1899 if ((!herc && (rev_id >= 4)) || herc) { 1793 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1900 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) == 1794 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1901 ADAPTER_STATUS_RMAC_PCC_IDLE) && 1795 ADAPTER_STATUS_RMAC_PCC_IDLE))
1902 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1903 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1904 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1905 ret = 1; 1796 ret = 1;
1906 }
1907 } else { 1797 } else {
1908 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) == 1798 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1909 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) && 1799 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1910 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1911 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1912 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1913 ret = 1; 1800 ret = 1;
1914 }
1915 } 1801 }
1916 } 1802 }
1917 1803
@@ -1919,9 +1805,6 @@ static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1919} 1805}
1920/** 1806/**
1921 * verify_xena_quiescence - Checks whether the H/W is ready 1807 * verify_xena_quiescence - Checks whether the H/W is ready
1922 * @val64 : Value read from adapter status register.
1923 * @flag : indicates if the adapter enable bit was ever written once
1924 * before.
1925 * Description: Returns whether the H/W is ready to go or not. Depending 1808 * Description: Returns whether the H/W is ready to go or not. Depending
1926 * on whether adapter enable bit was written or not the comparison 1809 * on whether adapter enable bit was written or not the comparison
1927 * differs and the calling function passes the input argument flag to 1810 * differs and the calling function passes the input argument flag to
@@ -1930,24 +1813,63 @@ static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1930 * 0 If Xena is not quiescent 1813 * 0 If Xena is not quiescent
1931 */ 1814 */
1932 1815
1933static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag) 1816static int verify_xena_quiescence(struct s2io_nic *sp)
1934{ 1817{
1935 int ret = 0, herc; 1818 int mode;
1936 u64 tmp64 = ~((u64) val64); 1819 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1937 int rev_id = get_xena_rev_id(sp->pdev); 1820 u64 val64 = readq(&bar0->adapter_status);
1821 mode = s2io_verify_pci_mode(sp);
1938 1822
1939 herc = (sp->device_type == XFRAME_II_DEVICE); 1823 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1940 if (! 1824 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1941 (tmp64 & 1825 return 0;
1942 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY | 1826 }
1943 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY | 1827 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1944 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY | 1828 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1945 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK | 1829 return 0;
1946 ADAPTER_STATUS_P_PLL_LOCK))) { 1830 }
1947 ret = check_prc_pcc_state(val64, flag, rev_id, herc); 1831 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1832 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1833 return 0;
1834 }
1835 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1836 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1837 return 0;
1838 }
1839 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1840 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1841 return 0;
1842 }
1843 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1844 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1845 return 0;
1846 }
1847 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1848 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1849 return 0;
1850 }
1851 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1852 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1853 return 0;
1948 } 1854 }
1949 1855
1950 return ret; 1856 /*
1857 * In PCI 33 mode, the P_PLL is not used, and therefore,
 1858 * the P_PLL_LOCK bit in the adapter_status register will
1859 * not be asserted.
1860 */
1861 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1862 sp->device_type == XFRAME_II_DEVICE && mode !=
1863 PCI_MODE_PCI_33) {
1864 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1865 return 0;
1866 }
1867 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1868 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1869 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1870 return 0;
1871 }
1872 return 1;
1951} 1873}
1952 1874
1953/** 1875/**
@@ -1958,9 +1880,9 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1958 * 1880 *
1959 */ 1881 */
1960 1882
1961static void fix_mac_address(nic_t * sp) 1883static void fix_mac_address(struct s2io_nic * sp)
1962{ 1884{
1963 XENA_dev_config_t __iomem *bar0 = sp->bar0; 1885 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1964 u64 val64; 1886 u64 val64;
1965 int i = 0; 1887 int i = 0;
1966 1888
@@ -1986,11 +1908,11 @@ static void fix_mac_address(nic_t * sp)
1986 1908
1987static int start_nic(struct s2io_nic *nic) 1909static int start_nic(struct s2io_nic *nic)
1988{ 1910{
1989 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1911 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1990 struct net_device *dev = nic->dev; 1912 struct net_device *dev = nic->dev;
1991 register u64 val64 = 0; 1913 register u64 val64 = 0;
1992 u16 subid, i; 1914 u16 subid, i;
1993 mac_info_t *mac_control; 1915 struct mac_info *mac_control;
1994 struct config_param *config; 1916 struct config_param *config;
1995 1917
1996 mac_control = &nic->mac_control; 1918 mac_control = &nic->mac_control;
@@ -2052,7 +1974,7 @@ static int start_nic(struct s2io_nic *nic)
2052 * it. 1974 * it.
2053 */ 1975 */
2054 val64 = readq(&bar0->adapter_status); 1976 val64 = readq(&bar0->adapter_status);
2055 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) { 1977 if (!verify_xena_quiescence(nic)) {
2056 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name); 1978 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2057 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n", 1979 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2058 (unsigned long long) val64); 1980 (unsigned long long) val64);
@@ -2095,11 +2017,12 @@ static int start_nic(struct s2io_nic *nic)
2095/** 2017/**
2096 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb 2018 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2097 */ 2019 */
2098static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off) 2020static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2021 TxD *txdlp, int get_off)
2099{ 2022{
2100 nic_t *nic = fifo_data->nic; 2023 struct s2io_nic *nic = fifo_data->nic;
2101 struct sk_buff *skb; 2024 struct sk_buff *skb;
2102 TxD_t *txds; 2025 struct TxD *txds;
2103 u16 j, frg_cnt; 2026 u16 j, frg_cnt;
2104 2027
2105 txds = txdlp; 2028 txds = txdlp;
@@ -2113,7 +2036,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
2113 skb = (struct sk_buff *) ((unsigned long) 2036 skb = (struct sk_buff *) ((unsigned long)
2114 txds->Host_Control); 2037 txds->Host_Control);
2115 if (!skb) { 2038 if (!skb) {
2116 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds)); 2039 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2117 return NULL; 2040 return NULL;
2118 } 2041 }
2119 pci_unmap_single(nic->pdev, (dma_addr_t) 2042 pci_unmap_single(nic->pdev, (dma_addr_t)
@@ -2132,7 +2055,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
2132 frag->size, PCI_DMA_TODEVICE); 2055 frag->size, PCI_DMA_TODEVICE);
2133 } 2056 }
2134 } 2057 }
2135 memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds)); 2058 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2136 return(skb); 2059 return(skb);
2137} 2060}
2138 2061
@@ -2148,9 +2071,9 @@ static void free_tx_buffers(struct s2io_nic *nic)
2148{ 2071{
2149 struct net_device *dev = nic->dev; 2072 struct net_device *dev = nic->dev;
2150 struct sk_buff *skb; 2073 struct sk_buff *skb;
2151 TxD_t *txdp; 2074 struct TxD *txdp;
2152 int i, j; 2075 int i, j;
2153 mac_info_t *mac_control; 2076 struct mac_info *mac_control;
2154 struct config_param *config; 2077 struct config_param *config;
2155 int cnt = 0; 2078 int cnt = 0;
2156 2079
@@ -2159,7 +2082,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
2159 2082
2160 for (i = 0; i < config->tx_fifo_num; i++) { 2083 for (i = 0; i < config->tx_fifo_num; i++) {
2161 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) { 2084 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2162 txdp = (TxD_t *) mac_control->fifos[i].list_info[j]. 2085 txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
2163 list_virt_addr; 2086 list_virt_addr;
2164 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); 2087 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2165 if (skb) { 2088 if (skb) {
@@ -2187,10 +2110,10 @@ static void free_tx_buffers(struct s2io_nic *nic)
2187 2110
2188static void stop_nic(struct s2io_nic *nic) 2111static void stop_nic(struct s2io_nic *nic)
2189{ 2112{
2190 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2113 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2191 register u64 val64 = 0; 2114 register u64 val64 = 0;
2192 u16 interruptible; 2115 u16 interruptible;
2193 mac_info_t *mac_control; 2116 struct mac_info *mac_control;
2194 struct config_param *config; 2117 struct config_param *config;
2195 2118
2196 mac_control = &nic->mac_control; 2119 mac_control = &nic->mac_control;
@@ -2208,14 +2131,15 @@ static void stop_nic(struct s2io_nic *nic)
2208 writeq(val64, &bar0->adapter_control); 2131 writeq(val64, &bar0->adapter_control);
2209} 2132}
2210 2133
2211static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) 2134static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2135 sk_buff *skb)
2212{ 2136{
2213 struct net_device *dev = nic->dev; 2137 struct net_device *dev = nic->dev;
2214 struct sk_buff *frag_list; 2138 struct sk_buff *frag_list;
2215 void *tmp; 2139 void *tmp;
2216 2140
2217 /* Buffer-1 receives L3/L4 headers */ 2141 /* Buffer-1 receives L3/L4 headers */
2218 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single 2142 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2219 (nic->pdev, skb->data, l3l4hdr_size + 4, 2143 (nic->pdev, skb->data, l3l4hdr_size + 4,
2220 PCI_DMA_FROMDEVICE); 2144 PCI_DMA_FROMDEVICE);
2221 2145
@@ -2226,13 +2150,14 @@ static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2226 return -ENOMEM ; 2150 return -ENOMEM ;
2227 } 2151 }
2228 frag_list = skb_shinfo(skb)->frag_list; 2152 frag_list = skb_shinfo(skb)->frag_list;
2153 skb->truesize += frag_list->truesize;
2229 frag_list->next = NULL; 2154 frag_list->next = NULL;
2230 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); 2155 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2231 frag_list->data = tmp; 2156 frag_list->data = tmp;
2232 frag_list->tail = tmp; 2157 frag_list->tail = tmp;
2233 2158
2234 /* Buffer-2 receives L4 data payload */ 2159 /* Buffer-2 receives L4 data payload */
2235 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, 2160 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2236 frag_list->data, dev->mtu, 2161 frag_list->data, dev->mtu,
2237 PCI_DMA_FROMDEVICE); 2162 PCI_DMA_FROMDEVICE);
2238 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4); 2163 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
@@ -2266,18 +2191,16 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2266{ 2191{
2267 struct net_device *dev = nic->dev; 2192 struct net_device *dev = nic->dev;
2268 struct sk_buff *skb; 2193 struct sk_buff *skb;
2269 RxD_t *rxdp; 2194 struct RxD_t *rxdp;
2270 int off, off1, size, block_no, block_no1; 2195 int off, off1, size, block_no, block_no1;
2271 u32 alloc_tab = 0; 2196 u32 alloc_tab = 0;
2272 u32 alloc_cnt; 2197 u32 alloc_cnt;
2273 mac_info_t *mac_control; 2198 struct mac_info *mac_control;
2274 struct config_param *config; 2199 struct config_param *config;
2275 u64 tmp; 2200 u64 tmp;
2276 buffAdd_t *ba; 2201 struct buffAdd *ba;
2277#ifndef CONFIG_S2IO_NAPI
2278 unsigned long flags; 2202 unsigned long flags;
2279#endif 2203 struct RxD_t *first_rxdp = NULL;
2280 RxD_t *first_rxdp = NULL;
2281 2204
2282 mac_control = &nic->mac_control; 2205 mac_control = &nic->mac_control;
2283 config = &nic->config; 2206 config = &nic->config;
@@ -2320,12 +2243,15 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2320 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2243 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2321 dev->name, rxdp); 2244 dev->name, rxdp);
2322 } 2245 }
2323#ifndef CONFIG_S2IO_NAPI 2246 if(!napi) {
2324 spin_lock_irqsave(&nic->put_lock, flags); 2247 spin_lock_irqsave(&nic->put_lock, flags);
2325 mac_control->rings[ring_no].put_pos = 2248 mac_control->rings[ring_no].put_pos =
2326 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off; 2249 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2327 spin_unlock_irqrestore(&nic->put_lock, flags); 2250 spin_unlock_irqrestore(&nic->put_lock, flags);
2328#endif 2251 } else {
2252 mac_control->rings[ring_no].put_pos =
2253 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2254 }
2329 if ((rxdp->Control_1 & RXD_OWN_XENA) && 2255 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2330 ((nic->rxd_mode >= RXD_MODE_3A) && 2256 ((nic->rxd_mode >= RXD_MODE_3A) &&
2331 (rxdp->Control_2 & BIT(0)))) { 2257 (rxdp->Control_2 & BIT(0)))) {
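The hunk above replaces the compile-time CONFIG_S2IO_NAPI guard with a runtime check on a driver-wide napi flag. As a rough sketch only — the actual declaration is made elsewhere in this patch and may go through a driver-specific helper macro, so the name, default value and permission bits below are assumptions — such a flag is typically exposed as a module parameter:

#include <linux/module.h>

/* Sketch: runtime switch between NAPI Rx polling and per-packet interrupts.
 * The default of 1 (NAPI enabled) is an assumption, not taken from this patch. */
static int napi = 1;
module_param(napi, int, 0);
MODULE_PARM_DESC(napi, "Enable NAPI Rx polling (1) or classic Rx interrupts (0)");

With a runtime flag both paths stay compiled in — the locked put_pos update for the interrupt-driven case and the lock-free update for the NAPI case — and the choice is made per module load instead of per kernel build.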
@@ -2356,9 +2282,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2356 } 2282 }
2357 if (nic->rxd_mode == RXD_MODE_1) { 2283 if (nic->rxd_mode == RXD_MODE_1) {
2358 /* 1 buffer mode - normal operation mode */ 2284 /* 1 buffer mode - normal operation mode */
2359 memset(rxdp, 0, sizeof(RxD1_t)); 2285 memset(rxdp, 0, sizeof(struct RxD1));
2360 skb_reserve(skb, NET_IP_ALIGN); 2286 skb_reserve(skb, NET_IP_ALIGN);
2361 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single 2287 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2362 (nic->pdev, skb->data, size - NET_IP_ALIGN, 2288 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2363 PCI_DMA_FROMDEVICE); 2289 PCI_DMA_FROMDEVICE);
2364 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); 2290 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
@@ -2375,7 +2301,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2375 * payload 2301 * payload
2376 */ 2302 */
2377 2303
2378 memset(rxdp, 0, sizeof(RxD3_t)); 2304 memset(rxdp, 0, sizeof(struct RxD3));
2379 ba = &mac_control->rings[ring_no].ba[block_no][off]; 2305 ba = &mac_control->rings[ring_no].ba[block_no][off];
2380 skb_reserve(skb, BUF0_LEN); 2306 skb_reserve(skb, BUF0_LEN);
2381 tmp = (u64)(unsigned long) skb->data; 2307 tmp = (u64)(unsigned long) skb->data;
@@ -2384,13 +2310,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2384 skb->data = (void *) (unsigned long)tmp; 2310 skb->data = (void *) (unsigned long)tmp;
2385 skb->tail = (void *) (unsigned long)tmp; 2311 skb->tail = (void *) (unsigned long)tmp;
2386 2312
2387 if (!(((RxD3_t*)rxdp)->Buffer0_ptr)) 2313 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2388 ((RxD3_t*)rxdp)->Buffer0_ptr = 2314 ((struct RxD3*)rxdp)->Buffer0_ptr =
2389 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2315 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2390 PCI_DMA_FROMDEVICE); 2316 PCI_DMA_FROMDEVICE);
2391 else 2317 else
2392 pci_dma_sync_single_for_device(nic->pdev, 2318 pci_dma_sync_single_for_device(nic->pdev,
2393 (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, 2319 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2394 BUF0_LEN, PCI_DMA_FROMDEVICE); 2320 BUF0_LEN, PCI_DMA_FROMDEVICE);
2395 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2321 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2396 if (nic->rxd_mode == RXD_MODE_3B) { 2322 if (nic->rxd_mode == RXD_MODE_3B) {
@@ -2400,13 +2326,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2400 * Buffer2 will have L3/L4 header plus 2326 * Buffer2 will have L3/L4 header plus
2401 * L4 payload 2327 * L4 payload
2402 */ 2328 */
2403 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single 2329 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2404 (nic->pdev, skb->data, dev->mtu + 4, 2330 (nic->pdev, skb->data, dev->mtu + 4,
2405 PCI_DMA_FROMDEVICE); 2331 PCI_DMA_FROMDEVICE);
2406 2332
2407 /* Buffer-1 will be dummy buffer. Not used */ 2333 /* Buffer-1 will be dummy buffer. Not used */
2408 if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) { 2334 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2409 ((RxD3_t*)rxdp)->Buffer1_ptr = 2335 ((struct RxD3*)rxdp)->Buffer1_ptr =
2410 pci_map_single(nic->pdev, 2336 pci_map_single(nic->pdev,
2411 ba->ba_1, BUF1_LEN, 2337 ba->ba_1, BUF1_LEN,
2412 PCI_DMA_FROMDEVICE); 2338 PCI_DMA_FROMDEVICE);
@@ -2466,9 +2392,9 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2466 struct net_device *dev = sp->dev; 2392 struct net_device *dev = sp->dev;
2467 int j; 2393 int j;
2468 struct sk_buff *skb; 2394 struct sk_buff *skb;
2469 RxD_t *rxdp; 2395 struct RxD_t *rxdp;
2470 mac_info_t *mac_control; 2396 struct mac_info *mac_control;
2471 buffAdd_t *ba; 2397 struct buffAdd *ba;
2472 2398
2473 mac_control = &sp->mac_control; 2399 mac_control = &sp->mac_control;
2474 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { 2400 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
@@ -2481,41 +2407,41 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2481 } 2407 }
2482 if (sp->rxd_mode == RXD_MODE_1) { 2408 if (sp->rxd_mode == RXD_MODE_1) {
2483 pci_unmap_single(sp->pdev, (dma_addr_t) 2409 pci_unmap_single(sp->pdev, (dma_addr_t)
2484 ((RxD1_t*)rxdp)->Buffer0_ptr, 2410 ((struct RxD1*)rxdp)->Buffer0_ptr,
2485 dev->mtu + 2411 dev->mtu +
2486 HEADER_ETHERNET_II_802_3_SIZE 2412 HEADER_ETHERNET_II_802_3_SIZE
2487 + HEADER_802_2_SIZE + 2413 + HEADER_802_2_SIZE +
2488 HEADER_SNAP_SIZE, 2414 HEADER_SNAP_SIZE,
2489 PCI_DMA_FROMDEVICE); 2415 PCI_DMA_FROMDEVICE);
2490 memset(rxdp, 0, sizeof(RxD1_t)); 2416 memset(rxdp, 0, sizeof(struct RxD1));
2491 } else if(sp->rxd_mode == RXD_MODE_3B) { 2417 } else if(sp->rxd_mode == RXD_MODE_3B) {
2492 ba = &mac_control->rings[ring_no]. 2418 ba = &mac_control->rings[ring_no].
2493 ba[blk][j]; 2419 ba[blk][j];
2494 pci_unmap_single(sp->pdev, (dma_addr_t) 2420 pci_unmap_single(sp->pdev, (dma_addr_t)
2495 ((RxD3_t*)rxdp)->Buffer0_ptr, 2421 ((struct RxD3*)rxdp)->Buffer0_ptr,
2496 BUF0_LEN, 2422 BUF0_LEN,
2497 PCI_DMA_FROMDEVICE); 2423 PCI_DMA_FROMDEVICE);
2498 pci_unmap_single(sp->pdev, (dma_addr_t) 2424 pci_unmap_single(sp->pdev, (dma_addr_t)
2499 ((RxD3_t*)rxdp)->Buffer1_ptr, 2425 ((struct RxD3*)rxdp)->Buffer1_ptr,
2500 BUF1_LEN, 2426 BUF1_LEN,
2501 PCI_DMA_FROMDEVICE); 2427 PCI_DMA_FROMDEVICE);
2502 pci_unmap_single(sp->pdev, (dma_addr_t) 2428 pci_unmap_single(sp->pdev, (dma_addr_t)
2503 ((RxD3_t*)rxdp)->Buffer2_ptr, 2429 ((struct RxD3*)rxdp)->Buffer2_ptr,
2504 dev->mtu + 4, 2430 dev->mtu + 4,
2505 PCI_DMA_FROMDEVICE); 2431 PCI_DMA_FROMDEVICE);
2506 memset(rxdp, 0, sizeof(RxD3_t)); 2432 memset(rxdp, 0, sizeof(struct RxD3));
2507 } else { 2433 } else {
2508 pci_unmap_single(sp->pdev, (dma_addr_t) 2434 pci_unmap_single(sp->pdev, (dma_addr_t)
2509 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2435 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2510 PCI_DMA_FROMDEVICE); 2436 PCI_DMA_FROMDEVICE);
2511 pci_unmap_single(sp->pdev, (dma_addr_t) 2437 pci_unmap_single(sp->pdev, (dma_addr_t)
2512 ((RxD3_t*)rxdp)->Buffer1_ptr, 2438 ((struct RxD3*)rxdp)->Buffer1_ptr,
2513 l3l4hdr_size + 4, 2439 l3l4hdr_size + 4,
2514 PCI_DMA_FROMDEVICE); 2440 PCI_DMA_FROMDEVICE);
2515 pci_unmap_single(sp->pdev, (dma_addr_t) 2441 pci_unmap_single(sp->pdev, (dma_addr_t)
2516 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu, 2442 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2517 PCI_DMA_FROMDEVICE); 2443 PCI_DMA_FROMDEVICE);
2518 memset(rxdp, 0, sizeof(RxD3_t)); 2444 memset(rxdp, 0, sizeof(struct RxD3));
2519 } 2445 }
2520 dev_kfree_skb(skb); 2446 dev_kfree_skb(skb);
2521 atomic_dec(&sp->rx_bufs_left[ring_no]); 2447 atomic_dec(&sp->rx_bufs_left[ring_no]);
@@ -2535,7 +2461,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
2535{ 2461{
2536 struct net_device *dev = sp->dev; 2462 struct net_device *dev = sp->dev;
2537 int i, blk = 0, buf_cnt = 0; 2463 int i, blk = 0, buf_cnt = 0;
2538 mac_info_t *mac_control; 2464 struct mac_info *mac_control;
2539 struct config_param *config; 2465 struct config_param *config;
2540 2466
2541 mac_control = &sp->mac_control; 2467 mac_control = &sp->mac_control;
@@ -2568,15 +2494,13 @@ static void free_rx_buffers(struct s2io_nic *sp)
2568 * 0 on success and 1 if there are No Rx packets to be processed. 2494 * 0 on success and 1 if there are No Rx packets to be processed.
2569 */ 2495 */
2570 2496
2571#if defined(CONFIG_S2IO_NAPI)
2572static int s2io_poll(struct net_device *dev, int *budget) 2497static int s2io_poll(struct net_device *dev, int *budget)
2573{ 2498{
2574 nic_t *nic = dev->priv; 2499 struct s2io_nic *nic = dev->priv;
2575 int pkt_cnt = 0, org_pkts_to_process; 2500 int pkt_cnt = 0, org_pkts_to_process;
2576 mac_info_t *mac_control; 2501 struct mac_info *mac_control;
2577 struct config_param *config; 2502 struct config_param *config;
2578 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2503 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2579 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2580 int i; 2504 int i;
2581 2505
2582 atomic_inc(&nic->isr_cnt); 2506 atomic_inc(&nic->isr_cnt);
@@ -2588,8 +2512,8 @@ static int s2io_poll(struct net_device *dev, int *budget)
2588 nic->pkts_to_process = dev->quota; 2512 nic->pkts_to_process = dev->quota;
2589 org_pkts_to_process = nic->pkts_to_process; 2513 org_pkts_to_process = nic->pkts_to_process;
2590 2514
2591 writeq(val64, &bar0->rx_traffic_int); 2515 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2592 val64 = readl(&bar0->rx_traffic_int); 2516 readl(&bar0->rx_traffic_int);
2593 2517
2594 for (i = 0; i < config->rx_ring_num; i++) { 2518 for (i = 0; i < config->rx_ring_num; i++) {
2595 rx_intr_handler(&mac_control->rings[i]); 2519 rx_intr_handler(&mac_control->rings[i]);
@@ -2615,7 +2539,7 @@ static int s2io_poll(struct net_device *dev, int *budget)
2615 } 2539 }
2616 /* Re enable the Rx interrupts. */ 2540 /* Re enable the Rx interrupts. */
2617 writeq(0x0, &bar0->rx_traffic_mask); 2541 writeq(0x0, &bar0->rx_traffic_mask);
2618 val64 = readl(&bar0->rx_traffic_mask); 2542 readl(&bar0->rx_traffic_mask);
2619 atomic_dec(&nic->isr_cnt); 2543 atomic_dec(&nic->isr_cnt);
2620 return 0; 2544 return 0;
2621 2545
@@ -2633,7 +2557,6 @@ no_rx:
2633 atomic_dec(&nic->isr_cnt); 2557 atomic_dec(&nic->isr_cnt);
2634 return 1; 2558 return 1;
2635} 2559}
2636#endif
2637 2560
2638#ifdef CONFIG_NET_POLL_CONTROLLER 2561#ifdef CONFIG_NET_POLL_CONTROLLER
2639/** 2562/**
@@ -2647,10 +2570,10 @@ no_rx:
2647 */ 2570 */
2648static void s2io_netpoll(struct net_device *dev) 2571static void s2io_netpoll(struct net_device *dev)
2649{ 2572{
2650 nic_t *nic = dev->priv; 2573 struct s2io_nic *nic = dev->priv;
2651 mac_info_t *mac_control; 2574 struct mac_info *mac_control;
2652 struct config_param *config; 2575 struct config_param *config;
2653 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2576 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2654 u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2577 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2655 int i; 2578 int i;
2656 2579
@@ -2699,17 +2622,15 @@ static void s2io_netpoll(struct net_device *dev)
2699 * Return Value: 2622 * Return Value:
2700 * NONE. 2623 * NONE.
2701 */ 2624 */
2702static void rx_intr_handler(ring_info_t *ring_data) 2625static void rx_intr_handler(struct ring_info *ring_data)
2703{ 2626{
2704 nic_t *nic = ring_data->nic; 2627 struct s2io_nic *nic = ring_data->nic;
2705 struct net_device *dev = (struct net_device *) nic->dev; 2628 struct net_device *dev = (struct net_device *) nic->dev;
2706 int get_block, put_block, put_offset; 2629 int get_block, put_block, put_offset;
2707 rx_curr_get_info_t get_info, put_info; 2630 struct rx_curr_get_info get_info, put_info;
2708 RxD_t *rxdp; 2631 struct RxD_t *rxdp;
2709 struct sk_buff *skb; 2632 struct sk_buff *skb;
2710#ifndef CONFIG_S2IO_NAPI
2711 int pkt_cnt = 0; 2633 int pkt_cnt = 0;
2712#endif
2713 int i; 2634 int i;
2714 2635
2715 spin_lock(&nic->rx_lock); 2636 spin_lock(&nic->rx_lock);
@@ -2722,19 +2643,21 @@ static void rx_intr_handler(ring_info_t *ring_data)
2722 2643
2723 get_info = ring_data->rx_curr_get_info; 2644 get_info = ring_data->rx_curr_get_info;
2724 get_block = get_info.block_index; 2645 get_block = get_info.block_index;
2725 put_info = ring_data->rx_curr_put_info; 2646 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2726 put_block = put_info.block_index; 2647 put_block = put_info.block_index;
2727 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr; 2648 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2728#ifndef CONFIG_S2IO_NAPI 2649 if (!napi) {
2729 spin_lock(&nic->put_lock); 2650 spin_lock(&nic->put_lock);
2730 put_offset = ring_data->put_pos; 2651 put_offset = ring_data->put_pos;
2731 spin_unlock(&nic->put_lock); 2652 spin_unlock(&nic->put_lock);
2732#else 2653 } else
2733 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) + 2654 put_offset = ring_data->put_pos;
2734 put_info.offset; 2655
2735#endif
2736 while (RXD_IS_UP2DT(rxdp)) { 2656 while (RXD_IS_UP2DT(rxdp)) {
2737 /* If your are next to put index then it's FIFO full condition */ 2657 /*
2658 * If your are next to put index then it's
2659 * FIFO full condition
2660 */
2738 if ((get_block == put_block) && 2661 if ((get_block == put_block) &&
2739 (get_info.offset + 1) == put_info.offset) { 2662 (get_info.offset + 1) == put_info.offset) {
2740 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); 2663 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
@@ -2750,7 +2673,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
2750 } 2673 }
2751 if (nic->rxd_mode == RXD_MODE_1) { 2674 if (nic->rxd_mode == RXD_MODE_1) {
2752 pci_unmap_single(nic->pdev, (dma_addr_t) 2675 pci_unmap_single(nic->pdev, (dma_addr_t)
2753 ((RxD1_t*)rxdp)->Buffer0_ptr, 2676 ((struct RxD1*)rxdp)->Buffer0_ptr,
2754 dev->mtu + 2677 dev->mtu +
2755 HEADER_ETHERNET_II_802_3_SIZE + 2678 HEADER_ETHERNET_II_802_3_SIZE +
2756 HEADER_802_2_SIZE + 2679 HEADER_802_2_SIZE +
@@ -2758,22 +2681,22 @@ static void rx_intr_handler(ring_info_t *ring_data)
2758 PCI_DMA_FROMDEVICE); 2681 PCI_DMA_FROMDEVICE);
2759 } else if (nic->rxd_mode == RXD_MODE_3B) { 2682 } else if (nic->rxd_mode == RXD_MODE_3B) {
2760 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2683 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2761 ((RxD3_t*)rxdp)->Buffer0_ptr, 2684 ((struct RxD3*)rxdp)->Buffer0_ptr,
2762 BUF0_LEN, PCI_DMA_FROMDEVICE); 2685 BUF0_LEN, PCI_DMA_FROMDEVICE);
2763 pci_unmap_single(nic->pdev, (dma_addr_t) 2686 pci_unmap_single(nic->pdev, (dma_addr_t)
2764 ((RxD3_t*)rxdp)->Buffer2_ptr, 2687 ((struct RxD3*)rxdp)->Buffer2_ptr,
2765 dev->mtu + 4, 2688 dev->mtu + 4,
2766 PCI_DMA_FROMDEVICE); 2689 PCI_DMA_FROMDEVICE);
2767 } else { 2690 } else {
2768 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2691 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2769 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2692 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2770 PCI_DMA_FROMDEVICE); 2693 PCI_DMA_FROMDEVICE);
2771 pci_unmap_single(nic->pdev, (dma_addr_t) 2694 pci_unmap_single(nic->pdev, (dma_addr_t)
2772 ((RxD3_t*)rxdp)->Buffer1_ptr, 2695 ((struct RxD3*)rxdp)->Buffer1_ptr,
2773 l3l4hdr_size + 4, 2696 l3l4hdr_size + 4,
2774 PCI_DMA_FROMDEVICE); 2697 PCI_DMA_FROMDEVICE);
2775 pci_unmap_single(nic->pdev, (dma_addr_t) 2698 pci_unmap_single(nic->pdev, (dma_addr_t)
2776 ((RxD3_t*)rxdp)->Buffer2_ptr, 2699 ((struct RxD3*)rxdp)->Buffer2_ptr,
2777 dev->mtu, PCI_DMA_FROMDEVICE); 2700 dev->mtu, PCI_DMA_FROMDEVICE);
2778 } 2701 }
2779 prefetch(skb->data); 2702 prefetch(skb->data);
@@ -2792,20 +2715,17 @@ static void rx_intr_handler(ring_info_t *ring_data)
2792 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 2715 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2793 } 2716 }
2794 2717
2795#ifdef CONFIG_S2IO_NAPI
2796 nic->pkts_to_process -= 1; 2718 nic->pkts_to_process -= 1;
2797 if (!nic->pkts_to_process) 2719 if ((napi) && (!nic->pkts_to_process))
2798 break; 2720 break;
2799#else
2800 pkt_cnt++; 2721 pkt_cnt++;
2801 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 2722 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2802 break; 2723 break;
2803#endif
2804 } 2724 }
2805 if (nic->lro) { 2725 if (nic->lro) {
2806 /* Clear all LRO sessions before exiting */ 2726 /* Clear all LRO sessions before exiting */
2807 for (i=0; i<MAX_LRO_SESSIONS; i++) { 2727 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2808 lro_t *lro = &nic->lro0_n[i]; 2728 struct lro *lro = &nic->lro0_n[i];
2809 if (lro->in_use) { 2729 if (lro->in_use) {
2810 update_L3L4_header(nic, lro); 2730 update_L3L4_header(nic, lro);
2811 queue_rx_frame(lro->parent); 2731 queue_rx_frame(lro->parent);
@@ -2829,17 +2749,17 @@ static void rx_intr_handler(ring_info_t *ring_data)
2829 * NONE 2749 * NONE
2830 */ 2750 */
2831 2751
2832static void tx_intr_handler(fifo_info_t *fifo_data) 2752static void tx_intr_handler(struct fifo_info *fifo_data)
2833{ 2753{
2834 nic_t *nic = fifo_data->nic; 2754 struct s2io_nic *nic = fifo_data->nic;
2835 struct net_device *dev = (struct net_device *) nic->dev; 2755 struct net_device *dev = (struct net_device *) nic->dev;
2836 tx_curr_get_info_t get_info, put_info; 2756 struct tx_curr_get_info get_info, put_info;
2837 struct sk_buff *skb; 2757 struct sk_buff *skb;
2838 TxD_t *txdlp; 2758 struct TxD *txdlp;
2839 2759
2840 get_info = fifo_data->tx_curr_get_info; 2760 get_info = fifo_data->tx_curr_get_info;
2841 put_info = fifo_data->tx_curr_put_info; 2761 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2842 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset]. 2762 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2843 list_virt_addr; 2763 list_virt_addr;
2844 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && 2764 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2845 (get_info.offset != put_info.offset) && 2765 (get_info.offset != put_info.offset) &&
@@ -2854,11 +2774,10 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
2854 } 2774 }
2855 if ((err >> 48) == 0xA) { 2775 if ((err >> 48) == 0xA) {
2856 DBG_PRINT(TX_DBG, "TxD returned due \ 2776 DBG_PRINT(TX_DBG, "TxD returned due \
2857to loss of link\n"); 2777 to loss of link\n");
2858 } 2778 }
2859 else { 2779 else {
2860 DBG_PRINT(ERR_DBG, "***TxD error \ 2780 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2861%llx\n", err);
2862 } 2781 }
2863 } 2782 }
2864 2783
@@ -2877,7 +2796,7 @@ to loss of link\n");
2877 get_info.offset++; 2796 get_info.offset++;
2878 if (get_info.offset == get_info.fifo_len + 1) 2797 if (get_info.offset == get_info.fifo_len + 1)
2879 get_info.offset = 0; 2798 get_info.offset = 0;
2880 txdlp = (TxD_t *) fifo_data->list_info 2799 txdlp = (struct TxD *) fifo_data->list_info
2881 [get_info.offset].list_virt_addr; 2800 [get_info.offset].list_virt_addr;
2882 fifo_data->tx_curr_get_info.offset = 2801 fifo_data->tx_curr_get_info.offset =
2883 get_info.offset; 2802 get_info.offset;
@@ -2902,8 +2821,8 @@ to loss of link\n");
2902static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev) 2821static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2903{ 2822{
2904 u64 val64 = 0x0; 2823 u64 val64 = 0x0;
2905 nic_t *sp = dev->priv; 2824 struct s2io_nic *sp = dev->priv;
2906 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2825 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2907 2826
2908 //address transaction 2827 //address transaction
2909 val64 = val64 | MDIO_MMD_INDX_ADDR(addr) 2828 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
@@ -2951,8 +2870,8 @@ static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2951{ 2870{
2952 u64 val64 = 0x0; 2871 u64 val64 = 0x0;
2953 u64 rval64 = 0x0; 2872 u64 rval64 = 0x0;
2954 nic_t *sp = dev->priv; 2873 struct s2io_nic *sp = dev->priv;
2955 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2874 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2956 2875
2957 /* address transaction */ 2876 /* address transaction */
2958 val64 = val64 | MDIO_MMD_INDX_ADDR(addr) 2877 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
@@ -3055,8 +2974,8 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
3055 u64 val64 = 0x0; 2974 u64 val64 = 0x0;
3056 u64 addr = 0x0; 2975 u64 addr = 0x0;
3057 2976
3058 nic_t *sp = dev->priv; 2977 struct s2io_nic *sp = dev->priv;
3059 StatInfo_t *stat_info = sp->mac_control.stats_info; 2978 struct stat_block *stat_info = sp->mac_control.stats_info;
3060 2979
3061 /* Check the communication with the MDIO slave */ 2980 /* Check the communication with the MDIO slave */
3062 addr = 0x0000; 2981 addr = 0x0000;
@@ -3154,10 +3073,12 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
3154static void alarm_intr_handler(struct s2io_nic *nic) 3073static void alarm_intr_handler(struct s2io_nic *nic)
3155{ 3074{
3156 struct net_device *dev = (struct net_device *) nic->dev; 3075 struct net_device *dev = (struct net_device *) nic->dev;
3157 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3076 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3158 register u64 val64 = 0, err_reg = 0; 3077 register u64 val64 = 0, err_reg = 0;
3159 u64 cnt; 3078 u64 cnt;
3160 int i; 3079 int i;
3080 if (atomic_read(&nic->card_state) == CARD_DOWN)
3081 return;
3161 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0; 3082 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3162 /* Handling the XPAK counters update */ 3083 /* Handling the XPAK counters update */
3163 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) { 3084 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
@@ -3297,6 +3218,25 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
3297 } 3218 }
3298 return ret; 3219 return ret;
3299} 3220}
3221/*
3222 * check_pci_device_id - Checks if the device id is supported
3223 * @id : device id
3224 * Description: Function to check if the pci device id is supported by driver.
3225 * Return value: Actual device id if supported else PCI_ANY_ID
3226 */
3227static u16 check_pci_device_id(u16 id)
3228{
3229 switch (id) {
3230 case PCI_DEVICE_ID_HERC_WIN:
3231 case PCI_DEVICE_ID_HERC_UNI:
3232 return XFRAME_II_DEVICE;
3233 case PCI_DEVICE_ID_S2IO_UNI:
3234 case PCI_DEVICE_ID_S2IO_WIN:
3235 return XFRAME_I_DEVICE;
3236 default:
3237 return PCI_ANY_ID;
3238 }
3239}
3300 3240
3301/** 3241/**
3302 * s2io_reset - Resets the card. 3242 * s2io_reset - Resets the card.
@@ -3308,42 +3248,57 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
3308 * void. 3248 * void.
3309 */ 3249 */
3310 3250
3311static void s2io_reset(nic_t * sp) 3251static void s2io_reset(struct s2io_nic * sp)
3312{ 3252{
3313 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3253 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3314 u64 val64; 3254 u64 val64;
3315 u16 subid, pci_cmd; 3255 u16 subid, pci_cmd;
3256 int i;
3257 u16 val16;
3258 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3259 __FUNCTION__, sp->dev->name);
3316 3260
3317 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ 3261 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3318 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); 3262 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3319 3263
3264 if (sp->device_type == XFRAME_II_DEVICE) {
3265 int ret;
3266 ret = pci_set_power_state(sp->pdev, 3);
3267 if (!ret)
3268 ret = pci_set_power_state(sp->pdev, 0);
3269 else {
3270 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3271 __FUNCTION__);
3272 goto old_way;
3273 }
3274 msleep(20);
3275 goto new_way;
3276 }
3277old_way:
3320 val64 = SW_RESET_ALL; 3278 val64 = SW_RESET_ALL;
3321 writeq(val64, &bar0->sw_reset); 3279 writeq(val64, &bar0->sw_reset);
3322 3280new_way:
3323 /*
3324 * At this stage, if the PCI write is indeed completed, the
3325 * card is reset and so is the PCI Config space of the device.
3326 * So a read cannot be issued at this stage on any of the
3327 * registers to ensure the write into "sw_reset" register
3328 * has gone through.
3329 * Question: Is there any system call that will explicitly force
3330 * all the write commands still pending on the bus to be pushed
3331 * through?
3332 * As of now I'am just giving a 250ms delay and hoping that the
3333 * PCI write to sw_reset register is done by this time.
3334 */
3335 msleep(250);
3336 if (strstr(sp->product_name, "CX4")) { 3281 if (strstr(sp->product_name, "CX4")) {
3337 msleep(750); 3282 msleep(750);
3338 } 3283 }
3284 msleep(250);
3285 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3339 3286
3340 /* Restore the PCI state saved during initialization. */ 3287 /* Restore the PCI state saved during initialization. */
3341 pci_restore_state(sp->pdev); 3288 pci_restore_state(sp->pdev);
3342 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 3289 pci_read_config_word(sp->pdev, 0x2, &val16);
3343 pci_cmd); 3290 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3344 s2io_init_pci(sp); 3291 break;
3292 msleep(200);
3293 }
3345 3294
3346 msleep(250); 3295 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3296 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3297 }
3298
3299 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3300
3301 s2io_init_pci(sp);
3347 3302
3348 /* Set swapper to enable I/O register access */ 3303 /* Set swapper to enable I/O register access */
3349 s2io_set_swapper(sp); 3304 s2io_set_swapper(sp);
@@ -3399,10 +3354,10 @@ static void s2io_reset(nic_t * sp)
3399 * SUCCESS on success and FAILURE on failure. 3354 * SUCCESS on success and FAILURE on failure.
3400 */ 3355 */
3401 3356
3402static int s2io_set_swapper(nic_t * sp) 3357static int s2io_set_swapper(struct s2io_nic * sp)
3403{ 3358{
3404 struct net_device *dev = sp->dev; 3359 struct net_device *dev = sp->dev;
3405 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3406 u64 val64, valt, valr; 3361 u64 val64, valt, valr;
3407 3362
3408 /* 3363 /*
@@ -3527,9 +3482,9 @@ static int s2io_set_swapper(nic_t * sp)
3527 return SUCCESS; 3482 return SUCCESS;
3528} 3483}
3529 3484
3530static int wait_for_msix_trans(nic_t *nic, int i) 3485static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3531{ 3486{
3532 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3487 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3533 u64 val64; 3488 u64 val64;
3534 int ret = 0, cnt = 0; 3489 int ret = 0, cnt = 0;
3535 3490
@@ -3548,9 +3503,9 @@ static int wait_for_msix_trans(nic_t *nic, int i)
3548 return ret; 3503 return ret;
3549} 3504}
3550 3505
3551static void restore_xmsi_data(nic_t *nic) 3506static void restore_xmsi_data(struct s2io_nic *nic)
3552{ 3507{
3553 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3508 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3554 u64 val64; 3509 u64 val64;
3555 int i; 3510 int i;
3556 3511
@@ -3566,9 +3521,9 @@ static void restore_xmsi_data(nic_t *nic)
3566 } 3521 }
3567} 3522}
3568 3523
3569static void store_xmsi_data(nic_t *nic) 3524static void store_xmsi_data(struct s2io_nic *nic)
3570{ 3525{
3571 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3526 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3572 u64 val64, addr, data; 3527 u64 val64, addr, data;
3573 int i; 3528 int i;
3574 3529
@@ -3589,9 +3544,9 @@ static void store_xmsi_data(nic_t *nic)
3589 } 3544 }
3590} 3545}
3591 3546
3592int s2io_enable_msi(nic_t *nic) 3547int s2io_enable_msi(struct s2io_nic *nic)
3593{ 3548{
3594 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3549 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3595 u16 msi_ctrl, msg_val; 3550 u16 msi_ctrl, msg_val;
3596 struct config_param *config = &nic->config; 3551 struct config_param *config = &nic->config;
3597 struct net_device *dev = nic->dev; 3552 struct net_device *dev = nic->dev;
@@ -3639,9 +3594,9 @@ int s2io_enable_msi(nic_t *nic)
3639 return 0; 3594 return 0;
3640} 3595}
3641 3596
3642static int s2io_enable_msi_x(nic_t *nic) 3597static int s2io_enable_msi_x(struct s2io_nic *nic)
3643{ 3598{
3644 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3599 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3645 u64 tx_mat, rx_mat; 3600 u64 tx_mat, rx_mat;
3646 u16 msi_control; /* Temp variable */ 3601 u16 msi_control; /* Temp variable */
3647 int ret, i, j, msix_indx = 1; 3602 int ret, i, j, msix_indx = 1;
@@ -3749,7 +3704,7 @@ static int s2io_enable_msi_x(nic_t *nic)
3749 3704
3750static int s2io_open(struct net_device *dev) 3705static int s2io_open(struct net_device *dev)
3751{ 3706{
3752 nic_t *sp = dev->priv; 3707 struct s2io_nic *sp = dev->priv;
3753 int err = 0; 3708 int err = 0;
3754 3709
3755 /* 3710 /*
@@ -3802,7 +3757,7 @@ hw_init_failed:
3802 3757
3803static int s2io_close(struct net_device *dev) 3758static int s2io_close(struct net_device *dev)
3804{ 3759{
3805 nic_t *sp = dev->priv; 3760 struct s2io_nic *sp = dev->priv;
3806 3761
3807 flush_scheduled_work(); 3762 flush_scheduled_work();
3808 netif_stop_queue(dev); 3763 netif_stop_queue(dev);
@@ -3828,15 +3783,15 @@ static int s2io_close(struct net_device *dev)
3828 3783
3829static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) 3784static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3830{ 3785{
3831 nic_t *sp = dev->priv; 3786 struct s2io_nic *sp = dev->priv;
3832 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off; 3787 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3833 register u64 val64; 3788 register u64 val64;
3834 TxD_t *txdp; 3789 struct TxD *txdp;
3835 TxFIFO_element_t __iomem *tx_fifo; 3790 struct TxFIFO_element __iomem *tx_fifo;
3836 unsigned long flags; 3791 unsigned long flags;
3837 u16 vlan_tag = 0; 3792 u16 vlan_tag = 0;
3838 int vlan_priority = 0; 3793 int vlan_priority = 0;
3839 mac_info_t *mac_control; 3794 struct mac_info *mac_control;
3840 struct config_param *config; 3795 struct config_param *config;
3841 int offload_type; 3796 int offload_type;
3842 3797
@@ -3864,7 +3819,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3864 3819
3865 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset; 3820 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3866 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset; 3821 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3867 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off]. 3822 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
3868 list_virt_addr; 3823 list_virt_addr;
3869 3824
3870 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1; 3825 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
@@ -3887,12 +3842,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3887 } 3842 }
3888 3843
3889 offload_type = s2io_offload_type(skb); 3844 offload_type = s2io_offload_type(skb);
3890#ifdef NETIF_F_TSO
3891 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 3845 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3892 txdp->Control_1 |= TXD_TCP_LSO_EN; 3846 txdp->Control_1 |= TXD_TCP_LSO_EN;
3893 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); 3847 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3894 } 3848 }
3895#endif
3896 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3849 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3897 txdp->Control_2 |= 3850 txdp->Control_2 |=
3898 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | 3851 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
@@ -3993,13 +3946,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3993static void 3946static void
3994s2io_alarm_handle(unsigned long data) 3947s2io_alarm_handle(unsigned long data)
3995{ 3948{
3996 nic_t *sp = (nic_t *)data; 3949 struct s2io_nic *sp = (struct s2io_nic *)data;
3997 3950
3998 alarm_intr_handler(sp); 3951 alarm_intr_handler(sp);
3999 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3952 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4000} 3953}
4001 3954
4002static int s2io_chk_rx_buffers(nic_t *sp, int rng_n) 3955static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4003{ 3956{
4004 int rxb_size, level; 3957 int rxb_size, level;
4005 3958
@@ -4031,9 +3984,9 @@ static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
4031static irqreturn_t s2io_msi_handle(int irq, void *dev_id) 3984static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4032{ 3985{
4033 struct net_device *dev = (struct net_device *) dev_id; 3986 struct net_device *dev = (struct net_device *) dev_id;
4034 nic_t *sp = dev->priv; 3987 struct s2io_nic *sp = dev->priv;
4035 int i; 3988 int i;
4036 mac_info_t *mac_control; 3989 struct mac_info *mac_control;
4037 struct config_param *config; 3990 struct config_param *config;
4038 3991
4039 atomic_inc(&sp->isr_cnt); 3992 atomic_inc(&sp->isr_cnt);
@@ -4063,8 +4016,8 @@ static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4063 4016
4064static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4017static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4065{ 4018{
4066 ring_info_t *ring = (ring_info_t *)dev_id; 4019 struct ring_info *ring = (struct ring_info *)dev_id;
4067 nic_t *sp = ring->nic; 4020 struct s2io_nic *sp = ring->nic;
4068 4021
4069 atomic_inc(&sp->isr_cnt); 4022 atomic_inc(&sp->isr_cnt);
4070 4023
@@ -4077,17 +4030,17 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4077 4030
4078static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) 4031static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4079{ 4032{
4080 fifo_info_t *fifo = (fifo_info_t *)dev_id; 4033 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4081 nic_t *sp = fifo->nic; 4034 struct s2io_nic *sp = fifo->nic;
4082 4035
4083 atomic_inc(&sp->isr_cnt); 4036 atomic_inc(&sp->isr_cnt);
4084 tx_intr_handler(fifo); 4037 tx_intr_handler(fifo);
4085 atomic_dec(&sp->isr_cnt); 4038 atomic_dec(&sp->isr_cnt);
4086 return IRQ_HANDLED; 4039 return IRQ_HANDLED;
4087} 4040}
4088static void s2io_txpic_intr_handle(nic_t *sp) 4041static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4089{ 4042{
4090 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4043 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4091 u64 val64; 4044 u64 val64;
4092 4045
4093 val64 = readq(&bar0->pic_int_status); 4046 val64 = readq(&bar0->pic_int_status);
@@ -4109,39 +4062,33 @@ static void s2io_txpic_intr_handle(nic_t *sp)
4109 } 4062 }
4110 else if (val64 & GPIO_INT_REG_LINK_UP) { 4063 else if (val64 & GPIO_INT_REG_LINK_UP) {
4111 val64 = readq(&bar0->adapter_status); 4064 val64 = readq(&bar0->adapter_status);
4112 if (verify_xena_quiescence(sp, val64,
4113 sp->device_enabled_once)) {
4114 /* Enable Adapter */ 4065 /* Enable Adapter */
4115 val64 = readq(&bar0->adapter_control); 4066 val64 = readq(&bar0->adapter_control);
4116 val64 |= ADAPTER_CNTL_EN; 4067 val64 |= ADAPTER_CNTL_EN;
4117 writeq(val64, &bar0->adapter_control); 4068 writeq(val64, &bar0->adapter_control);
4118 val64 |= ADAPTER_LED_ON; 4069 val64 |= ADAPTER_LED_ON;
4119 writeq(val64, &bar0->adapter_control); 4070 writeq(val64, &bar0->adapter_control);
4120 if (!sp->device_enabled_once) 4071 if (!sp->device_enabled_once)
4121 sp->device_enabled_once = 1; 4072 sp->device_enabled_once = 1;
4122 4073
4123 s2io_link(sp, LINK_UP); 4074 s2io_link(sp, LINK_UP);
4124 /* 4075 /*
4125 * unmask link down interrupt and mask link-up 4076 * unmask link down interrupt and mask link-up
4126 * intr 4077 * intr
4127 */ 4078 */
4128 val64 = readq(&bar0->gpio_int_mask); 4079 val64 = readq(&bar0->gpio_int_mask);
4129 val64 &= ~GPIO_INT_MASK_LINK_DOWN; 4080 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4130 val64 |= GPIO_INT_MASK_LINK_UP; 4081 val64 |= GPIO_INT_MASK_LINK_UP;
4131 writeq(val64, &bar0->gpio_int_mask); 4082 writeq(val64, &bar0->gpio_int_mask);
4132 4083
4133 }
4134 }else if (val64 & GPIO_INT_REG_LINK_DOWN) { 4084 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4135 val64 = readq(&bar0->adapter_status); 4085 val64 = readq(&bar0->adapter_status);
4136 if (verify_xena_quiescence(sp, val64, 4086 s2io_link(sp, LINK_DOWN);
4137 sp->device_enabled_once)) { 4087 /* Link is down so unmaks link up interrupt */
4138 s2io_link(sp, LINK_DOWN); 4088 val64 = readq(&bar0->gpio_int_mask);
4139 /* Link is down so unmaks link up interrupt */ 4089 val64 &= ~GPIO_INT_MASK_LINK_UP;
4140 val64 = readq(&bar0->gpio_int_mask); 4090 val64 |= GPIO_INT_MASK_LINK_DOWN;
4141 val64 &= ~GPIO_INT_MASK_LINK_UP; 4091 writeq(val64, &bar0->gpio_int_mask);
4142 val64 |= GPIO_INT_MASK_LINK_DOWN;
4143 writeq(val64, &bar0->gpio_int_mask);
4144 }
4145 } 4092 }
4146 } 4093 }
4147 val64 = readq(&bar0->gpio_int_mask); 4094 val64 = readq(&bar0->gpio_int_mask);
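
The link-transition branches above now flip the GPIO interrupt mask directly, without the old verify_xena_quiescence() guard: after a link-up event the driver unmasks the link-down source and masks the link-up source, and the link-down branch does the mirror-image flip, so only the opposite transition can raise the next interrupt. A minimal stand-alone sketch of that mask flip, using placeholder bit positions rather than the real Xframe register layout:

    /* Toy model: a set bit in gpio_int_mask disables (masks) that source,
     * which is how the driver treats the real register above. */
    #include <stdint.h>
    #include <stdio.h>

    #define GPIO_INT_MASK_LINK_UP    (1ULL << 0)   /* placeholder bit */
    #define GPIO_INT_MASK_LINK_DOWN  (1ULL << 1)   /* placeholder bit */

    static uint64_t gpio_int_mask;                 /* stands in for the BAR0 register */

    static void arm_for_link_down(void)            /* run after a link-up event */
    {
        gpio_int_mask &= ~GPIO_INT_MASK_LINK_DOWN; /* unmask link-down */
        gpio_int_mask |=  GPIO_INT_MASK_LINK_UP;   /* mask link-up     */
    }

    static void arm_for_link_up(void)              /* run after a link-down event */
    {
        gpio_int_mask &= ~GPIO_INT_MASK_LINK_UP;   /* unmask link-up   */
        gpio_int_mask |=  GPIO_INT_MASK_LINK_DOWN; /* mask link-down   */
    }

    int main(void)
    {
        arm_for_link_down();
        printf("after link-up:   mask=%#llx\n", (unsigned long long)gpio_int_mask);
        arm_for_link_up();
        printf("after link-down: mask=%#llx\n", (unsigned long long)gpio_int_mask);
        return 0;
    }
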
@@ -4163,11 +4110,11 @@ static void s2io_txpic_intr_handle(nic_t *sp)
4163static irqreturn_t s2io_isr(int irq, void *dev_id) 4110static irqreturn_t s2io_isr(int irq, void *dev_id)
4164{ 4111{
4165 struct net_device *dev = (struct net_device *) dev_id; 4112 struct net_device *dev = (struct net_device *) dev_id;
4166 nic_t *sp = dev->priv; 4113 struct s2io_nic *sp = dev->priv;
4167 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4114 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4168 int i; 4115 int i;
4169 u64 reason = 0, val64, org_mask; 4116 u64 reason = 0;
4170 mac_info_t *mac_control; 4117 struct mac_info *mac_control;
4171 struct config_param *config; 4118 struct config_param *config;
4172 4119
4173 atomic_inc(&sp->isr_cnt); 4120 atomic_inc(&sp->isr_cnt);
@@ -4185,43 +4132,48 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4185 reason = readq(&bar0->general_int_status); 4132 reason = readq(&bar0->general_int_status);
4186 4133
4187 if (!reason) { 4134 if (!reason) {
4188 /* The interrupt was not raised by Xena. */ 4135 /* The interrupt was not raised by us. */
4136 atomic_dec(&sp->isr_cnt);
4137 return IRQ_NONE;
4138 }
4139 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4140 /* Disable device and get out */
4189 atomic_dec(&sp->isr_cnt); 4141 atomic_dec(&sp->isr_cnt);
4190 return IRQ_NONE; 4142 return IRQ_NONE;
4191 } 4143 }
4192 4144
4193 val64 = 0xFFFFFFFFFFFFFFFFULL; 4145 if (napi) {
4194 /* Store current mask before masking all interrupts */ 4146 if (reason & GEN_INTR_RXTRAFFIC) {
4195 org_mask = readq(&bar0->general_int_mask); 4147 if ( likely ( netif_rx_schedule_prep(dev)) ) {
4196 writeq(val64, &bar0->general_int_mask); 4148 __netif_rx_schedule(dev);
4149 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4150 }
4151 else
4152 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4153 }
4154 } else {
4155 /*
4156 * Rx handler is called by default, without checking for the
4157 * cause of interrupt.
4158 * rx_traffic_int reg is an R1 register, writing all 1's
4159 * will ensure that the actual interrupt causing bit get's
4160 * cleared and hence a read can be avoided.
4161 */
4162 if (reason & GEN_INTR_RXTRAFFIC)
4163 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4197 4164
4198#ifdef CONFIG_S2IO_NAPI 4165 for (i = 0; i < config->rx_ring_num; i++) {
4199 if (reason & GEN_INTR_RXTRAFFIC) { 4166 rx_intr_handler(&mac_control->rings[i]);
4200 if (netif_rx_schedule_prep(dev)) {
4201 writeq(val64, &bar0->rx_traffic_mask);
4202 __netif_rx_schedule(dev);
4203 } 4167 }
4204 } 4168 }
4205#else
4206 /*
4207 * Rx handler is called by default, without checking for the
4208 * cause of interrupt.
4209 * rx_traffic_int reg is an R1 register, writing all 1's
4210 * will ensure that the actual interrupt causing bit get's
4211 * cleared and hence a read can be avoided.
4212 */
4213 writeq(val64, &bar0->rx_traffic_int);
4214 for (i = 0; i < config->rx_ring_num; i++) {
4215 rx_intr_handler(&mac_control->rings[i]);
4216 }
4217#endif
4218 4169
4219 /* 4170 /*
4220 * tx_traffic_int reg is an R1 register, writing all 1's 4171 * tx_traffic_int reg is an R1 register, writing all 1's
4221 * will ensure that the actual interrupt causing bit get's 4172 * will ensure that the actual interrupt causing bit get's
4222 * cleared and hence a read can be avoided. 4173 * cleared and hence a read can be avoided.
4223 */ 4174 */
4224 writeq(val64, &bar0->tx_traffic_int); 4175 if (reason & GEN_INTR_TXTRAFFIC)
4176 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4225 4177
4226 for (i = 0; i < config->tx_fifo_num; i++) 4178 for (i = 0; i < config->tx_fifo_num; i++)
4227 tx_intr_handler(&mac_control->fifos[i]); 4179 tx_intr_handler(&mac_control->fifos[i]);
@@ -4233,11 +4185,14 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4233 * reallocate the buffers from the interrupt handler itself, 4185 * reallocate the buffers from the interrupt handler itself,
4234 * else schedule a tasklet to reallocate the buffers. 4186 * else schedule a tasklet to reallocate the buffers.
4235 */ 4187 */
4236#ifndef CONFIG_S2IO_NAPI 4188 if (!napi) {
4237 for (i = 0; i < config->rx_ring_num; i++) 4189 for (i = 0; i < config->rx_ring_num; i++)
4238 s2io_chk_rx_buffers(sp, i); 4190 s2io_chk_rx_buffers(sp, i);
4239#endif 4191 }
4240 writeq(org_mask, &bar0->general_int_mask); 4192
4193 writeq(0, &bar0->general_int_mask);
4194 readl(&bar0->general_int_status);
4195
4241 atomic_dec(&sp->isr_cnt); 4196 atomic_dec(&sp->isr_cnt);
4242 return IRQ_HANDLED; 4197 return IRQ_HANDLED;
4243} 4198}
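
The interrupt handler above moves from compile-time CONFIG_S2IO_NAPI to the run-time napi flag: with NAPI enabled the RX cause only schedules the poll routine and masks further RX interrupts; with NAPI off, the cause register, an R1 (write-one-to-clear) register, is acknowledged by writing all ones (S2IO_MINUS_ONE) and the rings are drained in place, and TX causes are acknowledged the same way. A small user-space sketch of write-one-to-clear acknowledgement against a toy register model, not the Xframe hardware:

    #include <stdint.h>
    #include <stdio.h>

    #define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL    /* same constant the patch adds */

    struct w1c_reg {
        uint64_t bits;                               /* latched interrupt causes */
    };

    static void w1c_write(struct w1c_reg *r, uint64_t val)
    {
        r->bits &= ~val;                             /* a written 1 clears that bit */
    }

    int main(void)
    {
        struct w1c_reg rx_traffic_int = { .bits = 0x5 };  /* two pending causes */

        w1c_write(&rx_traffic_int, S2IO_MINUS_ONE);       /* ack everything at once */
        printf("pending after ack: %#llx\n",
               (unsigned long long)rx_traffic_int.bits);  /* prints 0 */
        return 0;
    }

Because every written 1 clears its cause bit, the handler never needs a read-modify-write cycle on the cause register, which is why the driver can simply write S2IO_MINUS_ONE and skip the follow-up read.
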
@@ -4245,9 +4200,9 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4245/** 4200/**
4246 * s2io_updt_stats - 4201 * s2io_updt_stats -
4247 */ 4202 */
4248static void s2io_updt_stats(nic_t *sp) 4203static void s2io_updt_stats(struct s2io_nic *sp)
4249{ 4204{
4250 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4205 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4251 u64 val64; 4206 u64 val64;
4252 int cnt = 0; 4207 int cnt = 0;
4253 4208
@@ -4266,7 +4221,7 @@ static void s2io_updt_stats(nic_t *sp)
4266 break; /* Updt failed */ 4221 break; /* Updt failed */
4267 } while(1); 4222 } while(1);
4268 } else { 4223 } else {
4269 memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t)); 4224 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
4270 } 4225 }
4271} 4226}
4272 4227
@@ -4282,8 +4237,8 @@ static void s2io_updt_stats(nic_t *sp)
4282 4237
4283static struct net_device_stats *s2io_get_stats(struct net_device *dev) 4238static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4284{ 4239{
4285 nic_t *sp = dev->priv; 4240 struct s2io_nic *sp = dev->priv;
4286 mac_info_t *mac_control; 4241 struct mac_info *mac_control;
4287 struct config_param *config; 4242 struct config_param *config;
4288 4243
4289 4244
@@ -4324,8 +4279,8 @@ static void s2io_set_multicast(struct net_device *dev)
4324{ 4279{
4325 int i, j, prev_cnt; 4280 int i, j, prev_cnt;
4326 struct dev_mc_list *mclist; 4281 struct dev_mc_list *mclist;
4327 nic_t *sp = dev->priv; 4282 struct s2io_nic *sp = dev->priv;
4328 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4283 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4329 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = 4284 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4330 0xfeffffffffffULL; 4285 0xfeffffffffffULL;
4331 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0; 4286 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
@@ -4478,8 +4433,8 @@ static void s2io_set_multicast(struct net_device *dev)
4478 4433
4479static int s2io_set_mac_addr(struct net_device *dev, u8 * addr) 4434static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4480{ 4435{
4481 nic_t *sp = dev->priv; 4436 struct s2io_nic *sp = dev->priv;
4482 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4437 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4483 register u64 val64, mac_addr = 0; 4438 register u64 val64, mac_addr = 0;
4484 int i; 4439 int i;
4485 4440
@@ -4525,7 +4480,7 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4525static int s2io_ethtool_sset(struct net_device *dev, 4480static int s2io_ethtool_sset(struct net_device *dev,
4526 struct ethtool_cmd *info) 4481 struct ethtool_cmd *info)
4527{ 4482{
4528 nic_t *sp = dev->priv; 4483 struct s2io_nic *sp = dev->priv;
4529 if ((info->autoneg == AUTONEG_ENABLE) || 4484 if ((info->autoneg == AUTONEG_ENABLE) ||
4530 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL)) 4485 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4531 return -EINVAL; 4486 return -EINVAL;
@@ -4551,7 +4506,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
4551 4506
4552static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) 4507static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4553{ 4508{
4554 nic_t *sp = dev->priv; 4509 struct s2io_nic *sp = dev->priv;
4555 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 4510 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4556 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 4511 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4557 info->port = PORT_FIBRE; 4512 info->port = PORT_FIBRE;
@@ -4584,7 +4539,7 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4584static void s2io_ethtool_gdrvinfo(struct net_device *dev, 4539static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4585 struct ethtool_drvinfo *info) 4540 struct ethtool_drvinfo *info)
4586{ 4541{
4587 nic_t *sp = dev->priv; 4542 struct s2io_nic *sp = dev->priv;
4588 4543
4589 strncpy(info->driver, s2io_driver_name, sizeof(info->driver)); 4544 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4590 strncpy(info->version, s2io_driver_version, sizeof(info->version)); 4545 strncpy(info->version, s2io_driver_version, sizeof(info->version));
@@ -4616,7 +4571,7 @@ static void s2io_ethtool_gregs(struct net_device *dev,
4616 int i; 4571 int i;
4617 u64 reg; 4572 u64 reg;
4618 u8 *reg_space = (u8 *) space; 4573 u8 *reg_space = (u8 *) space;
4619 nic_t *sp = dev->priv; 4574 struct s2io_nic *sp = dev->priv;
4620 4575
4621 regs->len = XENA_REG_SPACE; 4576 regs->len = XENA_REG_SPACE;
4622 regs->version = sp->pdev->subsystem_device; 4577 regs->version = sp->pdev->subsystem_device;
@@ -4638,8 +4593,8 @@ static void s2io_ethtool_gregs(struct net_device *dev,
4638*/ 4593*/
4639static void s2io_phy_id(unsigned long data) 4594static void s2io_phy_id(unsigned long data)
4640{ 4595{
4641 nic_t *sp = (nic_t *) data; 4596 struct s2io_nic *sp = (struct s2io_nic *) data;
4642 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4597 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4643 u64 val64 = 0; 4598 u64 val64 = 0;
4644 u16 subid; 4599 u16 subid;
4645 4600
@@ -4676,8 +4631,8 @@ static void s2io_phy_id(unsigned long data)
4676static int s2io_ethtool_idnic(struct net_device *dev, u32 data) 4631static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4677{ 4632{
4678 u64 val64 = 0, last_gpio_ctrl_val; 4633 u64 val64 = 0, last_gpio_ctrl_val;
4679 nic_t *sp = dev->priv; 4634 struct s2io_nic *sp = dev->priv;
4680 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4635 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4681 u16 subid; 4636 u16 subid;
4682 4637
4683 subid = sp->pdev->subsystem_device; 4638 subid = sp->pdev->subsystem_device;
@@ -4725,8 +4680,8 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
4725 struct ethtool_pauseparam *ep) 4680 struct ethtool_pauseparam *ep)
4726{ 4681{
4727 u64 val64; 4682 u64 val64;
4728 nic_t *sp = dev->priv; 4683 struct s2io_nic *sp = dev->priv;
4729 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4684 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4730 4685
4731 val64 = readq(&bar0->rmac_pause_cfg); 4686 val64 = readq(&bar0->rmac_pause_cfg);
4732 if (val64 & RMAC_PAUSE_GEN_ENABLE) 4687 if (val64 & RMAC_PAUSE_GEN_ENABLE)
@@ -4752,8 +4707,8 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
4752 struct ethtool_pauseparam *ep) 4707 struct ethtool_pauseparam *ep)
4753{ 4708{
4754 u64 val64; 4709 u64 val64;
4755 nic_t *sp = dev->priv; 4710 struct s2io_nic *sp = dev->priv;
4756 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4711 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4757 4712
4758 val64 = readq(&bar0->rmac_pause_cfg); 4713 val64 = readq(&bar0->rmac_pause_cfg);
4759 if (ep->tx_pause) 4714 if (ep->tx_pause)
@@ -4785,12 +4740,12 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
4785 */ 4740 */
4786 4741
4787#define S2IO_DEV_ID 5 4742#define S2IO_DEV_ID 5
4788static int read_eeprom(nic_t * sp, int off, u64 * data) 4743static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
4789{ 4744{
4790 int ret = -1; 4745 int ret = -1;
4791 u32 exit_cnt = 0; 4746 u32 exit_cnt = 0;
4792 u64 val64; 4747 u64 val64;
4793 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4748 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4794 4749
4795 if (sp->device_type == XFRAME_I_DEVICE) { 4750 if (sp->device_type == XFRAME_I_DEVICE) {
4796 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | 4751 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
@@ -4850,11 +4805,11 @@ static int read_eeprom(nic_t * sp, int off, u64 * data)
4850 * 0 on success, -1 on failure. 4805 * 0 on success, -1 on failure.
4851 */ 4806 */
4852 4807
4853static int write_eeprom(nic_t * sp, int off, u64 data, int cnt) 4808static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
4854{ 4809{
4855 int exit_cnt = 0, ret = -1; 4810 int exit_cnt = 0, ret = -1;
4856 u64 val64; 4811 u64 val64;
4857 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4812 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4858 4813
4859 if (sp->device_type == XFRAME_I_DEVICE) { 4814 if (sp->device_type == XFRAME_I_DEVICE) {
4860 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | 4815 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
@@ -4899,7 +4854,7 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4899 } 4854 }
4900 return ret; 4855 return ret;
4901} 4856}
4902static void s2io_vpd_read(nic_t *nic) 4857static void s2io_vpd_read(struct s2io_nic *nic)
4903{ 4858{
4904 u8 *vpd_data; 4859 u8 *vpd_data;
4905 u8 data; 4860 u8 data;
@@ -4914,6 +4869,7 @@ static void s2io_vpd_read(nic_t *nic)
4914 strcpy(nic->product_name, "Xframe I 10GbE network adapter"); 4869 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4915 vpd_addr = 0x50; 4870 vpd_addr = 0x50;
4916 } 4871 }
4872 strcpy(nic->serial_num, "NOT AVAILABLE");
4917 4873
4918 vpd_data = kmalloc(256, GFP_KERNEL); 4874 vpd_data = kmalloc(256, GFP_KERNEL);
4919 if (!vpd_data) 4875 if (!vpd_data)
@@ -4937,7 +4893,22 @@ static void s2io_vpd_read(nic_t *nic)
4937 pci_read_config_dword(nic->pdev, (vpd_addr + 4), 4893 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
4938 (u32 *)&vpd_data[i]); 4894 (u32 *)&vpd_data[i]);
4939 } 4895 }
4940 if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) { 4896
4897 if(!fail) {
4898 /* read serial number of adapter */
4899 for (cnt = 0; cnt < 256; cnt++) {
4900 if ((vpd_data[cnt] == 'S') &&
4901 (vpd_data[cnt+1] == 'N') &&
4902 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
4903 memset(nic->serial_num, 0, VPD_STRING_LEN);
4904 memcpy(nic->serial_num, &vpd_data[cnt + 3],
4905 vpd_data[cnt+2]);
4906 break;
4907 }
4908 }
4909 }
4910
4911 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
4941 memset(nic->product_name, 0, vpd_data[1]); 4912 memset(nic->product_name, 0, vpd_data[1]);
4942 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 4913 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4943 } 4914 }
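
The added block scans the 256-byte VPD image for the ASCII keyword 'S' 'N' followed by a length byte and copies that many bytes into nic->serial_num, leaving the "NOT AVAILABLE" default when the keyword is absent. A stand-alone sketch of the same scan; the sample buffer and the VPD_STRING_LEN value of 64 are assumptions for illustration:

    #include <stdio.h>
    #include <string.h>

    #define VPD_STRING_LEN 64                       /* assumed value */

    static void read_serial(const unsigned char *vpd, size_t len, char *out)
    {
        size_t i;

        strcpy(out, "NOT AVAILABLE");               /* default, as in the patch */
        for (i = 0; i + 2 < len; i++) {
            if (vpd[i] == 'S' && vpd[i + 1] == 'N' &&
                vpd[i + 2] < VPD_STRING_LEN) {
                memset(out, 0, VPD_STRING_LEN);
                memcpy(out, &vpd[i + 3], vpd[i + 2]);  /* length byte, then data */
                break;
            }
        }
    }

    int main(void)
    {
        unsigned char vpd[256] = "xxSN\x0bSER123456789";  /* fake VPD image */
        char serial[VPD_STRING_LEN];

        read_serial(vpd, sizeof(vpd), serial);
        printf("serial: %s\n", serial);             /* prints SER12345678 */
        return 0;
    }
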
@@ -4962,7 +4933,7 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
4962{ 4933{
4963 u32 i, valid; 4934 u32 i, valid;
4964 u64 data; 4935 u64 data;
4965 nic_t *sp = dev->priv; 4936 struct s2io_nic *sp = dev->priv;
4966 4937
4967 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); 4938 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4968 4939
@@ -5000,7 +4971,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
5000{ 4971{
5001 int len = eeprom->len, cnt = 0; 4972 int len = eeprom->len, cnt = 0;
5002 u64 valid = 0, data; 4973 u64 valid = 0, data;
5003 nic_t *sp = dev->priv; 4974 struct s2io_nic *sp = dev->priv;
5004 4975
5005 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { 4976 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5006 DBG_PRINT(ERR_DBG, 4977 DBG_PRINT(ERR_DBG,
@@ -5044,9 +5015,9 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
5044 * 0 on success. 5015 * 0 on success.
5045 */ 5016 */
5046 5017
5047static int s2io_register_test(nic_t * sp, uint64_t * data) 5018static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5048{ 5019{
5049 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5020 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5050 u64 val64 = 0, exp_val; 5021 u64 val64 = 0, exp_val;
5051 int fail = 0; 5022 int fail = 0;
5052 5023
@@ -5111,7 +5082,7 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
5111 * 0 on success. 5082 * 0 on success.
5112 */ 5083 */
5113 5084
5114static int s2io_eeprom_test(nic_t * sp, uint64_t * data) 5085static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5115{ 5086{
5116 int fail = 0; 5087 int fail = 0;
5117 u64 ret_data, org_4F0, org_7F0; 5088 u64 ret_data, org_4F0, org_7F0;
@@ -5213,7 +5184,7 @@ static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
5213 * 0 on success and -1 on failure. 5184 * 0 on success and -1 on failure.
5214 */ 5185 */
5215 5186
5216static int s2io_bist_test(nic_t * sp, uint64_t * data) 5187static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5217{ 5188{
5218 u8 bist = 0; 5189 u8 bist = 0;
5219 int cnt = 0, ret = -1; 5190 int cnt = 0, ret = -1;
@@ -5249,9 +5220,9 @@ static int s2io_bist_test(nic_t * sp, uint64_t * data)
5249 * 0 on success. 5220 * 0 on success.
5250 */ 5221 */
5251 5222
5252static int s2io_link_test(nic_t * sp, uint64_t * data) 5223static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5253{ 5224{
5254 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5225 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5255 u64 val64; 5226 u64 val64;
5256 5227
5257 val64 = readq(&bar0->adapter_status); 5228 val64 = readq(&bar0->adapter_status);
@@ -5276,9 +5247,9 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
5276 * 0 on success. 5247 * 0 on success.
5277 */ 5248 */
5278 5249
5279static int s2io_rldram_test(nic_t * sp, uint64_t * data) 5250static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5280{ 5251{
5281 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5252 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5282 u64 val64; 5253 u64 val64;
5283 int cnt, iteration = 0, test_fail = 0; 5254 int cnt, iteration = 0, test_fail = 0;
5284 5255
@@ -5380,7 +5351,7 @@ static void s2io_ethtool_test(struct net_device *dev,
5380 struct ethtool_test *ethtest, 5351 struct ethtool_test *ethtest,
5381 uint64_t * data) 5352 uint64_t * data)
5382{ 5353{
5383 nic_t *sp = dev->priv; 5354 struct s2io_nic *sp = dev->priv;
5384 int orig_state = netif_running(sp->dev); 5355 int orig_state = netif_running(sp->dev);
5385 5356
5386 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { 5357 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
@@ -5436,8 +5407,8 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
5436 u64 * tmp_stats) 5407 u64 * tmp_stats)
5437{ 5408{
5438 int i = 0; 5409 int i = 0;
5439 nic_t *sp = dev->priv; 5410 struct s2io_nic *sp = dev->priv;
5440 StatInfo_t *stat_info = sp->mac_control.stats_info; 5411 struct stat_block *stat_info = sp->mac_control.stats_info;
5441 5412
5442 s2io_updt_stats(sp); 5413 s2io_updt_stats(sp);
5443 tmp_stats[i++] = 5414 tmp_stats[i++] =
@@ -5664,14 +5635,14 @@ static int s2io_ethtool_get_regs_len(struct net_device *dev)
5664 5635
5665static u32 s2io_ethtool_get_rx_csum(struct net_device * dev) 5636static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5666{ 5637{
5667 nic_t *sp = dev->priv; 5638 struct s2io_nic *sp = dev->priv;
5668 5639
5669 return (sp->rx_csum); 5640 return (sp->rx_csum);
5670} 5641}
5671 5642
5672static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data) 5643static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5673{ 5644{
5674 nic_t *sp = dev->priv; 5645 struct s2io_nic *sp = dev->priv;
5675 5646
5676 if (data) 5647 if (data)
5677 sp->rx_csum = 1; 5648 sp->rx_csum = 1;
@@ -5750,10 +5721,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
5750 .set_tx_csum = s2io_ethtool_op_set_tx_csum, 5721 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5751 .get_sg = ethtool_op_get_sg, 5722 .get_sg = ethtool_op_get_sg,
5752 .set_sg = ethtool_op_set_sg, 5723 .set_sg = ethtool_op_set_sg,
5753#ifdef NETIF_F_TSO
5754 .get_tso = s2io_ethtool_op_get_tso, 5724 .get_tso = s2io_ethtool_op_get_tso,
5755 .set_tso = s2io_ethtool_op_set_tso, 5725 .set_tso = s2io_ethtool_op_set_tso,
5756#endif
5757 .get_ufo = ethtool_op_get_ufo, 5726 .get_ufo = ethtool_op_get_ufo,
5758 .set_ufo = ethtool_op_set_ufo, 5727 .set_ufo = ethtool_op_set_ufo,
5759 .self_test_count = s2io_ethtool_self_test_count, 5728 .self_test_count = s2io_ethtool_self_test_count,
@@ -5794,7 +5763,7 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5794 5763
5795static int s2io_change_mtu(struct net_device *dev, int new_mtu) 5764static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5796{ 5765{
5797 nic_t *sp = dev->priv; 5766 struct s2io_nic *sp = dev->priv;
5798 5767
5799 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { 5768 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5800 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", 5769 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
@@ -5813,7 +5782,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5813 if (netif_queue_stopped(dev)) 5782 if (netif_queue_stopped(dev))
5814 netif_wake_queue(dev); 5783 netif_wake_queue(dev);
5815 } else { /* Device is down */ 5784 } else { /* Device is down */
5816 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5785 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5817 u64 val64 = new_mtu; 5786 u64 val64 = new_mtu;
5818 5787
5819 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); 5788 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
@@ -5838,9 +5807,9 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5838static void s2io_tasklet(unsigned long dev_addr) 5807static void s2io_tasklet(unsigned long dev_addr)
5839{ 5808{
5840 struct net_device *dev = (struct net_device *) dev_addr; 5809 struct net_device *dev = (struct net_device *) dev_addr;
5841 nic_t *sp = dev->priv; 5810 struct s2io_nic *sp = dev->priv;
5842 int i, ret; 5811 int i, ret;
5843 mac_info_t *mac_control; 5812 struct mac_info *mac_control;
5844 struct config_param *config; 5813 struct config_param *config;
5845 5814
5846 mac_control = &sp->mac_control; 5815 mac_control = &sp->mac_control;
@@ -5873,9 +5842,9 @@ static void s2io_tasklet(unsigned long dev_addr)
5873 5842
5874static void s2io_set_link(struct work_struct *work) 5843static void s2io_set_link(struct work_struct *work)
5875{ 5844{
5876 nic_t *nic = container_of(work, nic_t, set_link_task); 5845 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
5877 struct net_device *dev = nic->dev; 5846 struct net_device *dev = nic->dev;
5878 XENA_dev_config_t __iomem *bar0 = nic->bar0; 5847 struct XENA_dev_config __iomem *bar0 = nic->bar0;
5879 register u64 val64; 5848 register u64 val64;
5880 u16 subid; 5849 u16 subid;
5881 5850
@@ -5894,57 +5863,53 @@ static void s2io_set_link(struct work_struct *work)
5894 } 5863 }
5895 5864
5896 val64 = readq(&bar0->adapter_status); 5865 val64 = readq(&bar0->adapter_status);
5897 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) { 5866 if (LINK_IS_UP(val64)) {
5898 if (LINK_IS_UP(val64)) { 5867 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
5899 val64 = readq(&bar0->adapter_control); 5868 if (verify_xena_quiescence(nic)) {
5900 val64 |= ADAPTER_CNTL_EN; 5869 val64 = readq(&bar0->adapter_control);
5901 writeq(val64, &bar0->adapter_control); 5870 val64 |= ADAPTER_CNTL_EN;
5902 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5903 subid)) {
5904 val64 = readq(&bar0->gpio_control);
5905 val64 |= GPIO_CTRL_GPIO_0;
5906 writeq(val64, &bar0->gpio_control);
5907 val64 = readq(&bar0->gpio_control);
5908 } else {
5909 val64 |= ADAPTER_LED_ON;
5910 writeq(val64, &bar0->adapter_control); 5871 writeq(val64, &bar0->adapter_control);
5911 } 5872 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
5912 if (s2io_link_fault_indication(nic) == 5873 nic->device_type, subid)) {
5913 MAC_RMAC_ERR_TIMER) { 5874 val64 = readq(&bar0->gpio_control);
5914 val64 = readq(&bar0->adapter_status); 5875 val64 |= GPIO_CTRL_GPIO_0;
5915 if (!LINK_IS_UP(val64)) { 5876 writeq(val64, &bar0->gpio_control);
5916 DBG_PRINT(ERR_DBG, "%s:", dev->name); 5877 val64 = readq(&bar0->gpio_control);
5917 DBG_PRINT(ERR_DBG, " Link down"); 5878 } else {
5918 DBG_PRINT(ERR_DBG, "after "); 5879 val64 |= ADAPTER_LED_ON;
5919 DBG_PRINT(ERR_DBG, "enabling "); 5880 writeq(val64, &bar0->adapter_control);
5920 DBG_PRINT(ERR_DBG, "device \n");
5921 } 5881 }
5922 }
5923 if (nic->device_enabled_once == FALSE) {
5924 nic->device_enabled_once = TRUE; 5882 nic->device_enabled_once = TRUE;
5883 } else {
5884 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5885 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5886 netif_stop_queue(dev);
5925 } 5887 }
5888 }
5889 val64 = readq(&bar0->adapter_status);
5890 if (!LINK_IS_UP(val64)) {
5891 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5892 DBG_PRINT(ERR_DBG, " Link down after enabling ");
5893 DBG_PRINT(ERR_DBG, "device \n");
5894 } else
5926 s2io_link(nic, LINK_UP); 5895 s2io_link(nic, LINK_UP);
5927 } else { 5896 } else {
5928 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type, 5897 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5929 subid)) { 5898 subid)) {
5930 val64 = readq(&bar0->gpio_control); 5899 val64 = readq(&bar0->gpio_control);
5931 val64 &= ~GPIO_CTRL_GPIO_0; 5900 val64 &= ~GPIO_CTRL_GPIO_0;
5932 writeq(val64, &bar0->gpio_control); 5901 writeq(val64, &bar0->gpio_control);
5933 val64 = readq(&bar0->gpio_control); 5902 val64 = readq(&bar0->gpio_control);
5934 }
5935 s2io_link(nic, LINK_DOWN);
5936 } 5903 }
5937 } else { /* NIC is not Quiescent. */ 5904 s2io_link(nic, LINK_DOWN);
5938 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5939 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5940 netif_stop_queue(dev);
5941 } 5905 }
5942 clear_bit(0, &(nic->link_state)); 5906 clear_bit(0, &(nic->link_state));
5943} 5907}
5944 5908
5945static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba, 5909static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
5946 struct sk_buff **skb, u64 *temp0, u64 *temp1, 5910 struct buffAdd *ba,
5947 u64 *temp2, int size) 5911 struct sk_buff **skb, u64 *temp0, u64 *temp1,
5912 u64 *temp2, int size)
5948{ 5913{
5949 struct net_device *dev = sp->dev; 5914 struct net_device *dev = sp->dev;
5950 struct sk_buff *frag_list; 5915 struct sk_buff *frag_list;
@@ -5958,7 +5923,7 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5958 * using same mapped address for the Rxd 5923 * using same mapped address for the Rxd
5959 * buffer pointer 5924 * buffer pointer
5960 */ 5925 */
5961 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0; 5926 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
5962 } else { 5927 } else {
5963 *skb = dev_alloc_skb(size); 5928 *skb = dev_alloc_skb(size);
5964 if (!(*skb)) { 5929 if (!(*skb)) {
@@ -5970,7 +5935,7 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5970 * such it will be used for next rxd whose 5935 * such it will be used for next rxd whose
5971 * Host Control is NULL 5936 * Host Control is NULL
5972 */ 5937 */
5973 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 = 5938 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
5974 pci_map_single( sp->pdev, (*skb)->data, 5939 pci_map_single( sp->pdev, (*skb)->data,
5975 size - NET_IP_ALIGN, 5940 size - NET_IP_ALIGN,
5976 PCI_DMA_FROMDEVICE); 5941 PCI_DMA_FROMDEVICE);
@@ -5979,36 +5944,36 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5979 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { 5944 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
5980 /* Two buffer Mode */ 5945 /* Two buffer Mode */
5981 if (*skb) { 5946 if (*skb) {
5982 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2; 5947 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
5983 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0; 5948 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
5984 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1; 5949 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
5985 } else { 5950 } else {
5986 *skb = dev_alloc_skb(size); 5951 *skb = dev_alloc_skb(size);
5987 if (!(*skb)) { 5952 if (!(*skb)) {
5988 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n", 5953 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
5989 dev->name); 5954 dev->name);
5990 return -ENOMEM; 5955 return -ENOMEM;
5991 } 5956 }
5992 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 = 5957 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
5993 pci_map_single(sp->pdev, (*skb)->data, 5958 pci_map_single(sp->pdev, (*skb)->data,
5994 dev->mtu + 4, 5959 dev->mtu + 4,
5995 PCI_DMA_FROMDEVICE); 5960 PCI_DMA_FROMDEVICE);
5996 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 = 5961 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
5997 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, 5962 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
5998 PCI_DMA_FROMDEVICE); 5963 PCI_DMA_FROMDEVICE);
5999 rxdp->Host_Control = (unsigned long) (*skb); 5964 rxdp->Host_Control = (unsigned long) (*skb);
6000 5965
6001 /* Buffer-1 will be dummy buffer not used */ 5966 /* Buffer-1 will be dummy buffer not used */
6002 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 = 5967 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6003 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, 5968 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6004 PCI_DMA_FROMDEVICE); 5969 PCI_DMA_FROMDEVICE);
6005 } 5970 }
6006 } else if ((rxdp->Host_Control == 0)) { 5971 } else if ((rxdp->Host_Control == 0)) {
6007 /* Three buffer mode */ 5972 /* Three buffer mode */
6008 if (*skb) { 5973 if (*skb) {
6009 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0; 5974 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6010 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1; 5975 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6011 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2; 5976 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6012 } else { 5977 } else {
6013 *skb = dev_alloc_skb(size); 5978 *skb = dev_alloc_skb(size);
6014 if (!(*skb)) { 5979 if (!(*skb)) {
@@ -6016,11 +5981,11 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
6016 dev->name); 5981 dev->name);
6017 return -ENOMEM; 5982 return -ENOMEM;
6018 } 5983 }
6019 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 = 5984 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6020 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, 5985 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6021 PCI_DMA_FROMDEVICE); 5986 PCI_DMA_FROMDEVICE);
6022 /* Buffer-1 receives L3/L4 headers */ 5987 /* Buffer-1 receives L3/L4 headers */
6023 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 = 5988 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6024 pci_map_single( sp->pdev, (*skb)->data, 5989 pci_map_single( sp->pdev, (*skb)->data,
6025 l3l4hdr_size + 4, 5990 l3l4hdr_size + 4,
6026 PCI_DMA_FROMDEVICE); 5991 PCI_DMA_FROMDEVICE);
@@ -6040,14 +6005,15 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
6040 /* 6005 /*
6041 * Buffer-2 receives L4 data payload 6006 * Buffer-2 receives L4 data payload
6042 */ 6007 */
6043 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 = 6008 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6044 pci_map_single( sp->pdev, frag_list->data, 6009 pci_map_single( sp->pdev, frag_list->data,
6045 dev->mtu, PCI_DMA_FROMDEVICE); 6010 dev->mtu, PCI_DMA_FROMDEVICE);
6046 } 6011 }
6047 } 6012 }
6048 return 0; 6013 return 0;
6049} 6014}
6050static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size) 6015static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6016 int size)
6051{ 6017{
6052 struct net_device *dev = sp->dev; 6018 struct net_device *dev = sp->dev;
6053 if (sp->rxd_mode == RXD_MODE_1) { 6019 if (sp->rxd_mode == RXD_MODE_1) {
@@ -6063,15 +6029,15 @@ static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
6063 } 6029 }
6064} 6030}
6065 6031
6066static int rxd_owner_bit_reset(nic_t *sp) 6032static int rxd_owner_bit_reset(struct s2io_nic *sp)
6067{ 6033{
6068 int i, j, k, blk_cnt = 0, size; 6034 int i, j, k, blk_cnt = 0, size;
6069 mac_info_t * mac_control = &sp->mac_control; 6035 struct mac_info * mac_control = &sp->mac_control;
6070 struct config_param *config = &sp->config; 6036 struct config_param *config = &sp->config;
6071 struct net_device *dev = sp->dev; 6037 struct net_device *dev = sp->dev;
6072 RxD_t *rxdp = NULL; 6038 struct RxD_t *rxdp = NULL;
6073 struct sk_buff *skb = NULL; 6039 struct sk_buff *skb = NULL;
6074 buffAdd_t *ba = NULL; 6040 struct buffAdd *ba = NULL;
6075 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0; 6041 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6076 6042
6077 /* Calculate the size based on ring mode */ 6043 /* Calculate the size based on ring mode */
@@ -6110,7 +6076,7 @@ static int rxd_owner_bit_reset(nic_t *sp)
6110 6076
6111} 6077}
6112 6078
6113static int s2io_add_isr(nic_t * sp) 6079static int s2io_add_isr(struct s2io_nic * sp)
6114{ 6080{
6115 int ret = 0; 6081 int ret = 0;
6116 struct net_device *dev = sp->dev; 6082 struct net_device *dev = sp->dev;
@@ -6125,7 +6091,7 @@ static int s2io_add_isr(nic_t * sp)
6125 sp->intr_type = INTA; 6091 sp->intr_type = INTA;
6126 } 6092 }
6127 6093
6128 /* Store the values of the MSIX table in the nic_t structure */ 6094 /* Store the values of the MSIX table in the struct s2io_nic structure */
6129 store_xmsi_data(sp); 6095 store_xmsi_data(sp);
6130 6096
6131 /* After proper initialization of H/W, register ISR */ 6097 /* After proper initialization of H/W, register ISR */
@@ -6180,7 +6146,7 @@ static int s2io_add_isr(nic_t * sp)
6180 } 6146 }
6181 return 0; 6147 return 0;
6182} 6148}
6183static void s2io_rem_isr(nic_t * sp) 6149static void s2io_rem_isr(struct s2io_nic * sp)
6184{ 6150{
6185 int cnt = 0; 6151 int cnt = 0;
6186 struct net_device *dev = sp->dev; 6152 struct net_device *dev = sp->dev;
@@ -6222,10 +6188,10 @@ static void s2io_rem_isr(nic_t * sp)
6222 } while(cnt < 5); 6188 } while(cnt < 5);
6223} 6189}
6224 6190
6225static void s2io_card_down(nic_t * sp) 6191static void s2io_card_down(struct s2io_nic * sp)
6226{ 6192{
6227 int cnt = 0; 6193 int cnt = 0;
6228 XENA_dev_config_t __iomem *bar0 = sp->bar0; 6194 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6229 unsigned long flags; 6195 unsigned long flags;
6230 register u64 val64 = 0; 6196 register u64 val64 = 0;
6231 6197
@@ -6256,7 +6222,8 @@ static void s2io_card_down(nic_t * sp)
6256 rxd_owner_bit_reset(sp); 6222 rxd_owner_bit_reset(sp);
6257 6223
6258 val64 = readq(&bar0->adapter_status); 6224 val64 = readq(&bar0->adapter_status);
6259 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) { 6225 if (verify_xena_quiescence(sp)) {
6226 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6260 break; 6227 break;
6261 } 6228 }
6262 6229
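
The shutdown loop above now calls the reworked verify_xena_quiescence(sp) and additionally requires verify_pcc_quiescent() before it stops waiting for the datapath to drain. A stand-alone sketch of this kind of bounded polling; is_quiescent(), the retry budget, and the 50 ms delay are illustrative stand-ins, not values taken from the driver:

    #include <stdio.h>
    #include <unistd.h>

    static int state = 3;                           /* pretend hardware drains slowly */

    static int is_quiescent(void) { return --state == 0; }

    static int wait_for_quiescence(int max_tries)
    {
        int cnt;

        for (cnt = 0; cnt < max_tries; cnt++) {
            if (is_quiescent())
                return 0;                           /* safe to reset / free buffers */
            usleep(50 * 1000);                      /* stand-in for a short sleep   */
        }
        return -1;                                  /* timed out; proceed anyway    */
    }

    int main(void)
    {
        printf("quiescence wait: %s\n",
               wait_for_quiescence(10) == 0 ? "ok" : "timeout");
        return 0;
    }
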
@@ -6285,10 +6252,10 @@ static void s2io_card_down(nic_t * sp)
6285 clear_bit(0, &(sp->link_state)); 6252 clear_bit(0, &(sp->link_state));
6286} 6253}
6287 6254
6288static int s2io_card_up(nic_t * sp) 6255static int s2io_card_up(struct s2io_nic * sp)
6289{ 6256{
6290 int i, ret = 0; 6257 int i, ret = 0;
6291 mac_info_t *mac_control; 6258 struct mac_info *mac_control;
6292 struct config_param *config; 6259 struct config_param *config;
6293 struct net_device *dev = (struct net_device *) sp->dev; 6260 struct net_device *dev = (struct net_device *) sp->dev;
6294 u16 interruptible; 6261 u16 interruptible;
@@ -6319,6 +6286,13 @@ static int s2io_card_up(nic_t * sp)
6319 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, 6286 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6320 atomic_read(&sp->rx_bufs_left[i])); 6287 atomic_read(&sp->rx_bufs_left[i]));
6321 } 6288 }
6289 /* Maintain the state prior to the open */
6290 if (sp->promisc_flg)
6291 sp->promisc_flg = 0;
6292 if (sp->m_cast_flg) {
6293 sp->m_cast_flg = 0;
6294 sp->all_multi_pos= 0;
6295 }
6322 6296
6323 /* Setting its receive mode */ 6297 /* Setting its receive mode */
6324 s2io_set_multicast(dev); 6298 s2io_set_multicast(dev);
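
The lines added just above clear the cached promiscuous and all-multicast flags before s2io_set_multicast() reprograms the receive mode. The flags presumably mirror what the hardware was last told, and a card_down/card_up cycle resets the hardware without resetting the mirror, so without the clear the reprogramming step could be skipped as "already configured". A toy model of that cache-invalidation, not the driver's real state machine:

    #include <stdio.h>

    static int hw_promisc;        /* what the (toy) hardware is doing   */
    static int promisc_flg;       /* driver's cached copy of that state */

    static void set_mode(int want_promisc)
    {
        if (want_promisc && !promisc_flg) {   /* skip if the cache says "done" */
            hw_promisc = 1;
            promisc_flg = 1;
        }
    }

    int main(void)
    {
        set_mode(1);
        hw_promisc = 0;           /* card_down/card_up: hardware forgets   */
        promisc_flg = 0;          /* the added lines: forget the cache too */
        set_mode(1);              /* now actually reprograms the hardware  */
        printf("hw_promisc=%d\n", hw_promisc);
        return 0;
    }
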
@@ -6380,7 +6354,7 @@ static int s2io_card_up(nic_t * sp)
6380 6354
6381static void s2io_restart_nic(struct work_struct *work) 6355static void s2io_restart_nic(struct work_struct *work)
6382{ 6356{
6383 nic_t *sp = container_of(work, nic_t, rst_timer_task); 6357 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6384 struct net_device *dev = sp->dev; 6358 struct net_device *dev = sp->dev;
6385 6359
6386 s2io_card_down(sp); 6360 s2io_card_down(sp);
@@ -6409,7 +6383,7 @@ static void s2io_restart_nic(struct work_struct *work)
6409 6383
6410static void s2io_tx_watchdog(struct net_device *dev) 6384static void s2io_tx_watchdog(struct net_device *dev)
6411{ 6385{
6412 nic_t *sp = dev->priv; 6386 struct s2io_nic *sp = dev->priv;
6413 6387
6414 if (netif_carrier_ok(dev)) { 6388 if (netif_carrier_ok(dev)) {
6415 schedule_work(&sp->rst_timer_task); 6389 schedule_work(&sp->rst_timer_task);
@@ -6434,16 +6408,16 @@ static void s2io_tx_watchdog(struct net_device *dev)
6434 * Return value: 6408 * Return value:
6435 * SUCCESS on success and -1 on failure. 6409 * SUCCESS on success and -1 on failure.
6436 */ 6410 */
6437static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) 6411static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6438{ 6412{
6439 nic_t *sp = ring_data->nic; 6413 struct s2io_nic *sp = ring_data->nic;
6440 struct net_device *dev = (struct net_device *) sp->dev; 6414 struct net_device *dev = (struct net_device *) sp->dev;
6441 struct sk_buff *skb = (struct sk_buff *) 6415 struct sk_buff *skb = (struct sk_buff *)
6442 ((unsigned long) rxdp->Host_Control); 6416 ((unsigned long) rxdp->Host_Control);
6443 int ring_no = ring_data->ring_no; 6417 int ring_no = ring_data->ring_no;
6444 u16 l3_csum, l4_csum; 6418 u16 l3_csum, l4_csum;
6445 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; 6419 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6446 lro_t *lro; 6420 struct lro *lro;
6447 6421
6448 skb->dev = dev; 6422 skb->dev = dev;
6449 6423
@@ -6488,7 +6462,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6488 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2); 6462 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6489 unsigned char *buff = skb_push(skb, buf0_len); 6463 unsigned char *buff = skb_push(skb, buf0_len);
6490 6464
6491 buffAdd_t *ba = &ring_data->ba[get_block][get_off]; 6465 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6492 sp->stats.rx_bytes += buf0_len + buf2_len; 6466 sp->stats.rx_bytes += buf0_len + buf2_len;
6493 memcpy(buff, ba->ba_0, buf0_len); 6467 memcpy(buff, ba->ba_0, buf0_len);
6494 6468
@@ -6498,7 +6472,6 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6498 skb_put(skb, buf1_len); 6472 skb_put(skb, buf1_len);
6499 skb->len += buf2_len; 6473 skb->len += buf2_len;
6500 skb->data_len += buf2_len; 6474 skb->data_len += buf2_len;
6501 skb->truesize += buf2_len;
6502 skb_put(skb_shinfo(skb)->frag_list, buf2_len); 6475 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6503 sp->stats.rx_bytes += buf1_len; 6476 sp->stats.rx_bytes += buf1_len;
6504 6477
@@ -6582,23 +6555,20 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6582 6555
6583 if (!sp->lro) { 6556 if (!sp->lro) {
6584 skb->protocol = eth_type_trans(skb, dev); 6557 skb->protocol = eth_type_trans(skb, dev);
6585#ifdef CONFIG_S2IO_NAPI
6586 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6587 /* Queueing the vlan frame to the upper layer */
6588 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6589 RXD_GET_VLAN_TAG(rxdp->Control_2));
6590 } else {
6591 netif_receive_skb(skb);
6592 }
6593#else
6594 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { 6558 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6595 /* Queueing the vlan frame to the upper layer */ 6559 /* Queueing the vlan frame to the upper layer */
6596 vlan_hwaccel_rx(skb, sp->vlgrp, 6560 if (napi)
6597 RXD_GET_VLAN_TAG(rxdp->Control_2)); 6561 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6562 RXD_GET_VLAN_TAG(rxdp->Control_2));
6563 else
6564 vlan_hwaccel_rx(skb, sp->vlgrp,
6565 RXD_GET_VLAN_TAG(rxdp->Control_2));
6598 } else { 6566 } else {
6599 netif_rx(skb); 6567 if (napi)
6568 netif_receive_skb(skb);
6569 else
6570 netif_rx(skb);
6600 } 6571 }
6601#endif
6602 } else { 6572 } else {
6603send_up: 6573send_up:
6604 queue_rx_frame(skb); 6574 queue_rx_frame(skb);
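
Here the compile-time NAPI split is replaced by a run-time check of the napi module parameter when handing the frame to the stack: vlan_hwaccel_receive_skb()/netif_receive_skb() on the NAPI path, vlan_hwaccel_rx()/netif_rx() otherwise. A trivial sketch of trading an #ifdef for a run-time switch; deliver_napi() and deliver_irq() are stand-ins for those kernel calls:

    #include <stdio.h>

    static int use_napi = 1;                 /* runtime switch, not an #ifdef */

    static void deliver_napi(const char *pkt) { printf("napi path: %s\n", pkt); }
    static void deliver_irq(const char *pkt)  { printf("irq path:  %s\n", pkt); }

    static void deliver(const char *pkt)
    {
        if (use_napi)
            deliver_napi(pkt);
        else
            deliver_irq(pkt);
    }

    int main(void)
    {
        deliver("frame-0");
        use_napi = 0;                        /* e.g. forced off when MSI/MSI-X is used */
        deliver("frame-1");
        return 0;
    }

The benefit over the old #ifdef is that a single binary supports both modes, and the driver can turn NAPI off automatically (as s2io_verify_parm() now does for non-INTA interrupts) instead of requiring a rebuild.
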
@@ -6622,7 +6592,7 @@ aggregate:
6622 * void. 6592 * void.
6623 */ 6593 */
6624 6594
6625static void s2io_link(nic_t * sp, int link) 6595static void s2io_link(struct s2io_nic * sp, int link)
6626{ 6596{
6627 struct net_device *dev = (struct net_device *) sp->dev; 6597 struct net_device *dev = (struct net_device *) sp->dev;
6628 6598
@@ -6666,7 +6636,7 @@ static int get_xena_rev_id(struct pci_dev *pdev)
6666 * void 6636 * void
6667 */ 6637 */
6668 6638
6669static void s2io_init_pci(nic_t * sp) 6639static void s2io_init_pci(struct s2io_nic * sp)
6670{ 6640{
6671 u16 pci_cmd = 0, pcix_cmd = 0; 6641 u16 pci_cmd = 0, pcix_cmd = 0;
6672 6642
@@ -6699,13 +6669,9 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6699 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n"); 6669 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6700 rx_ring_num = 8; 6670 rx_ring_num = 8;
6701 } 6671 }
6702#ifdef CONFIG_S2IO_NAPI 6672 if (*dev_intr_type != INTA)
6703 if (*dev_intr_type != INTA) { 6673 napi = 0;
6704 DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when " 6674
6705 "MSI/MSI-X is enabled. Defaulting to INTA\n");
6706 *dev_intr_type = INTA;
6707 }
6708#endif
6709#ifndef CONFIG_PCI_MSI 6675#ifndef CONFIG_PCI_MSI
6710 if (*dev_intr_type != INTA) { 6676 if (*dev_intr_type != INTA) {
6711 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support" 6677 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
@@ -6726,6 +6692,8 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6726 "Defaulting to INTA\n"); 6692 "Defaulting to INTA\n");
6727 *dev_intr_type = INTA; 6693 *dev_intr_type = INTA;
6728 } 6694 }
6695 if ( (rx_ring_num > 1) && (*dev_intr_type != INTA) )
6696 napi = 0;
6729 if (rx_ring_mode > 3) { 6697 if (rx_ring_mode > 3) {
6730 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n"); 6698 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6731 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n"); 6699 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
@@ -6751,15 +6719,15 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6751static int __devinit 6719static int __devinit
6752s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) 6720s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6753{ 6721{
6754 nic_t *sp; 6722 struct s2io_nic *sp;
6755 struct net_device *dev; 6723 struct net_device *dev;
6756 int i, j, ret; 6724 int i, j, ret;
6757 int dma_flag = FALSE; 6725 int dma_flag = FALSE;
6758 u32 mac_up, mac_down; 6726 u32 mac_up, mac_down;
6759 u64 val64 = 0, tmp64 = 0; 6727 u64 val64 = 0, tmp64 = 0;
6760 XENA_dev_config_t __iomem *bar0 = NULL; 6728 struct XENA_dev_config __iomem *bar0 = NULL;
6761 u16 subid; 6729 u16 subid;
6762 mac_info_t *mac_control; 6730 struct mac_info *mac_control;
6763 struct config_param *config; 6731 struct config_param *config;
6764 int mode; 6732 int mode;
6765 u8 dev_intr_type = intr_type; 6733 u8 dev_intr_type = intr_type;
@@ -6814,7 +6782,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6814 } 6782 }
6815 } 6783 }
6816 6784
6817 dev = alloc_etherdev(sizeof(nic_t)); 6785 dev = alloc_etherdev(sizeof(struct s2io_nic));
6818 if (dev == NULL) { 6786 if (dev == NULL) {
6819 DBG_PRINT(ERR_DBG, "Device allocation failed\n"); 6787 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6820 pci_disable_device(pdev); 6788 pci_disable_device(pdev);
@@ -6829,7 +6797,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6829 6797
6830 /* Private member variable initialized to s2io NIC structure */ 6798 /* Private member variable initialized to s2io NIC structure */
6831 sp = dev->priv; 6799 sp = dev->priv;
6832 memset(sp, 0, sizeof(nic_t)); 6800 memset(sp, 0, sizeof(struct s2io_nic));
6833 sp->dev = dev; 6801 sp->dev = dev;
6834 sp->pdev = pdev; 6802 sp->pdev = pdev;
6835 sp->high_dma_flag = dma_flag; 6803 sp->high_dma_flag = dma_flag;
@@ -6925,7 +6893,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6925 sp->bar0 = ioremap(pci_resource_start(pdev, 0), 6893 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
6926 pci_resource_len(pdev, 0)); 6894 pci_resource_len(pdev, 0));
6927 if (!sp->bar0) { 6895 if (!sp->bar0) {
6928 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n", 6896 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
6929 dev->name); 6897 dev->name);
6930 ret = -ENOMEM; 6898 ret = -ENOMEM;
6931 goto bar0_remap_failed; 6899 goto bar0_remap_failed;
@@ -6934,7 +6902,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6934 sp->bar1 = ioremap(pci_resource_start(pdev, 2), 6902 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
6935 pci_resource_len(pdev, 2)); 6903 pci_resource_len(pdev, 2));
6936 if (!sp->bar1) { 6904 if (!sp->bar1) {
6937 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n", 6905 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
6938 dev->name); 6906 dev->name);
6939 ret = -ENOMEM; 6907 ret = -ENOMEM;
6940 goto bar1_remap_failed; 6908 goto bar1_remap_failed;
@@ -6945,7 +6913,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6945 6913
6946 /* Initializing the BAR1 address as the start of the FIFO pointer. */ 6914 /* Initializing the BAR1 address as the start of the FIFO pointer. */
6947 for (j = 0; j < MAX_TX_FIFOS; j++) { 6915 for (j = 0; j < MAX_TX_FIFOS; j++) {
6948 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *) 6916 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
6949 (sp->bar1 + (j * 0x00020000)); 6917 (sp->bar1 + (j * 0x00020000));
6950 } 6918 }
6951 6919
@@ -6966,10 +6934,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6966 * will use eth_mac_addr() for dev->set_mac_address 6934 * will use eth_mac_addr() for dev->set_mac_address
6967 * mac address will be set every time dev->open() is called 6935 * mac address will be set every time dev->open() is called
6968 */ 6936 */
6969#if defined(CONFIG_S2IO_NAPI)
6970 dev->poll = s2io_poll; 6937 dev->poll = s2io_poll;
6971 dev->weight = 32; 6938 dev->weight = 32;
6972#endif
6973 6939
6974#ifdef CONFIG_NET_POLL_CONTROLLER 6940#ifdef CONFIG_NET_POLL_CONTROLLER
6975 dev->poll_controller = s2io_netpoll; 6941 dev->poll_controller = s2io_netpoll;
@@ -6978,13 +6944,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6978 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 6944 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6979 if (sp->high_dma_flag == TRUE) 6945 if (sp->high_dma_flag == TRUE)
6980 dev->features |= NETIF_F_HIGHDMA; 6946 dev->features |= NETIF_F_HIGHDMA;
6981#ifdef NETIF_F_TSO
6982 dev->features |= NETIF_F_TSO; 6947 dev->features |= NETIF_F_TSO;
6983#endif
6984#ifdef NETIF_F_TSO6
6985 dev->features |= NETIF_F_TSO6; 6948 dev->features |= NETIF_F_TSO6;
6986#endif 6949 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
6987 if (sp->device_type & XFRAME_II_DEVICE) {
6988 dev->features |= NETIF_F_UFO; 6950 dev->features |= NETIF_F_UFO;
6989 dev->features |= NETIF_F_HW_CSUM; 6951 dev->features |= NETIF_F_HW_CSUM;
6990 } 6952 }
@@ -7065,9 +7027,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7065 7027
7066 /* Initialize spinlocks */ 7028 /* Initialize spinlocks */
7067 spin_lock_init(&sp->tx_lock); 7029 spin_lock_init(&sp->tx_lock);
7068#ifndef CONFIG_S2IO_NAPI 7030
7069 spin_lock_init(&sp->put_lock); 7031 if (!napi)
7070#endif 7032 spin_lock_init(&sp->put_lock);
7071 spin_lock_init(&sp->rx_lock); 7033 spin_lock_init(&sp->rx_lock);
7072 7034
7073 /* 7035 /*
@@ -7098,13 +7060,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7098 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, 7060 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7099 s2io_driver_version); 7061 s2io_driver_version);
7100 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7062 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7101 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, 7063 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7102 sp->def_mac_addr[0].mac_addr[0], 7064 sp->def_mac_addr[0].mac_addr[0],
7103 sp->def_mac_addr[0].mac_addr[1], 7065 sp->def_mac_addr[0].mac_addr[1],
7104 sp->def_mac_addr[0].mac_addr[2], 7066 sp->def_mac_addr[0].mac_addr[2],
7105 sp->def_mac_addr[0].mac_addr[3], 7067 sp->def_mac_addr[0].mac_addr[3],
7106 sp->def_mac_addr[0].mac_addr[4], 7068 sp->def_mac_addr[0].mac_addr[4],
7107 sp->def_mac_addr[0].mac_addr[5]); 7069 sp->def_mac_addr[0].mac_addr[5]);
7070 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7108 if (sp->device_type & XFRAME_II_DEVICE) { 7071 if (sp->device_type & XFRAME_II_DEVICE) {
7109 mode = s2io_print_pci_mode(sp); 7072 mode = s2io_print_pci_mode(sp);
7110 if (mode < 0) { 7073 if (mode < 0) {
@@ -7128,9 +7091,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7128 dev->name); 7091 dev->name);
7129 break; 7092 break;
7130 } 7093 }
7131#ifdef CONFIG_S2IO_NAPI 7094
7132 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); 7095 if (napi)
7133#endif 7096 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7134 switch(sp->intr_type) { 7097 switch(sp->intr_type) {
7135 case INTA: 7098 case INTA:
7136 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); 7099 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -7145,7 +7108,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7145 if (sp->lro) 7108 if (sp->lro)
7146 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", 7109 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7147 dev->name); 7110 dev->name);
7148 7111 if (ufo)
7112 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7113 " enabled\n", dev->name);
7149 /* Initialize device name */ 7114 /* Initialize device name */
7150 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); 7115 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7151 7116
@@ -7202,7 +7167,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7202{ 7167{
7203 struct net_device *dev = 7168 struct net_device *dev =
7204 (struct net_device *) pci_get_drvdata(pdev); 7169 (struct net_device *) pci_get_drvdata(pdev);
7205 nic_t *sp; 7170 struct s2io_nic *sp;
7206 7171
7207 if (dev == NULL) { 7172 if (dev == NULL) {
7208 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n"); 7173 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
@@ -7215,7 +7180,6 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7215 free_shared_mem(sp); 7180 free_shared_mem(sp);
7216 iounmap(sp->bar0); 7181 iounmap(sp->bar0);
7217 iounmap(sp->bar1); 7182 iounmap(sp->bar1);
7218 pci_disable_device(pdev);
7219 if (sp->intr_type != MSI_X) 7183 if (sp->intr_type != MSI_X)
7220 pci_release_regions(pdev); 7184 pci_release_regions(pdev);
7221 else { 7185 else {
@@ -7226,6 +7190,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7226 } 7190 }
7227 pci_set_drvdata(pdev, NULL); 7191 pci_set_drvdata(pdev, NULL);
7228 free_netdev(dev); 7192 free_netdev(dev);
7193 pci_disable_device(pdev);
7229} 7194}
7230 7195
7231/** 7196/**
@@ -7244,7 +7209,7 @@ int __init s2io_starter(void)
7244 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. 7209 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
7245 */ 7210 */
7246 7211
7247static void s2io_closer(void) 7212static __exit void s2io_closer(void)
7248{ 7213{
7249 pci_unregister_driver(&s2io_driver); 7214 pci_unregister_driver(&s2io_driver);
7250 DBG_PRINT(INIT_DBG, "cleanup done\n"); 7215 DBG_PRINT(INIT_DBG, "cleanup done\n");
@@ -7254,7 +7219,7 @@ module_init(s2io_starter);
7254module_exit(s2io_closer); 7219module_exit(s2io_closer);
7255 7220
7256static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, 7221static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7257 struct tcphdr **tcp, RxD_t *rxdp) 7222 struct tcphdr **tcp, struct RxD_t *rxdp)
7258{ 7223{
7259 int ip_off; 7224 int ip_off;
7260 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; 7225 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
@@ -7288,7 +7253,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7288 return 0; 7253 return 0;
7289} 7254}
7290 7255
7291static int check_for_socket_match(lro_t *lro, struct iphdr *ip, 7256static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7292 struct tcphdr *tcp) 7257 struct tcphdr *tcp)
7293{ 7258{
7294 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7259 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
@@ -7303,7 +7268,7 @@ static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7303 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2)); 7268 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7304} 7269}
7305 7270
7306static void initiate_new_session(lro_t *lro, u8 *l2h, 7271static void initiate_new_session(struct lro *lro, u8 *l2h,
7307 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len) 7272 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7308{ 7273{
7309 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7274 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
@@ -7329,12 +7294,12 @@ static void initiate_new_session(lro_t *lro, u8 *l2h,
7329 lro->in_use = 1; 7294 lro->in_use = 1;
7330} 7295}
7331 7296
7332static void update_L3L4_header(nic_t *sp, lro_t *lro) 7297static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7333{ 7298{
7334 struct iphdr *ip = lro->iph; 7299 struct iphdr *ip = lro->iph;
7335 struct tcphdr *tcp = lro->tcph; 7300 struct tcphdr *tcp = lro->tcph;
7336 u16 nchk; 7301 u16 nchk;
7337 StatInfo_t *statinfo = sp->mac_control.stats_info; 7302 struct stat_block *statinfo = sp->mac_control.stats_info;
7338 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7303 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7339 7304
7340 /* Update L3 header */ 7305 /* Update L3 header */
@@ -7360,7 +7325,7 @@ static void update_L3L4_header(nic_t *sp, lro_t *lro)
7360 statinfo->sw_stat.num_aggregations++; 7325 statinfo->sw_stat.num_aggregations++;
7361} 7326}
7362 7327
7363static void aggregate_new_rx(lro_t *lro, struct iphdr *ip, 7328static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7364 struct tcphdr *tcp, u32 l4_pyld) 7329 struct tcphdr *tcp, u32 l4_pyld)
7365{ 7330{
7366 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7331 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
@@ -7382,7 +7347,7 @@ static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
7382 } 7347 }
7383} 7348}
7384 7349
7385static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip, 7350static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7386 struct tcphdr *tcp, u32 tcp_pyld_len) 7351 struct tcphdr *tcp, u32 tcp_pyld_len)
7387{ 7352{
7388 u8 *ptr; 7353 u8 *ptr;
@@ -7440,8 +7405,8 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
7440} 7405}
7441 7406
7442static int 7407static int
7443s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, 7408s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7444 RxD_t *rxdp, nic_t *sp) 7409 struct RxD_t *rxdp, struct s2io_nic *sp)
7445{ 7410{
7446 struct iphdr *ip; 7411 struct iphdr *ip;
7447 struct tcphdr *tcph; 7412 struct tcphdr *tcph;
@@ -7458,7 +7423,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7458 tcph = (struct tcphdr *)*tcp; 7423 tcph = (struct tcphdr *)*tcp;
7459 *tcp_len = get_l4_pyld_length(ip, tcph); 7424 *tcp_len = get_l4_pyld_length(ip, tcph);
7460 for (i=0; i<MAX_LRO_SESSIONS; i++) { 7425 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7461 lro_t *l_lro = &sp->lro0_n[i]; 7426 struct lro *l_lro = &sp->lro0_n[i];
7462 if (l_lro->in_use) { 7427 if (l_lro->in_use) {
7463 if (check_for_socket_match(l_lro, ip, tcph)) 7428 if (check_for_socket_match(l_lro, ip, tcph))
7464 continue; 7429 continue;
@@ -7496,7 +7461,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7496 } 7461 }
7497 7462
7498 for (i=0; i<MAX_LRO_SESSIONS; i++) { 7463 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7499 lro_t *l_lro = &sp->lro0_n[i]; 7464 struct lro *l_lro = &sp->lro0_n[i];
7500 if (!(l_lro->in_use)) { 7465 if (!(l_lro->in_use)) {
7501 *lro = l_lro; 7466 *lro = l_lro;
7502 ret = 3; /* Begin anew */ 7467 ret = 3; /* Begin anew */
@@ -7535,9 +7500,9 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7535 return ret; 7500 return ret;
7536} 7501}
7537 7502
7538static void clear_lro_session(lro_t *lro) 7503static void clear_lro_session(struct lro *lro)
7539{ 7504{
7540 static u16 lro_struct_size = sizeof(lro_t); 7505 static u16 lro_struct_size = sizeof(struct lro);
7541 7506
7542 memset(lro, 0, lro_struct_size); 7507 memset(lro, 0, lro_struct_size);
7543} 7508}
@@ -7547,14 +7512,14 @@ static void queue_rx_frame(struct sk_buff *skb)
7547 struct net_device *dev = skb->dev; 7512 struct net_device *dev = skb->dev;
7548 7513
7549 skb->protocol = eth_type_trans(skb, dev); 7514 skb->protocol = eth_type_trans(skb, dev);
7550#ifdef CONFIG_S2IO_NAPI 7515 if (napi)
7551 netif_receive_skb(skb); 7516 netif_receive_skb(skb);
7552#else 7517 else
7553 netif_rx(skb); 7518 netif_rx(skb);
7554#endif
7555} 7519}
7556 7520
7557static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, 7521static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7522 struct sk_buff *skb,
7558 u32 tcp_len) 7523 u32 tcp_len)
7559{ 7524{
7560 struct sk_buff *first = lro->parent; 7525 struct sk_buff *first = lro->parent;
@@ -7566,6 +7531,7 @@ static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
7566 lro->last_frag->next = skb; 7531 lro->last_frag->next = skb;
7567 else 7532 else
7568 skb_shinfo(first)->frag_list = skb; 7533 skb_shinfo(first)->frag_list = skb;
7534 first->truesize += skb->truesize;
7569 lro->last_frag = skb; 7535 lro->last_frag = skb;
7570 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; 7536 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7571 return; 7537 return;
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 3b0bafd273c8..a5e1a513deb5 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -30,6 +30,8 @@
30#undef SUCCESS 30#undef SUCCESS
31#define SUCCESS 0 31#define SUCCESS 0
32#define FAILURE -1 32#define FAILURE -1
33#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
34#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
33 35
34#define CHECKBIT(value, nbit) (value & (1 << nbit)) 36#define CHECKBIT(value, nbit) (value & (1 << nbit))
35 37
@@ -37,7 +39,7 @@
37#define MAX_FLICKER_TIME 60000 /* 60 Secs */ 39#define MAX_FLICKER_TIME 60000 /* 60 Secs */
38 40
39/* Maximum outstanding splits to be configured into xena. */ 41/* Maximum outstanding splits to be configured into xena. */
40typedef enum xena_max_outstanding_splits { 42enum {
41 XENA_ONE_SPLIT_TRANSACTION = 0, 43 XENA_ONE_SPLIT_TRANSACTION = 0,
42 XENA_TWO_SPLIT_TRANSACTION = 1, 44 XENA_TWO_SPLIT_TRANSACTION = 1,
43 XENA_THREE_SPLIT_TRANSACTION = 2, 45 XENA_THREE_SPLIT_TRANSACTION = 2,
@@ -46,7 +48,7 @@ typedef enum xena_max_outstanding_splits {
46 XENA_TWELVE_SPLIT_TRANSACTION = 5, 48 XENA_TWELVE_SPLIT_TRANSACTION = 5,
47 XENA_SIXTEEN_SPLIT_TRANSACTION = 6, 49 XENA_SIXTEEN_SPLIT_TRANSACTION = 6,
48 XENA_THIRTYTWO_SPLIT_TRANSACTION = 7 50 XENA_THIRTYTWO_SPLIT_TRANSACTION = 7
49} xena_max_outstanding_splits; 51};
50#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4) 52#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
51 53
52/* OS concerned variables and constants */ 54/* OS concerned variables and constants */
@@ -77,7 +79,7 @@ static int debug_level = ERR_DBG;
77#define S2IO_JUMBO_SIZE 9600 79#define S2IO_JUMBO_SIZE 9600
78 80
79/* Driver statistics maintained by driver */ 81/* Driver statistics maintained by driver */
80typedef struct { 82struct swStat {
81 unsigned long long single_ecc_errs; 83 unsigned long long single_ecc_errs;
82 unsigned long long double_ecc_errs; 84 unsigned long long double_ecc_errs;
83 unsigned long long parity_err_cnt; 85 unsigned long long parity_err_cnt;
@@ -92,10 +94,10 @@ typedef struct {
92 unsigned long long flush_max_pkts; 94 unsigned long long flush_max_pkts;
93 unsigned long long sum_avg_pkts_aggregated; 95 unsigned long long sum_avg_pkts_aggregated;
94 unsigned long long num_aggregations; 96 unsigned long long num_aggregations;
95} swStat_t; 97};
96 98
97/* Xpak related alarm and warnings */ 99/* Xpak related alarm and warnings */
98typedef struct { 100struct xpakStat {
99 u64 alarm_transceiver_temp_high; 101 u64 alarm_transceiver_temp_high;
100 u64 alarm_transceiver_temp_low; 102 u64 alarm_transceiver_temp_low;
101 u64 alarm_laser_bias_current_high; 103 u64 alarm_laser_bias_current_high;
@@ -110,11 +112,11 @@ typedef struct {
110 u64 warn_laser_output_power_low; 112 u64 warn_laser_output_power_low;
111 u64 xpak_regs_stat; 113 u64 xpak_regs_stat;
112 u32 xpak_timer_count; 114 u32 xpak_timer_count;
113} xpakStat_t; 115};
114 116
115 117
116/* The statistics block of Xena */ 118/* The statistics block of Xena */
117typedef struct stat_block { 119struct stat_block {
118/* Tx MAC statistics counters. */ 120/* Tx MAC statistics counters. */
119 __le32 tmac_data_octets; 121 __le32 tmac_data_octets;
120 __le32 tmac_frms; 122 __le32 tmac_frms;
@@ -290,9 +292,9 @@ typedef struct stat_block {
290 __le32 reserved_14; 292 __le32 reserved_14;
291 __le32 link_fault_cnt; 293 __le32 link_fault_cnt;
292 u8 buffer[20]; 294 u8 buffer[20];
293 swStat_t sw_stat; 295 struct swStat sw_stat;
294 xpakStat_t xpak_stat; 296 struct xpakStat xpak_stat;
295} StatInfo_t; 297};
296 298
297/* 299/*
298 * Structures representing different init time configuration 300 * Structures representing different init time configuration
@@ -315,7 +317,7 @@ static int fifo_map[][MAX_TX_FIFOS] = {
315}; 317};
316 318
317/* Maintains Per FIFO related information. */ 319/* Maintains Per FIFO related information. */
318typedef struct tx_fifo_config { 320struct tx_fifo_config {
319#define MAX_AVAILABLE_TXDS 8192 321#define MAX_AVAILABLE_TXDS 8192
320 u32 fifo_len; /* specifies len of FIFO up to 8192, i.e. no. of TxDLs */ 322
321/* Priority definition */ 323/* Priority definition */
@@ -332,11 +334,11 @@ typedef struct tx_fifo_config {
332 u8 f_no_snoop; 334 u8 f_no_snoop;
333#define NO_SNOOP_TXD 0x01 335#define NO_SNOOP_TXD 0x01
334#define NO_SNOOP_TXD_BUFFER 0x02 336#define NO_SNOOP_TXD_BUFFER 0x02
335} tx_fifo_config_t; 337};
336 338
337 339
338/* Maintains per Ring related information */ 340/* Maintains per Ring related information */
339typedef struct rx_ring_config { 341struct rx_ring_config {
340 u32 num_rxd; /*No of RxDs per Rx Ring */ 342 u32 num_rxd; /*No of RxDs per Rx Ring */
341#define RX_RING_PRI_0 0 /* highest */ 343#define RX_RING_PRI_0 0 /* highest */
342#define RX_RING_PRI_1 1 344#define RX_RING_PRI_1 1
@@ -357,7 +359,7 @@ typedef struct rx_ring_config {
357 u8 f_no_snoop; 359 u8 f_no_snoop;
358#define NO_SNOOP_RXD 0x01 360#define NO_SNOOP_RXD 0x01
359#define NO_SNOOP_RXD_BUFFER 0x02 361#define NO_SNOOP_RXD_BUFFER 0x02
360} rx_ring_config_t; 362};
361 363
362/* This structure contains values of the tunable parameters 364
363 * of the H/W 365 * of the H/W
@@ -367,7 +369,7 @@ struct config_param {
367 u32 tx_fifo_num; /*Number of Tx FIFOs */ 369 u32 tx_fifo_num; /*Number of Tx FIFOs */
368 370
369 u8 fifo_mapping[MAX_TX_FIFOS]; 371 u8 fifo_mapping[MAX_TX_FIFOS];
370 tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ 372 struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
371 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ 373 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
372 u64 tx_intr_type; 374 u64 tx_intr_type;
373 /* Specifies if Tx Intr is UTILZ or PER_LIST type. */ 375 /* Specifies if Tx Intr is UTILZ or PER_LIST type. */
@@ -376,7 +378,7 @@ struct config_param {
376 u32 rx_ring_num; /*Number of receive rings */ 378 u32 rx_ring_num; /*Number of receive rings */
377#define MAX_RX_BLOCKS_PER_RING 150 379#define MAX_RX_BLOCKS_PER_RING 150
378 380
379 rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */ 381 struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
380 u8 bimodal; /*Flag for setting bimodal interrupts*/ 382 u8 bimodal; /*Flag for setting bimodal interrupts*/
381 383
382#define HEADER_ETHERNET_II_802_3_SIZE 14 384#define HEADER_ETHERNET_II_802_3_SIZE 14
@@ -395,14 +397,14 @@ struct config_param {
395}; 397};
396 398
397/* Structure representing MAC Addrs */ 399/* Structure representing MAC Addrs */
398typedef struct mac_addr { 400struct mac_addr {
399 u8 mac_addr[ETH_ALEN]; 401 u8 mac_addr[ETH_ALEN];
400} macaddr_t; 402};
401 403
402/* Structure that represents every FIFO element in the BAR1 404
403 * Address location. 405 * Address location.
404 */ 406 */
405typedef struct _TxFIFO_element { 407struct TxFIFO_element {
406 u64 TxDL_Pointer; 408 u64 TxDL_Pointer;
407 409
408 u64 List_Control; 410 u64 List_Control;
@@ -413,10 +415,10 @@ typedef struct _TxFIFO_element {
413#define TX_FIFO_SPECIAL_FUNC BIT(23) 415#define TX_FIFO_SPECIAL_FUNC BIT(23)
414#define TX_FIFO_DS_NO_SNOOP BIT(31) 416#define TX_FIFO_DS_NO_SNOOP BIT(31)
415#define TX_FIFO_BUFF_NO_SNOOP BIT(30) 417#define TX_FIFO_BUFF_NO_SNOOP BIT(30)
416} TxFIFO_element_t; 418};
417 419
418/* Tx descriptor structure */ 420/* Tx descriptor structure */
419typedef struct _TxD { 421struct TxD {
420 u64 Control_1; 422 u64 Control_1;
421/* bit mask */ 423/* bit mask */
422#define TXD_LIST_OWN_XENA BIT(7) 424#define TXD_LIST_OWN_XENA BIT(7)
@@ -447,16 +449,16 @@ typedef struct _TxD {
447 449
448 u64 Buffer_Pointer; 450 u64 Buffer_Pointer;
449 u64 Host_Control; /* reserved for host */ 451 u64 Host_Control; /* reserved for host */
450} TxD_t; 452};
451 453
452/* Structure to hold the phy and virt addr of every TxDL. */ 454/* Structure to hold the phy and virt addr of every TxDL. */
453typedef struct list_info_hold { 455struct list_info_hold {
454 dma_addr_t list_phy_addr; 456 dma_addr_t list_phy_addr;
455 void *list_virt_addr; 457 void *list_virt_addr;
456} list_info_hold_t; 458};
457 459
458/* Rx descriptor structure for 1 buffer mode */ 460/* Rx descriptor structure for 1 buffer mode */
459typedef struct _RxD_t { 461struct RxD_t {
460 u64 Host_Control; /* reserved for host */ 462 u64 Host_Control; /* reserved for host */
461 u64 Control_1; 463 u64 Control_1;
462#define RXD_OWN_XENA BIT(7) 464#define RXD_OWN_XENA BIT(7)
@@ -481,21 +483,21 @@ typedef struct _RxD_t {
481#define SET_NUM_TAG(val) vBIT(val,16,32) 483#define SET_NUM_TAG(val) vBIT(val,16,32)
482 484
483 485
484} RxD_t; 486};
485/* Rx descriptor structure for 1 buffer mode */ 487/* Rx descriptor structure for 1 buffer mode */
486typedef struct _RxD1_t { 488struct RxD1 {
487 struct _RxD_t h; 489 struct RxD_t h;
488 490
489#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14) 491#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14)
490#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14) 492#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14)
491#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \ 493#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
492 (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48) 494 (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
493 u64 Buffer0_ptr; 495 u64 Buffer0_ptr;
494} RxD1_t; 496};
495/* Rx descriptor structure for 3 or 2 buffer mode */ 497/* Rx descriptor structure for 3 or 2 buffer mode */
496 498
497typedef struct _RxD3_t { 499struct RxD3 {
498 struct _RxD_t h; 500 struct RxD_t h;
499 501
500#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14) 502#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14)
501#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16) 503#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16)
@@ -515,15 +517,15 @@ typedef struct _RxD3_t {
515 u64 Buffer0_ptr; 517 u64 Buffer0_ptr;
516 u64 Buffer1_ptr; 518 u64 Buffer1_ptr;
517 u64 Buffer2_ptr; 519 u64 Buffer2_ptr;
518} RxD3_t; 520};
519 521
520 522
521/* Structure that represents the Rx descriptor block which contains 523/* Structure that represents the Rx descriptor block which contains
522 * 128 Rx descriptors. 524 * 128 Rx descriptors.
523 */ 525 */
524typedef struct _RxD_block { 526struct RxD_block {
525#define MAX_RXDS_PER_BLOCK_1 127 527#define MAX_RXDS_PER_BLOCK_1 127
526 RxD1_t rxd[MAX_RXDS_PER_BLOCK_1]; 528 struct RxD1 rxd[MAX_RXDS_PER_BLOCK_1];
527 529
528 u64 reserved_0; 530 u64 reserved_0;
529#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 531#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
@@ -533,22 +535,22 @@ typedef struct _RxD_block {
533 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch 535 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
534 * the upper 32 bits should 536 * the upper 32 bits should
535 * be 0 */ 537 * be 0 */
536} RxD_block_t; 538};
537 539
538#define SIZE_OF_BLOCK 4096 540#define SIZE_OF_BLOCK 4096
539 541
540#define RXD_MODE_1 0 542#define RXD_MODE_1 0 /* One Buffer mode */
541#define RXD_MODE_3A 1 543#define RXD_MODE_3A 1 /* Three Buffer mode */
542#define RXD_MODE_3B 2 544#define RXD_MODE_3B 2 /* Two Buffer mode */
543 545
544/* Structure to hold virtual addresses of Buf0 and Buf1 in 546/* Structure to hold virtual addresses of Buf0 and Buf1 in
545 * 2buf mode. */ 547 * 2buf mode. */
546typedef struct bufAdd { 548struct buffAdd {
547 void *ba_0_org; 549 void *ba_0_org;
548 void *ba_1_org; 550 void *ba_1_org;
549 void *ba_0; 551 void *ba_0;
550 void *ba_1; 552 void *ba_1;
551} buffAdd_t; 553};
552 554
553/* Structure which stores all the MAC control parameters */ 555/* Structure which stores all the MAC control parameters */
554 556
@@ -556,43 +558,46 @@ typedef struct bufAdd {
556 * from which the Rx Interrupt processor can start picking 558 * from which the Rx Interrupt processor can start picking
557 * up the RxDs for processing. 559 * up the RxDs for processing.
558 */ 560 */
559typedef struct _rx_curr_get_info_t { 561struct rx_curr_get_info {
560 u32 block_index; 562 u32 block_index;
561 u32 offset; 563 u32 offset;
562 u32 ring_len; 564 u32 ring_len;
563} rx_curr_get_info_t; 565};
564 566
565typedef rx_curr_get_info_t rx_curr_put_info_t; 567struct rx_curr_put_info {
568 u32 block_index;
569 u32 offset;
570 u32 ring_len;
571};
566 572
567/* This structure stores the offset of the TxDl in the FIFO 573/* This structure stores the offset of the TxDl in the FIFO
568 * from which the Tx Interrupt processor can start picking 574 * from which the Tx Interrupt processor can start picking
569 * up the TxDLs for send complete interrupt processing. 575 * up the TxDLs for send complete interrupt processing.
570 */ 576 */
571typedef struct { 577struct tx_curr_get_info {
572 u32 offset; 578 u32 offset;
573 u32 fifo_len; 579 u32 fifo_len;
574} tx_curr_get_info_t; 580};
575
576typedef tx_curr_get_info_t tx_curr_put_info_t;
577 581
582struct tx_curr_put_info {
583 u32 offset;
584 u32 fifo_len;
585};
578 586
579typedef struct rxd_info { 587struct rxd_info {
580 void *virt_addr; 588 void *virt_addr;
581 dma_addr_t dma_addr; 589 dma_addr_t dma_addr;
582}rxd_info_t; 590};
583 591
584/* Structure that holds the Phy and virt addresses of the Blocks */ 592/* Structure that holds the Phy and virt addresses of the Blocks */
585typedef struct rx_block_info { 593struct rx_block_info {
586 void *block_virt_addr; 594 void *block_virt_addr;
587 dma_addr_t block_dma_addr; 595 dma_addr_t block_dma_addr;
588 rxd_info_t *rxds; 596 struct rxd_info *rxds;
589} rx_block_info_t; 597};
590
591/* pre declaration of the nic structure */
592typedef struct s2io_nic nic_t;
593 598
594/* Ring specific structure */ 599/* Ring specific structure */
595typedef struct ring_info { 600struct ring_info {
596 /* The ring number */ 601 /* The ring number */
597 int ring_no; 602 int ring_no;
598 603
@@ -600,7 +605,7 @@ typedef struct ring_info {
600 * Place holders for the virtual and physical addresses of 605 * Place holders for the virtual and physical addresses of
601 * all the Rx Blocks 606 * all the Rx Blocks
602 */ 607 */
603 rx_block_info_t rx_blocks[MAX_RX_BLOCKS_PER_RING]; 608 struct rx_block_info rx_blocks[MAX_RX_BLOCKS_PER_RING];
604 int block_count; 609 int block_count;
605 int pkt_cnt; 610 int pkt_cnt;
606 611
@@ -608,26 +613,24 @@ typedef struct ring_info {
608 * Put pointer info which indicates which RxD has to be replenished 613
609 * with a new buffer. 614 * with a new buffer.
610 */ 615 */
611 rx_curr_put_info_t rx_curr_put_info; 616 struct rx_curr_put_info rx_curr_put_info;
612 617
613 /* 618 /*
614 * Get pointer info which indicates which is the last RxD that was 619
615 * processed by the driver. 620 * processed by the driver.
616 */ 621 */
617 rx_curr_get_info_t rx_curr_get_info; 622 struct rx_curr_get_info rx_curr_get_info;
618 623
619#ifndef CONFIG_S2IO_NAPI
620 /* Index to the absolute position of the put pointer of Rx ring */ 624 /* Index to the absolute position of the put pointer of Rx ring */
621 int put_pos; 625 int put_pos;
622#endif
623 626
624 /* Buffer Address store. */ 627 /* Buffer Address store. */
625 buffAdd_t **ba; 628 struct buffAdd **ba;
626 nic_t *nic; 629 struct s2io_nic *nic;
627} ring_info_t; 630};
628 631
629/* Fifo specific structure */ 632/* Fifo specific structure */
630typedef struct fifo_info { 633struct fifo_info {
631 /* FIFO number */ 634 /* FIFO number */
632 int fifo_no; 635 int fifo_no;
633 636
@@ -635,40 +638,40 @@ typedef struct fifo_info {
635 int max_txds; 638 int max_txds;
636 639
637 /* Place holder of all the TX List's Phy and Virt addresses. */ 640 /* Place holder of all the TX List's Phy and Virt addresses. */
638 list_info_hold_t *list_info; 641 struct list_info_hold *list_info;
639 642
640 /* 643 /*
641 * Current offset within the tx FIFO where driver would write 644 * Current offset within the tx FIFO where driver would write
642 * new Tx frame 645 * new Tx frame
643 */ 646 */
644 tx_curr_put_info_t tx_curr_put_info; 647 struct tx_curr_put_info tx_curr_put_info;
645 648
646 /* 649 /*
647 * Current offset within tx FIFO from where the driver would start freeing 650 * Current offset within tx FIFO from where the driver would start freeing
648 * the buffers 651 * the buffers
649 */ 652 */
650 tx_curr_get_info_t tx_curr_get_info; 653 struct tx_curr_get_info tx_curr_get_info;
651 654
652 nic_t *nic; 655 struct s2io_nic *nic;
653}fifo_info_t; 656};
654 657
655/* Information related to the Tx and Rx FIFOs and Rings of Xena 658/* Information related to the Tx and Rx FIFOs and Rings of Xena
656 * is maintained in this structure. 659 * is maintained in this structure.
657 */ 660 */
658typedef struct mac_info { 661struct mac_info {
659/* tx side stuff */ 662/* tx side stuff */
660 /* logical pointer of start of each Tx FIFO */ 663 /* logical pointer of start of each Tx FIFO */
661 TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS]; 664 struct TxFIFO_element __iomem *tx_FIFO_start[MAX_TX_FIFOS];
662 665
663 /* Fifo specific structure */ 666 /* Fifo specific structure */
664 fifo_info_t fifos[MAX_TX_FIFOS]; 667 struct fifo_info fifos[MAX_TX_FIFOS];
665 668
666 /* Save virtual address of TxD page with zero DMA addr(if any) */ 669 /* Save virtual address of TxD page with zero DMA addr(if any) */
667 void *zerodma_virt_addr; 670 void *zerodma_virt_addr;
668 671
669/* rx side stuff */ 672/* rx side stuff */
670 /* Ring specific structure */ 673 /* Ring specific structure */
671 ring_info_t rings[MAX_RX_RINGS]; 674 struct ring_info rings[MAX_RX_RINGS];
672 675
673 u16 rmac_pause_time; 676 u16 rmac_pause_time;
674 u16 mc_pause_threshold_q0q3; 677 u16 mc_pause_threshold_q0q3;
@@ -677,14 +680,14 @@ typedef struct mac_info {
677 void *stats_mem; /* original pointer to allocated mem */ 680
678 dma_addr_t stats_mem_phy; /* Physical address of the stat block */ 681 dma_addr_t stats_mem_phy; /* Physical address of the stat block */
679 u32 stats_mem_sz; 682 u32 stats_mem_sz;
680 StatInfo_t *stats_info; /* Logical address of the stat block */ 683 struct stat_block *stats_info; /* Logical address of the stat block */
681} mac_info_t; 684};
682 685
683/* structure representing the user defined MAC addresses */ 686/* structure representing the user defined MAC addresses */
684typedef struct { 687struct usr_addr {
685 char addr[ETH_ALEN]; 688 char addr[ETH_ALEN];
686 int usage_cnt; 689 int usage_cnt;
687} usr_addr_t; 690};
688 691
689/* Default Tunable parameters of the NIC. */ 692/* Default Tunable parameters of the NIC. */
690#define DEFAULT_FIFO_0_LEN 4096 693#define DEFAULT_FIFO_0_LEN 4096
@@ -717,7 +720,7 @@ struct msix_info_st {
717}; 720};
718 721
719/* Data structure to represent a LRO session */ 722/* Data structure to represent a LRO session */
720typedef struct lro { 723struct lro {
721 struct sk_buff *parent; 724 struct sk_buff *parent;
722 struct sk_buff *last_frag; 725 struct sk_buff *last_frag;
723 u8 *l2h; 726 u8 *l2h;
@@ -733,20 +736,18 @@ typedef struct lro {
733 u32 cur_tsval; 736 u32 cur_tsval;
734 u32 cur_tsecr; 737 u32 cur_tsecr;
735 u8 saw_ts; 738 u8 saw_ts;
736}lro_t; 739};
737 740
738/* Structure representing one instance of the NIC */ 741/* Structure representing one instance of the NIC */
739struct s2io_nic { 742struct s2io_nic {
740 int rxd_mode; 743 int rxd_mode;
741#ifdef CONFIG_S2IO_NAPI
742 /* 744 /*
743 * Count of packets to be processed in a given iteration, it will be indicated 745 * Count of packets to be processed in a given iteration, it will be indicated
744 * by the quota field of the device structure when NAPI is enabled. 746 * by the quota field of the device structure when NAPI is enabled.
745 */ 747 */
746 int pkts_to_process; 748 int pkts_to_process;
747#endif
748 struct net_device *dev; 749 struct net_device *dev;
749 mac_info_t mac_control; 750 struct mac_info mac_control;
750 struct config_param config; 751 struct config_param config;
751 struct pci_dev *pdev; 752 struct pci_dev *pdev;
752 void __iomem *bar0; 753 void __iomem *bar0;
@@ -754,8 +755,8 @@ struct s2io_nic {
754#define MAX_MAC_SUPPORTED 16 755#define MAX_MAC_SUPPORTED 16
755#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED 756#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
756 757
757 macaddr_t def_mac_addr[MAX_MAC_SUPPORTED]; 758 struct mac_addr def_mac_addr[MAX_MAC_SUPPORTED];
758 macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED]; 759 struct mac_addr pre_mac_addr[MAX_MAC_SUPPORTED];
759 760
760 struct net_device_stats stats; 761 struct net_device_stats stats;
761 int high_dma_flag; 762 int high_dma_flag;
@@ -775,9 +776,7 @@ struct s2io_nic {
775 atomic_t rx_bufs_left[MAX_RX_RINGS]; 776 atomic_t rx_bufs_left[MAX_RX_RINGS];
776 777
777 spinlock_t tx_lock; 778 spinlock_t tx_lock;
778#ifndef CONFIG_S2IO_NAPI
779 spinlock_t put_lock; 779 spinlock_t put_lock;
780#endif
781 780
782#define PROMISC 1 781#define PROMISC 1
783#define ALL_MULTI 2 782#define ALL_MULTI 2
@@ -785,7 +784,7 @@ struct s2io_nic {
785#define MAX_ADDRS_SUPPORTED 64 784#define MAX_ADDRS_SUPPORTED 64
786 u16 usr_addr_count; 785 u16 usr_addr_count;
787 u16 mc_addr_count; 786 u16 mc_addr_count;
788 usr_addr_t usr_addrs[MAX_ADDRS_SUPPORTED]; 787 struct usr_addr usr_addrs[MAX_ADDRS_SUPPORTED];
789 788
790 u16 m_cast_flg; 789 u16 m_cast_flg;
791 u16 all_multi_pos; 790 u16 all_multi_pos;
@@ -841,7 +840,7 @@ struct s2io_nic {
841 u8 device_type; 840 u8 device_type;
842 841
843#define MAX_LRO_SESSIONS 32 842#define MAX_LRO_SESSIONS 32
844 lro_t lro0_n[MAX_LRO_SESSIONS]; 843 struct lro lro0_n[MAX_LRO_SESSIONS];
845 unsigned long clubbed_frms_cnt; 844 unsigned long clubbed_frms_cnt;
846 unsigned long sending_both; 845 unsigned long sending_both;
847 u8 lro; 846 u8 lro;
@@ -855,8 +854,9 @@ struct s2io_nic {
855 spinlock_t rx_lock; 854 spinlock_t rx_lock;
856 atomic_t isr_cnt; 855 atomic_t isr_cnt;
857 u64 *ufo_in_band_v; 856 u64 *ufo_in_band_v;
858#define VPD_PRODUCT_NAME_LEN 50 857#define VPD_STRING_LEN 80
859 u8 product_name[VPD_PRODUCT_NAME_LEN]; 858 u8 product_name[VPD_STRING_LEN];
859 u8 serial_num[VPD_STRING_LEN];
860}; 860};
861 861
862#define RESET_ERROR 1; 862#define RESET_ERROR 1;
@@ -975,43 +975,50 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
975static int init_shared_mem(struct s2io_nic *sp); 975static int init_shared_mem(struct s2io_nic *sp);
976static void free_shared_mem(struct s2io_nic *sp); 976static void free_shared_mem(struct s2io_nic *sp);
977static int init_nic(struct s2io_nic *nic); 977static int init_nic(struct s2io_nic *nic);
978static void rx_intr_handler(ring_info_t *ring_data); 978static void rx_intr_handler(struct ring_info *ring_data);
979static void tx_intr_handler(fifo_info_t *fifo_data); 979static void tx_intr_handler(struct fifo_info *fifo_data);
980static void alarm_intr_handler(struct s2io_nic *sp); 980static void alarm_intr_handler(struct s2io_nic *sp);
981 981
982static int s2io_starter(void); 982static int s2io_starter(void);
983static void s2io_closer(void);
983static void s2io_tx_watchdog(struct net_device *dev); 984static void s2io_tx_watchdog(struct net_device *dev);
984static void s2io_tasklet(unsigned long dev_addr); 985static void s2io_tasklet(unsigned long dev_addr);
985static void s2io_set_multicast(struct net_device *dev); 986static void s2io_set_multicast(struct net_device *dev);
986static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp); 987static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
987static void s2io_link(nic_t * sp, int link); 988static void s2io_link(struct s2io_nic * sp, int link);
988#if defined(CONFIG_S2IO_NAPI) 989static void s2io_reset(struct s2io_nic * sp);
989static int s2io_poll(struct net_device *dev, int *budget); 990static int s2io_poll(struct net_device *dev, int *budget);
990#endif 991static void s2io_init_pci(struct s2io_nic * sp);
991static void s2io_init_pci(nic_t * sp);
992static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); 992static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
993static void s2io_alarm_handle(unsigned long data); 993static void s2io_alarm_handle(unsigned long data);
994static int s2io_enable_msi(nic_t *nic); 994static int s2io_enable_msi(struct s2io_nic *nic);
995static irqreturn_t s2io_msi_handle(int irq, void *dev_id); 995static irqreturn_t s2io_msi_handle(int irq, void *dev_id);
996static irqreturn_t 996static irqreturn_t
997s2io_msix_ring_handle(int irq, void *dev_id); 997s2io_msix_ring_handle(int irq, void *dev_id);
998static irqreturn_t 998static irqreturn_t
999s2io_msix_fifo_handle(int irq, void *dev_id); 999s2io_msix_fifo_handle(int irq, void *dev_id);
1000static irqreturn_t s2io_isr(int irq, void *dev_id); 1000static irqreturn_t s2io_isr(int irq, void *dev_id);
1001static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); 1001static int verify_xena_quiescence(struct s2io_nic *sp);
1002static const struct ethtool_ops netdev_ethtool_ops; 1002static const struct ethtool_ops netdev_ethtool_ops;
1003static void s2io_set_link(struct work_struct *work); 1003static void s2io_set_link(struct work_struct *work);
1004static int s2io_set_swapper(nic_t * sp); 1004static int s2io_set_swapper(struct s2io_nic * sp);
1005static void s2io_card_down(nic_t *nic); 1005static void s2io_card_down(struct s2io_nic *nic);
1006static int s2io_card_up(nic_t *nic); 1006static int s2io_card_up(struct s2io_nic *nic);
1007static int get_xena_rev_id(struct pci_dev *pdev); 1007static int get_xena_rev_id(struct pci_dev *pdev);
1008static void restore_xmsi_data(nic_t *nic); 1008static int wait_for_cmd_complete(void *addr, u64 busy_bit);
1009static int s2io_add_isr(struct s2io_nic * sp);
1010static void s2io_rem_isr(struct s2io_nic * sp);
1011
1012static void restore_xmsi_data(struct s2io_nic *nic);
1009 1013
1010static int s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, RxD_t *rxdp, nic_t *sp); 1014static int
1011static void clear_lro_session(lro_t *lro); 1015s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
1016 struct RxD_t *rxdp, struct s2io_nic *sp);
1017static void clear_lro_session(struct lro *lro);
1012static void queue_rx_frame(struct sk_buff *skb); 1018static void queue_rx_frame(struct sk_buff *skb);
1013static void update_L3L4_header(nic_t *sp, lro_t *lro); 1019static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
1014static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); 1020static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
1021 struct sk_buff *skb, u32 tcp_len);
1015 1022
1016#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size 1023#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
1017#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size 1024#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
new file mode 100644
index 000000000000..7f800feaa9a2
--- /dev/null
+++ b/drivers/net/sc92031.c
@@ -0,0 +1,1620 @@
1/* Silan SC92031 PCI Fast Ethernet Adapter driver
2 *
3 * Based on vendor drivers:
4 * Silan Fast Ethernet Netcard Driver:
5 * MODULE_AUTHOR ("gaoyonghong");
6 * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
7 * MODULE_LICENSE("GPL");
8 * 8139D Fast Ethernet driver:
9 * (C) 2002 by gaoyonghong
10 * MODULE_AUTHOR ("gaoyonghong");
11 * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
12 * MODULE_LICENSE("GPL");
13 * Both are almost identical and seem to be based on pci-skeleton.c
14 *
15 * Rewritten for 2.6 by Cesar Eduardo Barros
16 */
17
18/* Note about set_mac_address: I don't know how to change the hardware
19 * matching, so you need to enable IFF_PROMISC when using it.
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/delay.h>
25#include <linux/pci.h>
26#include <linux/dma-mapping.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/ethtool.h>
30#include <linux/crc32.h>
31
32#include <asm/irq.h>
33
34#define PCI_VENDOR_ID_SILAN 0x1904
35#define PCI_DEVICE_ID_SILAN_SC92031 0x2031
36#define PCI_DEVICE_ID_SILAN_8139D 0x8139
37
38#define SC92031_NAME "sc92031"
39#define SC92031_DESCRIPTION "Silan SC92031 PCI Fast Ethernet Adapter driver"
40#define SC92031_VERSION "2.0c"
41
42/* BAR 0 is MMIO, BAR 1 is PIO */
43#ifndef SC92031_USE_BAR
44#define SC92031_USE_BAR 0
45#endif
46
47/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
48static int multicast_filter_limit = 64;
49module_param(multicast_filter_limit, int, 0);
50MODULE_PARM_DESC(multicast_filter_limit,
51 "Maximum number of filtered multicast addresses");
52
53static int media;
54module_param(media, int, 0);
55MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
56 " 0x01 = 10M half, 0x02 = 10M full,"
57 " 0x04 = 100M half, 0x08 = 100M full)");
58
59/* Size of the in-memory receive ring. */
60#define RX_BUF_LEN_IDX 3 /* 0==8K, 1==16K, 2==32K, 3==64K, 4==128K */
61#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
62
63/* Number of Tx descriptor registers. */
64#define NUM_TX_DESC 4
65
66/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
67#define MAX_ETH_FRAME_SIZE 1536
68
69/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
70#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
71#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
72
73/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
74#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */
75
76/* Time in jiffies before concluding the transmitter is hung. */
77#define TX_TIMEOUT (4*HZ)
78
79#define SILAN_STATS_NUM 2 /* number of ETHTOOL_GSTATS */
80
81/* media options */
82#define AUTOSELECT 0x00
83#define M10_HALF 0x01
84#define M10_FULL 0x02
85#define M100_HALF 0x04
86#define M100_FULL 0x08
87
88 /* Symbolic offsets to registers. */
89enum silan_registers {
90 Config0 = 0x00, // Config0
91 Config1 = 0x04, // Config1
92 RxBufWPtr = 0x08, // Rx buffer write pointer
93 IntrStatus = 0x0C, // Interrupt status
94 IntrMask = 0x10, // Interrupt mask
95 RxbufAddr = 0x14, // Rx buffer start address
96 RxBufRPtr = 0x18, // Rx buffer read pointer
97 Txstatusall = 0x1C, // Transmit status of all descriptors
98 TxStatus0 = 0x20, // Transmit status (Four 32bit registers).
99 TxAddr0 = 0x30, // Tx descriptors (also four 32bit).
100 RxConfig = 0x40, // Rx configuration
101 MAC0 = 0x44, // Ethernet hardware address.
102 MAR0 = 0x4C, // Multicast filter.
103 RxStatus0 = 0x54, // Rx status
104 TxConfig = 0x5C, // Tx configuration
105 PhyCtrl = 0x60, // physical control
106 FlowCtrlConfig = 0x64, // flow control
107 Miicmd0 = 0x68, // Mii command0 register
108 Miicmd1 = 0x6C, // Mii command1 register
109 Miistatus = 0x70, // Mii status register
110 Timercnt = 0x74, // Timer counter register
111 TimerIntr = 0x78, // Timer interrupt register
112 PMConfig = 0x7C, // Power Manager configuration
113 CRC0 = 0x80, // Power Manager CRC (Two 32bit registers)
114 Wakeup0 = 0x88, // Power Manager wakeup (Eight 64bit registers)
115 LSBCRC0 = 0xC8, // Power Manager LSBCRC (Two 32bit registers)
116 TestD0 = 0xD0,
117 TestD4 = 0xD4,
118 TestD8 = 0xD8,
119};
120
121#define MII_BMCR 0 // Basic mode control register
122#define MII_BMSR 1 // Basic mode status register
123#define MII_JAB 16
124#define MII_OutputStatus 24
125
126#define BMCR_FULLDPLX 0x0100 // Full duplex
127#define BMCR_ANRESTART 0x0200 // Auto negotiation restart
128#define BMCR_ANENABLE 0x1000 // Enable auto negotiation
129#define BMCR_SPEED100 0x2000 // Select 100Mbps
130#define BMSR_LSTATUS 0x0004 // Link status
131#define PHY_16_JAB_ENB 0x1000
132#define PHY_16_PORT_ENB 0x1
133
134enum IntrStatusBits {
135 LinkFail = 0x80000000,
136 LinkOK = 0x40000000,
137 TimeOut = 0x20000000,
138 RxOverflow = 0x0040,
139 RxOK = 0x0020,
140 TxOK = 0x0001,
141 IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
142};
143
144enum TxStatusBits {
145 TxCarrierLost = 0x20000000,
146 TxAborted = 0x10000000,
147 TxOutOfWindow = 0x08000000,
148 TxNccShift = 22,
149 EarlyTxThresShift = 16,
150 TxStatOK = 0x8000,
151 TxUnderrun = 0x4000,
152 TxOwn = 0x2000,
153};
154
155enum RxStatusBits {
156 RxStatesOK = 0x80000,
157 RxBadAlign = 0x40000,
158 RxHugeFrame = 0x20000,
159 RxSmallFrame = 0x10000,
160 RxCRCOK = 0x8000,
161 RxCrlFrame = 0x4000,
162 Rx_Broadcast = 0x2000,
163 Rx_Multicast = 0x1000,
164 RxAddrMatch = 0x0800,
165 MiiErr = 0x0400,
166};
167
168enum RxConfigBits {
169 RxFullDx = 0x80000000,
170 RxEnb = 0x40000000,
171 RxSmall = 0x20000000,
172 RxHuge = 0x10000000,
173 RxErr = 0x08000000,
174 RxAllphys = 0x04000000,
175 RxMulticast = 0x02000000,
176 RxBroadcast = 0x01000000,
177 RxLoopBack = (1 << 23) | (1 << 22),
178 LowThresholdShift = 12,
179 HighThresholdShift = 2,
180};
181
182enum TxConfigBits {
183 TxFullDx = 0x80000000,
184 TxEnb = 0x40000000,
185 TxEnbPad = 0x20000000,
186 TxEnbHuge = 0x10000000,
187 TxEnbFCS = 0x08000000,
188 TxNoBackOff = 0x04000000,
189 TxEnbPrem = 0x02000000,
190 TxCareLostCrs = 0x1000000,
191 TxExdCollNum = 0xf00000,
192 TxDataRate = 0x80000,
193};
194
195enum PhyCtrlconfigbits {
196 PhyCtrlAne = 0x80000000,
197 PhyCtrlSpd100 = 0x40000000,
198 PhyCtrlSpd10 = 0x20000000,
199 PhyCtrlPhyBaseAddr = 0x1f000000,
200 PhyCtrlDux = 0x800000,
201 PhyCtrlReset = 0x400000,
202};
203
204enum FlowCtrlConfigBits {
205 FlowCtrlFullDX = 0x80000000,
206 FlowCtrlEnb = 0x40000000,
207};
208
209enum Config0Bits {
210 Cfg0_Reset = 0x80000000,
211 Cfg0_Anaoff = 0x40000000,
212 Cfg0_LDPS = 0x20000000,
213};
214
215enum Config1Bits {
216 Cfg1_EarlyRx = 1 << 31,
217 Cfg1_EarlyTx = 1 << 30,
218
219 //rx buffer size
220 Cfg1_Rcv8K = 0x0,
221 Cfg1_Rcv16K = 0x1,
222 Cfg1_Rcv32K = 0x3,
223 Cfg1_Rcv64K = 0x7,
224 Cfg1_Rcv128K = 0xf,
225};
226
227enum MiiCmd0Bits {
228 Mii_Divider = 0x20000000,
229 Mii_WRITE = 0x400000,
230 Mii_READ = 0x200000,
231 Mii_SCAN = 0x100000,
232 Mii_Tamod = 0x80000,
233 Mii_Drvmod = 0x40000,
234 Mii_mdc = 0x20000,
235 Mii_mdoen = 0x10000,
236 Mii_mdo = 0x8000,
237 Mii_mdi = 0x4000,
238};
239
240enum MiiStatusBits {
241 Mii_StatusBusy = 0x80000000,
242};
243
244enum PMConfigBits {
245 PM_Enable = 1 << 31,
246 PM_LongWF = 1 << 30,
247 PM_Magic = 1 << 29,
248 PM_LANWake = 1 << 28,
249 PM_LWPTN = (1 << 27 | 1<< 26),
250 PM_LinkUp = 1 << 25,
251 PM_WakeUp = 1 << 24,
252};
253
254/* Locking rules:
255 * priv->lock protects most of the fields of priv and most of the
256 * hardware registers. It does not have to protect against softirqs
257 * between sc92031_disable_interrupts and sc92031_enable_interrupts;
258 * it also does not need to be used in ->open and ->stop while the
259 * device interrupts are off.
260 * Not having to protect against softirqs is very useful due to heavy
261 * use of mdelay() in _sc92031_reset().
262 * Functions prefixed with _sc92031_ must be called with the lock held;
263 * functions prefixed with sc92031_ must be called without the lock held.
264 * Use mmiowb() before unlocking if the hardware was written to.
265 */
266
267/* Locking rules for the interrupt:
268 * - the interrupt and the tasklet never run at the same time
269 * - neither runs between sc92031_disable_interrupts and
270 * sc92031_enable_interrupts
271 */
272
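A minimal sketch of what this convention implies at a call site (illustrative only; _sc92031_do_something() is a placeholder name for any locked helper that writes hardware registers, not a function in this patch):

static void sc92031_do_something(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);	/* sc92031_-prefixed entry point takes the lock */
	_sc92031_do_something(dev);	/* _sc92031_-prefixed helper assumes it is held */
	mmiowb();			/* hardware was written to, so flush before unlocking */
	spin_unlock_bh(&priv->lock);
}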
273struct sc92031_priv {
274 spinlock_t lock;
275 /* iomap.h cookie */
276 void __iomem *port_base;
277 /* pci device structure */
278 struct pci_dev *pdev;
279 /* tasklet */
280 struct tasklet_struct tasklet;
281
282 /* CPU address of rx ring */
283 void *rx_ring;
284 /* PCI address of rx ring */
285 dma_addr_t rx_ring_dma_addr;
286 /* PCI address of rx ring read pointer */
287 dma_addr_t rx_ring_tail;
288
289 /* tx ring write index */
290 unsigned tx_head;
291 /* tx ring read index */
292 unsigned tx_tail;
293 /* CPU address of tx bounce buffer */
294 void *tx_bufs;
295 /* PCI address of tx bounce buffer */
296 dma_addr_t tx_bufs_dma_addr;
297
298 /* copies of some hardware registers */
299 u32 intr_status;
300 atomic_t intr_mask;
301 u32 rx_config;
302 u32 tx_config;
303 u32 pm_config;
304
305 /* copy of some flags from dev->flags */
306 unsigned int mc_flags;
307
308 /* for ETHTOOL_GSTATS */
309 u64 tx_timeouts;
310 u64 rx_loss;
311
312 /* for dev->get_stats */
313 long rx_value;
314 struct net_device_stats stats;
315};
316
317/* I don't know which registers can be safely read; however, I can guess
318 * MAC0 is one of them. */
319static inline void _sc92031_dummy_read(void __iomem *port_base)
320{
321 ioread32(port_base + MAC0);
322}
323
324static u32 _sc92031_mii_wait(void __iomem *port_base)
325{
326 u32 mii_status;
327
328 do {
329 udelay(10);
330 mii_status = ioread32(port_base + Miistatus);
331 } while (mii_status & Mii_StatusBusy);
332
333 return mii_status;
334}
335
336static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
337{
338 iowrite32(Mii_Divider, port_base + Miicmd0);
339
340 _sc92031_mii_wait(port_base);
341
342 iowrite32(cmd1, port_base + Miicmd1);
343 iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);
344
345 return _sc92031_mii_wait(port_base);
346}
347
348static void _sc92031_mii_scan(void __iomem *port_base)
349{
350 _sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
351}
352
353static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
354{
355 return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
356}
357
358static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
359{
360 _sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
361}
362
363static void sc92031_disable_interrupts(struct net_device *dev)
364{
365 struct sc92031_priv *priv = netdev_priv(dev);
366 void __iomem *port_base = priv->port_base;
367
368 /* tell the tasklet/interrupt not to enable interrupts */
369 atomic_set(&priv->intr_mask, 0);
370 wmb();
371
372 /* stop interrupts */
373 iowrite32(0, port_base + IntrMask);
374 _sc92031_dummy_read(port_base);
375 mmiowb();
376
377 /* wait for any concurrent interrupt/tasklet to finish */
378 synchronize_irq(dev->irq);
379 tasklet_disable(&priv->tasklet);
380}
381
382static void sc92031_enable_interrupts(struct net_device *dev)
383{
384 struct sc92031_priv *priv = netdev_priv(dev);
385 void __iomem *port_base = priv->port_base;
386
387 tasklet_enable(&priv->tasklet);
388
389 atomic_set(&priv->intr_mask, IntrBits);
390 wmb();
391
392 iowrite32(IntrBits, port_base + IntrMask);
393 mmiowb();
394}
395
396static void _sc92031_disable_tx_rx(struct net_device *dev)
397{
398 struct sc92031_priv *priv = netdev_priv(dev);
399 void __iomem *port_base = priv->port_base;
400
401 priv->rx_config &= ~RxEnb;
402 priv->tx_config &= ~TxEnb;
403 iowrite32(priv->rx_config, port_base + RxConfig);
404 iowrite32(priv->tx_config, port_base + TxConfig);
405}
406
407static void _sc92031_enable_tx_rx(struct net_device *dev)
408{
409 struct sc92031_priv *priv = netdev_priv(dev);
410 void __iomem *port_base = priv->port_base;
411
412 priv->rx_config |= RxEnb;
413 priv->tx_config |= TxEnb;
414 iowrite32(priv->rx_config, port_base + RxConfig);
415 iowrite32(priv->tx_config, port_base + TxConfig);
416}
417
418static void _sc92031_tx_clear(struct net_device *dev)
419{
420 struct sc92031_priv *priv = netdev_priv(dev);
421
422 while (priv->tx_head - priv->tx_tail > 0) {
423 priv->tx_tail++;
424 priv->stats.tx_dropped++;
425 }
426 priv->tx_head = priv->tx_tail = 0;
427}
428
429static void _sc92031_set_mar(struct net_device *dev)
430{
431 struct sc92031_priv *priv = netdev_priv(dev);
432 void __iomem *port_base = priv->port_base;
433 u32 mar0 = 0, mar1 = 0;
434
435 if ((dev->flags & IFF_PROMISC)
436 || dev->mc_count > multicast_filter_limit
437 || (dev->flags & IFF_ALLMULTI))
438 mar0 = mar1 = 0xffffffff;
439 else if (dev->flags & IFF_MULTICAST) {
440 struct dev_mc_list *mc_list;
441
442 for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
443 u32 crc;
444 unsigned bit = 0;
445
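			/* Hash the address: take the top byte of the inverted
			 * CRC and shuffle six of its bits into a 0-63 index,
			 * then set that bit in the MAR0/MAR1 pair below; the
			 * exact bit order is presumably what the Silan filter
			 * hardware expects. */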
446 crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr);
447 crc >>= 24;
448
449 if (crc & 0x01) bit |= 0x02;
450 if (crc & 0x02) bit |= 0x01;
451 if (crc & 0x10) bit |= 0x20;
452 if (crc & 0x20) bit |= 0x10;
453 if (crc & 0x40) bit |= 0x08;
454 if (crc & 0x80) bit |= 0x04;
455
456 if (bit > 31)
457 mar0 |= 0x1 << (bit - 32);
458 else
459 mar1 |= 0x1 << bit;
460 }
461 }
462
463 iowrite32(mar0, port_base + MAR0);
464 iowrite32(mar1, port_base + MAR0 + 4);
465}
466
467static void _sc92031_set_rx_config(struct net_device *dev)
468{
469 struct sc92031_priv *priv = netdev_priv(dev);
470 void __iomem *port_base = priv->port_base;
471 unsigned int old_mc_flags;
472 u32 rx_config_bits = 0;
473
474 old_mc_flags = priv->mc_flags;
475
476 if (dev->flags & IFF_PROMISC)
477 rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
478 | RxMulticast | RxAllphys;
479
480 if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
481 rx_config_bits |= RxMulticast;
482
483 if (dev->flags & IFF_BROADCAST)
484 rx_config_bits |= RxBroadcast;
485
486 priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
487 | RxMulticast | RxAllphys);
488 priv->rx_config |= rx_config_bits;
489
490 priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
491 | IFF_MULTICAST | IFF_BROADCAST);
492
493 if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
494 iowrite32(priv->rx_config, port_base + RxConfig);
495}
496
497static bool _sc92031_check_media(struct net_device *dev)
498{
499 struct sc92031_priv *priv = netdev_priv(dev);
500 void __iomem *port_base = priv->port_base;
501 u16 bmsr;
502
503 bmsr = _sc92031_mii_read(port_base, MII_BMSR);
504 rmb();
505 if (bmsr & BMSR_LSTATUS) {
506 bool speed_100, duplex_full;
507 u32 flow_ctrl_config = 0;
508 u16 output_status = _sc92031_mii_read(port_base,
509 MII_OutputStatus);
510 _sc92031_mii_scan(port_base);
511
512 speed_100 = output_status & 0x2;
513 duplex_full = output_status & 0x4;
514
515 /* Initial Tx/Rx configuration */
516 priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
517 priv->tx_config = 0x48800000;
518
519 /* NOTE: vendor driver had dead code here to enable tx padding */
520
521 if (!speed_100)
522 priv->tx_config |= 0x80000;
523
524 // configure rx mode
525 _sc92031_set_rx_config(dev);
526
527 if (duplex_full) {
528 priv->rx_config |= RxFullDx;
529 priv->tx_config |= TxFullDx;
530 flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
531 } else {
532 priv->rx_config &= ~RxFullDx;
533 priv->tx_config &= ~TxFullDx;
534 }
535
536 _sc92031_set_mar(dev);
537 _sc92031_set_rx_config(dev);
538 _sc92031_enable_tx_rx(dev);
539 iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);
540
541 netif_carrier_on(dev);
542
543 if (printk_ratelimit())
544 printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
545 dev->name,
546 speed_100 ? "100" : "10",
547 duplex_full ? "full" : "half");
548 return true;
549 } else {
550 _sc92031_mii_scan(port_base);
551
552 netif_carrier_off(dev);
553
554 _sc92031_disable_tx_rx(dev);
555
556 if (printk_ratelimit())
557 printk(KERN_INFO "%s: link down\n", dev->name);
558 return false;
559 }
560}
561
562static void _sc92031_phy_reset(struct net_device *dev)
563{
564 struct sc92031_priv *priv = netdev_priv(dev);
565 void __iomem *port_base = priv->port_base;
566 u32 phy_ctrl;
567
568 phy_ctrl = ioread32(port_base + PhyCtrl);
569 phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
570 phy_ctrl |= PhyCtrlAne | PhyCtrlReset;
571
572 switch (media) {
573 default:
574 case AUTOSELECT:
575 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
576 break;
577 case M10_HALF:
578 phy_ctrl |= PhyCtrlSpd10;
579 break;
580 case M10_FULL:
581 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
582 break;
583 case M100_HALF:
584 phy_ctrl |= PhyCtrlSpd100;
585 break;
586 case M100_FULL:
587 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
588 break;
589 }
590
591 iowrite32(phy_ctrl, port_base + PhyCtrl);
592 mdelay(10);
593
594 phy_ctrl &= ~PhyCtrlReset;
595 iowrite32(phy_ctrl, port_base + PhyCtrl);
596 mdelay(1);
597
598 _sc92031_mii_write(port_base, MII_JAB,
599 PHY_16_JAB_ENB | PHY_16_PORT_ENB);
600 _sc92031_mii_scan(port_base);
601
602 netif_carrier_off(dev);
603 netif_stop_queue(dev);
604}
605
606static void _sc92031_reset(struct net_device *dev)
607{
608 struct sc92031_priv *priv = netdev_priv(dev);
609 void __iomem *port_base = priv->port_base;
610
611 /* disable PM */
612 iowrite32(0, port_base + PMConfig);
613
614 /* soft reset the chip */
615 iowrite32(Cfg0_Reset, port_base + Config0);
616 mdelay(200);
617
618 iowrite32(0, port_base + Config0);
619 mdelay(10);
620
621 /* disable interrupts */
622 iowrite32(0, port_base + IntrMask);
623
624 /* clear multicast address */
625 iowrite32(0, port_base + MAR0);
626 iowrite32(0, port_base + MAR0 + 4);
627
628 /* init rx ring */
629 iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
630 priv->rx_ring_tail = priv->rx_ring_dma_addr;
631
632 /* init tx ring */
633 _sc92031_tx_clear(dev);
634
635 /* clear old register values */
636 priv->intr_status = 0;
637 atomic_set(&priv->intr_mask, 0);
638 priv->rx_config = 0;
639 priv->tx_config = 0;
640 priv->mc_flags = 0;
641
642 /* configure rx buffer size */
643 /* NOTE: vendor driver had dead code here to enable early tx/rx */
644 iowrite32(Cfg1_Rcv64K, port_base + Config1);
645
646 _sc92031_phy_reset(dev);
647 _sc92031_check_media(dev);
648
649 /* calculate rx fifo overflow */
650 priv->rx_value = 0;
651
652 /* enable PM */
653 iowrite32(priv->pm_config, port_base + PMConfig);
654
655 /* clear intr register */
656 ioread32(port_base + IntrStatus);
657}
658
659static void _sc92031_tx_tasklet(struct net_device *dev)
660{
661 struct sc92031_priv *priv = netdev_priv(dev);
662 void __iomem *port_base = priv->port_base;
663
664 unsigned old_tx_tail;
665 unsigned entry;
666 u32 tx_status;
667
668 old_tx_tail = priv->tx_tail;
669 while (priv->tx_head - priv->tx_tail > 0) {
670 entry = priv->tx_tail % NUM_TX_DESC;
671 tx_status = ioread32(port_base + TxStatus0 + entry * 4);
672
673 if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
674 break;
675
676 priv->tx_tail++;
677
678 if (tx_status & TxStatOK) {
679 priv->stats.tx_bytes += tx_status & 0x1fff;
680 priv->stats.tx_packets++;
681 /* Note: TxCarrierLost is always asserted at 100mbps. */
682 priv->stats.collisions += (tx_status >> 22) & 0xf;
683 }
684
685 if (tx_status & (TxOutOfWindow | TxAborted)) {
686 priv->stats.tx_errors++;
687
688 if (tx_status & TxAborted)
689 priv->stats.tx_aborted_errors++;
690
691 if (tx_status & TxCarrierLost)
692 priv->stats.tx_carrier_errors++;
693
694 if (tx_status & TxOutOfWindow)
695 priv->stats.tx_window_errors++;
696 }
697
698 if (tx_status & TxUnderrun)
699 priv->stats.tx_fifo_errors++;
700 }
701
702 if (priv->tx_tail != old_tx_tail)
703 if (netif_queue_stopped(dev))
704 netif_wake_queue(dev);
705}
706
707static void _sc92031_rx_tasklet_error(u32 rx_status,
708 struct sc92031_priv *priv, unsigned rx_size)
709{
710 if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
711 priv->stats.rx_errors++;
712 priv->stats.rx_length_errors++;
713 }
714
715 if (!(rx_status & RxStatesOK)) {
716 priv->stats.rx_errors++;
717
718 if (rx_status & (RxHugeFrame | RxSmallFrame))
719 priv->stats.rx_length_errors++;
720
721 if (rx_status & RxBadAlign)
722 priv->stats.rx_frame_errors++;
723
724 if (!(rx_status & RxCRCOK))
725 priv->stats.rx_crc_errors++;
726 } else
727 priv->rx_loss++;
728}
729
730static void _sc92031_rx_tasklet(struct net_device *dev)
731{
732 struct sc92031_priv *priv = netdev_priv(dev);
733 void __iomem *port_base = priv->port_base;
734
735 dma_addr_t rx_ring_head;
736 unsigned rx_len;
737 unsigned rx_ring_offset;
738 void *rx_ring = priv->rx_ring;
739
740 rx_ring_head = ioread32(port_base + RxBufWPtr);
741 rmb();
742
743 /* rx_ring_head is only 17 bits in the RxBufWPtr register.
744 * we need to convert it to a 32-bit physical address
745 */
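	/* Worked example (hypothetical numbers): with a ring base of
	 * 0x12348000 and hardware low bits of 0x0123, the mask/OR below
	 * gives 0x12340123, which is below the base, so RX_BUF_LEN (64K
	 * with the default RX_BUF_LEN_IDX) is added to land at 0x12350123
	 * within the current lap of the ring. */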
746 rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
747 rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
748 if (rx_ring_head < priv->rx_ring_dma_addr)
749 rx_ring_head += RX_BUF_LEN;
750
751 if (rx_ring_head >= priv->rx_ring_tail)
752 rx_len = rx_ring_head - priv->rx_ring_tail;
753 else
754 rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);
755
756 if (!rx_len)
757 return;
758
759 if (unlikely(rx_len > RX_BUF_LEN)) {
760 if (printk_ratelimit())
761 printk(KERN_ERR "%s: rx packets length > rx buffer\n",
762 dev->name);
763 return;
764 }
765
766 rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;
767
768 while (rx_len) {
769 u32 rx_status;
770 unsigned rx_size, rx_size_align, pkt_size;
771 struct sk_buff *skb;
772
773 rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
774 rmb();
775
776 rx_size = rx_status >> 20;
777 rx_size_align = (rx_size + 3) & ~3; // round up to 4-byte alignment
778 pkt_size = rx_size - 4; // Omit the four octet CRC from the length.
779
780 rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;
781
782 if (unlikely(rx_status == 0
783 || rx_size > (MAX_ETH_FRAME_SIZE + 4)
784 || rx_size < 16
785 || !(rx_status & RxStatesOK))) {
786 _sc92031_rx_tasklet_error(rx_status, priv, rx_size);
787 break;
788 }
789
790 if (unlikely(rx_size_align + 4 > rx_len)) {
791 if (printk_ratelimit())
792 printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
793 break;
794 }
795
796 rx_len -= rx_size_align + 4;
797
798 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
799 if (unlikely(!skb)) {
800 if (printk_ratelimit())
801 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
802 dev->name, pkt_size);
803 goto next;
804 }
805
806 skb_reserve(skb, NET_IP_ALIGN);
807
808 if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
809 memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
810 rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
811 memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
812 rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
813 } else {
814 memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
815 }
816
817 skb->dev = dev;
818 skb->protocol = eth_type_trans(skb, dev);
819 dev->last_rx = jiffies;
820 netif_rx(skb);
821
822 priv->stats.rx_bytes += pkt_size;
823 priv->stats.rx_packets++;
824
825 if (rx_status & Rx_Multicast)
826 priv->stats.multicast++;
827
828 next:
829 rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
830 }
831 mb();
832
833 priv->rx_ring_tail = rx_ring_head;
834 iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
835}
836
837static void _sc92031_link_tasklet(struct net_device *dev)
838{
839 struct sc92031_priv *priv = netdev_priv(dev);
840
841 if (_sc92031_check_media(dev))
842 netif_wake_queue(dev);
843 else {
844 netif_stop_queue(dev);
845 priv->stats.tx_carrier_errors++;
846 }
847}
848
849static void sc92031_tasklet(unsigned long data)
850{
851 struct net_device *dev = (struct net_device *)data;
852 struct sc92031_priv *priv = netdev_priv(dev);
853 void __iomem *port_base = priv->port_base;
854 u32 intr_status, intr_mask;
855
856 intr_status = priv->intr_status;
857
858 spin_lock(&priv->lock);
859
860 if (unlikely(!netif_running(dev)))
861 goto out;
862
863 if (intr_status & TxOK)
864 _sc92031_tx_tasklet(dev);
865
866 if (intr_status & RxOK)
867 _sc92031_rx_tasklet(dev);
868
869 if (intr_status & RxOverflow)
870 priv->stats.rx_errors++;
871
872 if (intr_status & TimeOut) {
873 priv->stats.rx_errors++;
874 priv->stats.rx_length_errors++;
875 }
876
877 if (intr_status & (LinkFail | LinkOK))
878 _sc92031_link_tasklet(dev);
879
880out:
881 intr_mask = atomic_read(&priv->intr_mask);
882 rmb();
883
884 iowrite32(intr_mask, port_base + IntrMask);
885 mmiowb();
886
887 spin_unlock(&priv->lock);
888}
889
890static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
891{
892 struct net_device *dev = dev_id;
893 struct sc92031_priv *priv = netdev_priv(dev);
894 void __iomem *port_base = priv->port_base;
895 u32 intr_status, intr_mask;
896
897 /* mask interrupts before clearing IntrStatus */
898 iowrite32(0, port_base + IntrMask);
899 _sc92031_dummy_read(port_base);
900
901 intr_status = ioread32(port_base + IntrStatus);
902 if (unlikely(intr_status == 0xffffffff))
903 return IRQ_NONE; // hardware has gone missing
904
905 intr_status &= IntrBits;
906 if (!intr_status)
907 goto out_none;
908
909 priv->intr_status = intr_status;
910 tasklet_schedule(&priv->tasklet);
911
912 return IRQ_HANDLED;
913
914out_none:
915 intr_mask = atomic_read(&priv->intr_mask);
916 rmb();
917
918 iowrite32(intr_mask, port_base + IntrMask);
919 mmiowb();
920
921 return IRQ_NONE;
922}
923
924static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
925{
926 struct sc92031_priv *priv = netdev_priv(dev);
927 void __iomem *port_base = priv->port_base;
928
929 // FIXME I do not understand what this is trying to do.
930 if (netif_running(dev)) {
931 int temp;
932
933 spin_lock_bh(&priv->lock);
934
935 /* Update the error count. */
936 temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;
937
938 if (temp == 0xffff) {
939 priv->rx_value += temp;
940 priv->stats.rx_fifo_errors = priv->rx_value;
941 } else {
942 priv->stats.rx_fifo_errors = temp + priv->rx_value;
943 }
944
945 spin_unlock_bh(&priv->lock);
946 }
947
948 return &priv->stats;
949}
950
951static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
952{
953 int err = 0;
954 struct sc92031_priv *priv = netdev_priv(dev);
955 void __iomem *port_base = priv->port_base;
956
957 unsigned len;
958 unsigned entry;
959 u32 tx_status;
960
961 if (unlikely(skb->len > TX_BUF_SIZE)) {
962 err = -EMSGSIZE;
963 priv->stats.tx_dropped++;
964 goto out;
965 }
966
967 spin_lock_bh(&priv->lock);
968
969 if (unlikely(!netif_carrier_ok(dev))) {
970 err = -ENOLINK;
971 priv->stats.tx_dropped++;
972 goto out_unlock;
973 }
974
975 BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);
976
977 entry = priv->tx_head++ % NUM_TX_DESC;
978
979 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
980
981 len = skb->len;
982 if (unlikely(len < ETH_ZLEN)) {
983 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
984 0, ETH_ZLEN - len);
985 len = ETH_ZLEN;
986 }
987
988 wmb();
989
990 if (len < 100)
991 tx_status = len;
992 else if (len < 300)
993 tx_status = 0x30000 | len;
994 else
995 tx_status = 0x50000 | len;
996
997 iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
998 port_base + TxAddr0 + entry * 4);
999 iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
1000 mmiowb();
1001
1002 dev->trans_start = jiffies;
1003
1004 if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
1005 netif_stop_queue(dev);
1006
1007out_unlock:
1008 spin_unlock_bh(&priv->lock);
1009
1010out:
1011 dev_kfree_skb(skb);
1012
1013 return err;
1014}
1015
1016static int sc92031_open(struct net_device *dev)
1017{
1018 int err;
1019 struct sc92031_priv *priv = netdev_priv(dev);
1020 struct pci_dev *pdev = priv->pdev;
1021
1022 priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
1023 &priv->rx_ring_dma_addr);
1024 if (unlikely(!priv->rx_ring)) {
1025 err = -ENOMEM;
1026 goto out_alloc_rx_ring;
1027 }
1028
1029 priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
1030 &priv->tx_bufs_dma_addr);
1031 if (unlikely(!priv->tx_bufs)) {
1032 err = -ENOMEM;
1033 goto out_alloc_tx_bufs;
1034 }
1035 priv->tx_head = priv->tx_tail = 0;
1036
1037 err = request_irq(pdev->irq, sc92031_interrupt,
1038 SA_SHIRQ, dev->name, dev);
1039 if (unlikely(err < 0))
1040 goto out_request_irq;
1041
1042 priv->pm_config = 0;
1043
1044 /* Interrupts already disabled by sc92031_stop or sc92031_probe */
1045 spin_lock(&priv->lock);
1046
1047 _sc92031_reset(dev);
1048 mmiowb();
1049
1050 spin_unlock(&priv->lock);
1051 sc92031_enable_interrupts(dev);
1052
1053 if (netif_carrier_ok(dev))
1054 netif_start_queue(dev);
1055 else
1056 netif_tx_disable(dev);
1057
1058 return 0;
1059
1060out_request_irq:
1061 pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
1062 priv->tx_bufs_dma_addr);
1063out_alloc_tx_bufs:
1064 pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
1065 priv->rx_ring_dma_addr);
1066out_alloc_rx_ring:
1067 return err;
1068}
1069
1070static int sc92031_stop(struct net_device *dev)
1071{
1072 struct sc92031_priv *priv = netdev_priv(dev);
1073 struct pci_dev *pdev = priv->pdev;
1074
1075 netif_tx_disable(dev);
1076
1077 /* Disable interrupts, stop Tx and Rx. */
1078 sc92031_disable_interrupts(dev);
1079
1080 spin_lock(&priv->lock);
1081
1082 _sc92031_disable_tx_rx(dev);
1083 _sc92031_tx_clear(dev);
1084 mmiowb();
1085
1086 spin_unlock(&priv->lock);
1087
1088 free_irq(pdev->irq, dev);
1089 pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
1090 priv->tx_bufs_dma_addr);
1091 pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
1092 priv->rx_ring_dma_addr);
1093
1094 return 0;
1095}
1096
1097static void sc92031_set_multicast_list(struct net_device *dev)
1098{
1099 struct sc92031_priv *priv = netdev_priv(dev);
1100
1101 spin_lock_bh(&priv->lock);
1102
1103 _sc92031_set_mar(dev);
1104 _sc92031_set_rx_config(dev);
1105 mmiowb();
1106
1107 spin_unlock_bh(&priv->lock);
1108}
1109
1110static void sc92031_tx_timeout(struct net_device *dev)
1111{
1112 struct sc92031_priv *priv = netdev_priv(dev);
1113
1114 /* Disable interrupts by clearing the interrupt mask.*/
1115 sc92031_disable_interrupts(dev);
1116
1117 spin_lock(&priv->lock);
1118
1119 priv->tx_timeouts++;
1120
1121 _sc92031_reset(dev);
1122 mmiowb();
1123
1124 spin_unlock(&priv->lock);
1125
1126 /* enable interrupts */
1127 sc92031_enable_interrupts(dev);
1128
1129 if (netif_carrier_ok(dev))
1130 netif_wake_queue(dev);
1131}
1132
1133#ifdef CONFIG_NET_POLL_CONTROLLER
1134static void sc92031_poll_controller(struct net_device *dev)
1135{
1136 disable_irq(dev->irq);
1137 if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
1138 sc92031_tasklet((unsigned long)dev);
1139 enable_irq(dev->irq);
1140}
1141#endif
1142
1143static int sc92031_ethtool_get_settings(struct net_device *dev,
1144 struct ethtool_cmd *cmd)
1145{
1146 struct sc92031_priv *priv = netdev_priv(dev);
1147 void __iomem *port_base = priv->port_base;
1148 u8 phy_address;
1149 u32 phy_ctrl;
1150 u16 output_status;
1151
1152 spin_lock_bh(&priv->lock);
1153
1154 phy_address = ioread32(port_base + Miicmd1) >> 27;
1155 phy_ctrl = ioread32(port_base + PhyCtrl);
1156
1157 output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
1158 _sc92031_mii_scan(port_base);
1159 mmiowb();
1160
1161 spin_unlock_bh(&priv->lock);
1162
1163 cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
1164 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
1165 | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
1166
1167 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
1168
1169 if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
1170 == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
1171 cmd->advertising |= ADVERTISED_Autoneg;
1172
1173 if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
1174 cmd->advertising |= ADVERTISED_10baseT_Half;
1175
1176 if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
1177 == (PhyCtrlSpd10 | PhyCtrlDux))
1178 cmd->advertising |= ADVERTISED_10baseT_Full;
1179
1180 if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
1181 cmd->advertising |= ADVERTISED_100baseT_Half;
1182
1183 if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
1184 == (PhyCtrlSpd100 | PhyCtrlDux))
1185 cmd->advertising |= ADVERTISED_100baseT_Full;
1186
1187 if (phy_ctrl & PhyCtrlAne)
1188 cmd->advertising |= ADVERTISED_Autoneg;
1189
1190 cmd->speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
1191 cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
1192 cmd->port = PORT_MII;
1193 cmd->phy_address = phy_address;
1194 cmd->transceiver = XCVR_INTERNAL;
1195 cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1196
1197 return 0;
1198}
1199
1200static int sc92031_ethtool_set_settings(struct net_device *dev,
1201 struct ethtool_cmd *cmd)
1202{
1203 struct sc92031_priv *priv = netdev_priv(dev);
1204 void __iomem *port_base = priv->port_base;
1205 u32 phy_ctrl;
1206 u32 old_phy_ctrl;
1207
1208 if (!(cmd->speed == SPEED_10 || cmd->speed == SPEED_100))
1209 return -EINVAL;
1210 if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
1211 return -EINVAL;
1212 if (!(cmd->port == PORT_MII))
1213 return -EINVAL;
1214 if (!(cmd->phy_address == 0x1f))
1215 return -EINVAL;
1216 if (!(cmd->transceiver == XCVR_INTERNAL))
1217 return -EINVAL;
1218 if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
1219 return -EINVAL;
1220
1221 if (cmd->autoneg == AUTONEG_ENABLE) {
1222 if (!(cmd->advertising & (ADVERTISED_Autoneg
1223 | ADVERTISED_100baseT_Full
1224 | ADVERTISED_100baseT_Half
1225 | ADVERTISED_10baseT_Full
1226 | ADVERTISED_10baseT_Half)))
1227 return -EINVAL;
1228
1229 phy_ctrl = PhyCtrlAne;
1230
1231 // FIXME: I'm not sure what the original code was trying to do
1232 if (cmd->advertising & ADVERTISED_Autoneg)
1233 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
1234 if (cmd->advertising & ADVERTISED_100baseT_Full)
1235 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
1236 if (cmd->advertising & ADVERTISED_100baseT_Half)
1237 phy_ctrl |= PhyCtrlSpd100;
1238 if (cmd->advertising & ADVERTISED_10baseT_Full)
1239 phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
1240 if (cmd->advertising & ADVERTISED_10baseT_Half)
1241 phy_ctrl |= PhyCtrlSpd10;
1242 } else {
1243 // FIXME: Whole branch guessed
1244 phy_ctrl = 0;
1245
1246 if (cmd->speed == SPEED_10)
1247 phy_ctrl |= PhyCtrlSpd10;
1248 else /* cmd->speed == SPEED_100 */
1249 phy_ctrl |= PhyCtrlSpd100;
1250
1251 if (cmd->duplex == DUPLEX_FULL)
1252 phy_ctrl |= PhyCtrlDux;
1253 }
1254
1255 spin_lock_bh(&priv->lock);
1256
1257 old_phy_ctrl = ioread32(port_base + PhyCtrl);
1258 phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
1259 | PhyCtrlSpd100 | PhyCtrlSpd10);
1260 if (phy_ctrl != old_phy_ctrl)
1261 iowrite32(phy_ctrl, port_base + PhyCtrl);
1262
1263 spin_unlock_bh(&priv->lock);
1264
1265 return 0;
1266}
1267
1268static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
1269 struct ethtool_drvinfo *drvinfo)
1270{
1271 struct sc92031_priv *priv = netdev_priv(dev);
1272 struct pci_dev *pdev = priv->pdev;
1273
1274 strcpy(drvinfo->driver, SC92031_NAME);
1275 strcpy(drvinfo->version, SC92031_VERSION);
1276 strcpy(drvinfo->bus_info, pci_name(pdev));
1277}
1278
1279static void sc92031_ethtool_get_wol(struct net_device *dev,
1280 struct ethtool_wolinfo *wolinfo)
1281{
1282 struct sc92031_priv *priv = netdev_priv(dev);
1283 void __iomem *port_base = priv->port_base;
1284 u32 pm_config;
1285
1286 spin_lock_bh(&priv->lock);
1287 pm_config = ioread32(port_base + PMConfig);
1288 spin_unlock_bh(&priv->lock);
1289
1290 // FIXME: Guessed
1291 wolinfo->supported = WAKE_PHY | WAKE_MAGIC
1292 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
1293 wolinfo->wolopts = 0;
1294
1295 if (pm_config & PM_LinkUp)
1296 wolinfo->wolopts |= WAKE_PHY;
1297
1298 if (pm_config & PM_Magic)
1299 wolinfo->wolopts |= WAKE_MAGIC;
1300
1301 if (pm_config & PM_WakeUp)
1302 // FIXME: Guessed
1303 wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
1304}
1305
1306static int sc92031_ethtool_set_wol(struct net_device *dev,
1307 struct ethtool_wolinfo *wolinfo)
1308{
1309 struct sc92031_priv *priv = netdev_priv(dev);
1310 void __iomem *port_base = priv->port_base;
1311 u32 pm_config;
1312
1313 spin_lock_bh(&priv->lock);
1314
1315 pm_config = ioread32(port_base + PMConfig)
1316 & ~(PM_LinkUp | PM_Magic | PM_WakeUp);
1317
1318 if (wolinfo->wolopts & WAKE_PHY)
1319 pm_config |= PM_LinkUp;
1320
1321 if (wolinfo->wolopts & WAKE_MAGIC)
1322 pm_config |= PM_Magic;
1323
1324 // FIXME: Guessed
1325 if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
1326 pm_config |= PM_WakeUp;
1327
1328 priv->pm_config = pm_config;
1329 iowrite32(pm_config, port_base + PMConfig);
1330 mmiowb();
1331
1332 spin_unlock_bh(&priv->lock);
1333
1334 return 0;
1335}
1336
1337static int sc92031_ethtool_nway_reset(struct net_device *dev)
1338{
1339 int err = 0;
1340 struct sc92031_priv *priv = netdev_priv(dev);
1341 void __iomem *port_base = priv->port_base;
1342 u16 bmcr;
1343
1344 spin_lock_bh(&priv->lock);
1345
1346 bmcr = _sc92031_mii_read(port_base, MII_BMCR);
1347 if (!(bmcr & BMCR_ANENABLE)) {
1348 err = -EINVAL;
1349 goto out;
1350 }
1351
1352 _sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);
1353
1354out:
1355 _sc92031_mii_scan(port_base);
1356 mmiowb();
1357
1358 spin_unlock_bh(&priv->lock);
1359
1360 return err;
1361}
1362
1363static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
1364 "tx_timeout",
1365 "rx_loss",
1366};
1367
1368static void sc92031_ethtool_get_strings(struct net_device *dev,
1369 u32 stringset, u8 *data)
1370{
1371 if (stringset == ETH_SS_STATS)
1372 memcpy(data, sc92031_ethtool_stats_strings,
1373 SILAN_STATS_NUM * ETH_GSTRING_LEN);
1374}
1375
1376static int sc92031_ethtool_get_stats_count(struct net_device *dev)
1377{
1378 return SILAN_STATS_NUM;
1379}
1380
1381static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
1382 struct ethtool_stats *stats, u64 *data)
1383{
1384 struct sc92031_priv *priv = netdev_priv(dev);
1385
1386 spin_lock_bh(&priv->lock);
1387 data[0] = priv->tx_timeouts;
1388 data[1] = priv->rx_loss;
1389 spin_unlock_bh(&priv->lock);
1390}
1391
1392static struct ethtool_ops sc92031_ethtool_ops = {
1393 .get_settings = sc92031_ethtool_get_settings,
1394 .set_settings = sc92031_ethtool_set_settings,
1395 .get_drvinfo = sc92031_ethtool_get_drvinfo,
1396 .get_wol = sc92031_ethtool_get_wol,
1397 .set_wol = sc92031_ethtool_set_wol,
1398 .nway_reset = sc92031_ethtool_nway_reset,
1399 .get_link = ethtool_op_get_link,
1400 .get_tx_csum = ethtool_op_get_tx_csum,
1401 .get_sg = ethtool_op_get_sg,
1402 .get_tso = ethtool_op_get_tso,
1403 .get_strings = sc92031_ethtool_get_strings,
1404 .get_stats_count = sc92031_ethtool_get_stats_count,
1405 .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats,
1406 .get_perm_addr = ethtool_op_get_perm_addr,
1407 .get_ufo = ethtool_op_get_ufo,
1408};
1409
1410static int __devinit sc92031_probe(struct pci_dev *pdev,
1411 const struct pci_device_id *id)
1412{
1413 int err;
1414 void __iomem* port_base;
1415 struct net_device *dev;
1416 struct sc92031_priv *priv;
1417 u32 mac0, mac1;
1418
1419 err = pci_enable_device(pdev);
1420 if (unlikely(err < 0))
1421 goto out_enable_device;
1422
1423 pci_set_master(pdev);
1424
1425 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1426 if (unlikely(err < 0))
1427 goto out_set_dma_mask;
1428
1429 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1430 if (unlikely(err < 0))
1431 goto out_set_dma_mask;
1432
1433 err = pci_request_regions(pdev, SC92031_NAME);
1434 if (unlikely(err < 0))
1435 goto out_request_regions;
1436
1437 port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
1438 if (unlikely(!port_base)) {
1439 err = -EIO;
1440 goto out_iomap;
1441 }
1442
1443 dev = alloc_etherdev(sizeof(struct sc92031_priv));
1444 if (unlikely(!dev)) {
1445 err = -ENOMEM;
1446 goto out_alloc_etherdev;
1447 }
1448
1449 pci_set_drvdata(pdev, dev);
1450
1451#if SC92031_USE_BAR == 0
1452 dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
1453 dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
1454#elif SC92031_USE_BAR == 1
1455 dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
1456#endif
1457 dev->irq = pdev->irq;
1458
1459 /* faked with skb_copy_and_csum_dev */
1460 dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
1461
1462 dev->get_stats = sc92031_get_stats;
1463 dev->ethtool_ops = &sc92031_ethtool_ops;
1464 dev->hard_start_xmit = sc92031_start_xmit;
1465 dev->watchdog_timeo = TX_TIMEOUT;
1466 dev->open = sc92031_open;
1467 dev->stop = sc92031_stop;
1468 dev->set_multicast_list = sc92031_set_multicast_list;
1469 dev->tx_timeout = sc92031_tx_timeout;
1470#ifdef CONFIG_NET_POLL_CONTROLLER
1471 dev->poll_controller = sc92031_poll_controller;
1472#endif
1473
1474 priv = netdev_priv(dev);
1475 spin_lock_init(&priv->lock);
1476 priv->port_base = port_base;
1477 priv->pdev = pdev;
1478 tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
1479 /* Fudge tasklet count so the call to sc92031_enable_interrupts at
1480 * sc92031_open will work correctly */
1481 tasklet_disable_nosync(&priv->tasklet);
1482
1483 /* PCI PM Wakeup */
1484 iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);
1485
1486 mac0 = ioread32(port_base + MAC0);
1487 mac1 = ioread32(port_base + MAC0 + 4);
1488 dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
1489 dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
1490 dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
1491 dev->dev_addr[3] = dev->perm_addr[3] = mac0;
1492 dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
1493 dev->dev_addr[5] = dev->perm_addr[5] = mac1;
1494
1495 err = register_netdev(dev);
1496 if (err < 0)
1497 goto out_register_netdev;
1498
1499 return 0;
1500
1501out_register_netdev:
1502 free_netdev(dev);
1503out_alloc_etherdev:
1504 pci_iounmap(pdev, port_base);
1505out_iomap:
1506 pci_release_regions(pdev);
1507out_request_regions:
1508out_set_dma_mask:
1509 pci_disable_device(pdev);
1510out_enable_device:
1511 return err;
1512}
1513
1514static void __devexit sc92031_remove(struct pci_dev *pdev)
1515{
1516 struct net_device *dev = pci_get_drvdata(pdev);
1517 struct sc92031_priv *priv = netdev_priv(dev);
1518 void __iomem* port_base = priv->port_base;
1519
1520 unregister_netdev(dev);
1521 free_netdev(dev);
1522 pci_iounmap(pdev, port_base);
1523 pci_release_regions(pdev);
1524 pci_disable_device(pdev);
1525}
1526
1527static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
1528{
1529 struct net_device *dev = pci_get_drvdata(pdev);
1530 struct sc92031_priv *priv = netdev_priv(dev);
1531
1532 pci_save_state(pdev);
1533
1534 if (!netif_running(dev))
1535 goto out;
1536
1537 netif_device_detach(dev);
1538
1539 /* Disable interrupts, stop Tx and Rx. */
1540 sc92031_disable_interrupts(dev);
1541
1542 spin_lock(&priv->lock);
1543
1544 _sc92031_disable_tx_rx(dev);
1545 _sc92031_tx_clear(dev);
1546 mmiowb();
1547
1548 spin_unlock(&priv->lock);
1549
1550out:
1551 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1552
1553 return 0;
1554}
1555
1556static int sc92031_resume(struct pci_dev *pdev)
1557{
1558 struct net_device *dev = pci_get_drvdata(pdev);
1559 struct sc92031_priv *priv = netdev_priv(dev);
1560
1561 pci_restore_state(pdev);
1562 pci_set_power_state(pdev, PCI_D0);
1563
1564 if (!netif_running(dev))
1565 goto out;
1566
1567 /* Interrupts already disabled by sc92031_suspend */
1568 spin_lock(&priv->lock);
1569
1570 _sc92031_reset(dev);
1571 mmiowb();
1572
1573 spin_unlock(&priv->lock);
1574 sc92031_enable_interrupts(dev);
1575
1576 netif_device_attach(dev);
1577
1578 if (netif_carrier_ok(dev))
1579 netif_wake_queue(dev);
1580 else
1581 netif_tx_disable(dev);
1582
1583out:
1584 return 0;
1585}
1586
1587static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
1588 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_SC92031) },
1589 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_8139D) },
1590 { 0, }
1591};
1592MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
1593
1594static struct pci_driver sc92031_pci_driver = {
1595 .name = SC92031_NAME,
1596 .id_table = sc92031_pci_device_id_table,
1597 .probe = sc92031_probe,
1598 .remove = __devexit_p(sc92031_remove),
1599 .suspend = sc92031_suspend,
1600 .resume = sc92031_resume,
1601};
1602
1603static int __init sc92031_init(void)
1604{
1605 printk(KERN_INFO SC92031_DESCRIPTION " " SC92031_VERSION "\n");
1606 return pci_register_driver(&sc92031_pci_driver);
1607}
1608
1609static void __exit sc92031_exit(void)
1610{
1611 pci_unregister_driver(&sc92031_pci_driver);
1612}
1613
1614module_init(sc92031_init);
1615module_exit(sc92031_exit);
1616
1617MODULE_LICENSE("GPL");
1618MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
1619MODULE_DESCRIPTION(SC92031_DESCRIPTION);
1620MODULE_VERSION(SC92031_VERSION);
diff --git a/drivers/net/sk_mca.c b/drivers/net/sk_mca.c
deleted file mode 100644
index 96e06c51b75d..000000000000
--- a/drivers/net/sk_mca.c
+++ /dev/null
@@ -1,1216 +0,0 @@
1/*
2net-3-driver for the SKNET MCA-based cards
3
4This is an extension to the Linux operating system, and is covered by the
5same GNU General Public License that covers that work.
6
7Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
8 alfred.arnold@lancom.de)
9
10This driver is based both on the 3C523 driver and the SK_G16 driver.
11
12paper sources:
13 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
14 Hans-Peter Messmer for the basic Microchannel stuff
15
16    'Linux Geraetetreiber' by Alessandro Rubini, Kalle Dalheimer
17 for help on Ethernet driver programming
18
19 'Ethernet/IEEE 802.3 Family 1992 World Network Data Book/Handbook' by AMD
20 for documentation on the AM7990 LANCE
21
22 'SKNET Personal Technisches Manual', Version 1.2 by Schneider&Koch
23 for documentation on the Junior board
24
25    'SK-NET MC2+ Technical Manual', Version 1.1 by Schneider&Koch for
26    documentation on the MC2 board
27
28 A big thank you to the S&K support for providing me so quickly with
29 documentation!
30
31 Also see http://www.syskonnect.com/
32
33 Missing things:
34
35 -> set debug level via ioctl instead of compile-time switches
36 -> I didn't follow the development of the 2.1.x kernels, so my
37 assumptions about which things changed with which kernel version
38 are probably nonsense
39
40History:
41 May 16th, 1999
42 startup
43   May 22nd, 1999
44 added private structure, methods
45 begun building data structures in RAM
46   May 23rd, 1999
47 can receive frames, send frames
48 May 24th, 1999
49 modularized initialization of LANCE
50 loadable as module
51 still Tx problem :-(
52 May 26th, 1999
53 MC2 works
54 support for multiple devices
55 display media type for MC2+
56 May 28th, 1999
57 fixed problem in GetLANCE leaving interrupts turned off
58 increase TX queue to 4 packets to improve send performance
59 May 29th, 1999
60 a few corrections in statistics, caught rcvr overruns
61 reinitialization of LANCE/board in critical situations
62 MCA info implemented
63 implemented LANCE multicast filter
64 Jun 6th, 1999
65 additions for Linux 2.2
66 Dec 25th, 1999
67 unfortunately there seem to be newer MC2+ boards that react
68 on IRQ 3/5/9/10 instead of 3/5/10/11, so we have to autoprobe
69 in questionable cases...
70 Dec 28th, 1999
71 integrated patches from David Weinehall & Bill Wendling for 2.3
72 kernels (isa_...functions). Things are defined in a way that
73 it still works with 2.0.x 8-)
74 Dec 30th, 1999
75 added handling of the remaining interrupt conditions. That
76 should cure the spurious hangs.
77 Jan 30th, 2000
78 newer kernels automatically probe more than one board, so the
79 'startslot' as a variable is also needed here
80 June 1st, 2000
81 added changes for recent 2.3 kernels
82
83 *************************************************************************/
84
85#include <linux/kernel.h>
86#include <linux/string.h>
87#include <linux/errno.h>
88#include <linux/ioport.h>
89#include <linux/slab.h>
90#include <linux/interrupt.h>
91#include <linux/delay.h>
92#include <linux/time.h>
93#include <linux/mca-legacy.h>
94#include <linux/init.h>
95#include <linux/module.h>
96#include <linux/netdevice.h>
97#include <linux/etherdevice.h>
98#include <linux/skbuff.h>
99#include <linux/bitops.h>
100
101#include <asm/processor.h>
102#include <asm/io.h>
103
104#define _SK_MCA_DRIVER_
105#include "sk_mca.h"
106
107/* ------------------------------------------------------------------------
108 * global static data - not more since we can handle multiple boards and
109 * have to pack all state info into the device struct!
110 * ------------------------------------------------------------------------ */
111
112static char *MediaNames[Media_Count] =
113 { "10Base2", "10BaseT", "10Base5", "Unknown" };
114
115static unsigned char poly[] =
116 { 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,
117 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0
118};
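/* Side note: read least-significant bit first, the poly[] bits above spell
   out the standard Ethernet CRC-32 generator polynomial 0x04C11DB7, which
   UpdateCRC() below shifts into the running CRC used for the multicast
   hash. */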
119
120/* ------------------------------------------------------------------------
121 * private subfunctions
122 * ------------------------------------------------------------------------ */
123
124/* dump parts of shared memory - only needed during debugging */
125
126#ifdef DEBUG
127static void dumpmem(struct net_device *dev, u32 start, u32 len)
128{
129 skmca_priv *priv = netdev_priv(dev);
130 int z;
131
132 for (z = 0; z < len; z++) {
133 if ((z & 15) == 0)
134 printk("%04x:", z);
135 printk(" %02x", readb(priv->base + start + z));
136 if ((z & 15) == 15)
137 printk("\n");
138 }
139}
140
141/* print exact time - ditto */
142
143static void PrTime(void)
144{
145 struct timeval tv;
146
147 do_gettimeofday(&tv);
148 printk("%9d:%06d: ", tv.tv_sec, tv.tv_usec);
149}
150#endif
151
152/* deduce resources out of POS registers */
153
154static void __init getaddrs(int slot, int junior, int *base, int *irq,
155 skmca_medium * medium)
156{
157 u_char pos0, pos1, pos2;
158
159 if (junior) {
160 pos0 = mca_read_stored_pos(slot, 2);
161 *base = ((pos0 & 0x0e) << 13) + 0xc0000;
162 *irq = ((pos0 & 0x10) >> 4) + 10;
163 *medium = Media_Unknown;
164 } else {
165 /* reset POS 104 Bits 0+1 so the shared memory region goes to the
166 configured area between 640K and 1M. Afterwards, enable the MC2.
167	   I really don't know what drove SK to do this... */
168
169 mca_write_pos(slot, 4,
170 mca_read_stored_pos(slot, 4) & 0xfc);
171 mca_write_pos(slot, 2,
172 mca_read_stored_pos(slot, 2) | 0x01);
173
174 pos1 = mca_read_stored_pos(slot, 3);
175 pos2 = mca_read_stored_pos(slot, 4);
176 *base = ((pos1 & 0x07) << 14) + 0xc0000;
177 switch (pos2 & 0x0c) {
178 case 0:
179 *irq = 3;
180 break;
181 case 4:
182 *irq = 5;
183 break;
184 case 8:
185 *irq = -10;
186 break;
187 case 12:
188 *irq = -11;
189 break;
190 }
191 *medium = (pos2 >> 6) & 3;
192 }
193}
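/* Note on the negative IRQ values above: they mark the POS encodings that
   are ambiguous on newer MC2+ boards (which answer on IRQ 9/10 where older
   ones use 10/11); skmca_probe() autoprobes in that case and falls back to
   the absolute value if probing fails. */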
194
195/* check for both cards:
196 When the MC2 is turned off, it was configured for more than 15MB RAM,
197 is disabled and won't get detected using the standard probe. We
198 therefore have to scan the slots manually :-( */
199
200static int __init dofind(int *junior, int firstslot)
201{
202 int slot;
203 unsigned int id;
204
205 for (slot = firstslot; slot < MCA_MAX_SLOT_NR; slot++) {
206 id = mca_read_stored_pos(slot, 0)
207 + (((unsigned int) mca_read_stored_pos(slot, 1)) << 8);
208
209 *junior = 0;
210 if (id == SKNET_MCA_ID)
211 return slot;
212 *junior = 1;
213 if (id == SKNET_JUNIOR_MCA_ID)
214 return slot;
215 }
216 return MCA_NOTFOUND;
217}
218
219/* reset the whole board */
220
221static void ResetBoard(struct net_device *dev)
222{
223 skmca_priv *priv = netdev_priv(dev);
224
225 writeb(CTRL_RESET_ON, priv->ctrladdr);
226 udelay(10);
227 writeb(CTRL_RESET_OFF, priv->ctrladdr);
228}
229
230/* wait for LANCE interface to become not busy */
231
232static int WaitLANCE(struct net_device *dev)
233{
234 skmca_priv *priv = netdev_priv(dev);
235 int t = 0;
236
237 while ((readb(priv->ctrladdr) & STAT_IO_BUSY) ==
238 STAT_IO_BUSY) {
239 udelay(1);
240 if (++t > 1000) {
241			printk("%s: LANCE access timeout\n", dev->name);
242 return 0;
243 }
244 }
245
246 return 1;
247}
248
249/* set LANCE register - must be atomic */
250
251static void SetLANCE(struct net_device *dev, u16 addr, u16 value)
252{
253 skmca_priv *priv = netdev_priv(dev);
254 unsigned long flags;
255
256 /* disable interrupts */
257
258 spin_lock_irqsave(&priv->lock, flags);
259
260 /* wait until no transfer is pending */
261
262 WaitLANCE(dev);
263
264 /* transfer register address to RAP */
265
266 writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
267 writew(addr, priv->ioregaddr);
268 writeb(IOCMD_GO, priv->cmdaddr);
269 udelay(1);
270 WaitLANCE(dev);
271
272 /* transfer data to register */
273
274 writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_DATA, priv->ctrladdr);
275 writew(value, priv->ioregaddr);
276 writeb(IOCMD_GO, priv->cmdaddr);
277 udelay(1);
278 WaitLANCE(dev);
279
280 /* reenable interrupts */
281
282 spin_unlock_irqrestore(&priv->lock, flags);
283}
284
285/* get LANCE register */
286
287static u16 GetLANCE(struct net_device *dev, u16 addr)
288{
289 skmca_priv *priv = netdev_priv(dev);
290 unsigned long flags;
291 unsigned int res;
292
293 /* disable interrupts */
294
295 spin_lock_irqsave(&priv->lock, flags);
296
297 /* wait until no transfer is pending */
298
299 WaitLANCE(dev);
300
301 /* transfer register address to RAP */
302
303 writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
304 writew(addr, priv->ioregaddr);
305 writeb(IOCMD_GO, priv->cmdaddr);
306 udelay(1);
307 WaitLANCE(dev);
308
309 /* transfer data from register */
310
311 writeb(CTRL_RESET_OFF | CTRL_RW_READ | CTRL_ADR_DATA, priv->ctrladdr);
312 writeb(IOCMD_GO, priv->cmdaddr);
313 udelay(1);
314 WaitLANCE(dev);
315 res = readw(priv->ioregaddr);
316
317 /* reenable interrupts */
318
319 spin_unlock_irqrestore(&priv->lock, flags);
320
321 return res;
322}
323
324/* build up descriptors in shared RAM */
325
326static void InitDscrs(struct net_device *dev)
327{
328 skmca_priv *priv = netdev_priv(dev);
329 u32 bufaddr;
330
331 /* Set up Tx descriptors. The board has only 16K RAM so bits 16..23
332 are always 0. */
333
334 bufaddr = RAM_DATABASE;
335 {
336 LANCE_TxDescr descr;
337 int z;
338
339 for (z = 0; z < TXCOUNT; z++) {
340 descr.LowAddr = bufaddr;
341 descr.Flags = 0;
342 descr.Len = 0xf000;
343 descr.Status = 0;
344 memcpy_toio(priv->base + RAM_TXBASE +
345 (z * sizeof(LANCE_TxDescr)), &descr,
346 sizeof(LANCE_TxDescr));
347 memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
348 bufaddr += RAM_BUFSIZE;
349 }
350 }
351
352 /* do the same for the Rx descriptors */
353
354 {
355 LANCE_RxDescr descr;
356 int z;
357
358 for (z = 0; z < RXCOUNT; z++) {
359 descr.LowAddr = bufaddr;
360 descr.Flags = RXDSCR_FLAGS_OWN;
361 descr.MaxLen = -RAM_BUFSIZE;
362 descr.Len = 0;
363 memcpy_toio(priv->base + RAM_RXBASE +
364 (z * sizeof(LANCE_RxDescr)), &descr,
365 sizeof(LANCE_RxDescr));
366 memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
367 bufaddr += RAM_BUFSIZE;
368 }
369 }
370}
371
372/* calculate the hash bit position for a given multicast address
373 taken more or less directly from the AMD datasheet... */
374
375static void UpdateCRC(unsigned char *CRC, int bit)
376{
377 int j;
378
379 /* shift CRC one bit */
380
381 memmove(CRC + 1, CRC, 32 * sizeof(unsigned char));
382 CRC[0] = 0;
383
384 /* if bit XOR controlbit = 1, set CRC = CRC XOR polynomial */
385
386 if (bit ^ CRC[32])
387 for (j = 0; j < 32; j++)
388 CRC[j] ^= poly[j];
389}
390
391static unsigned int GetHash(char *address)
392{
393 unsigned char CRC[33];
394 int i, byte, hashcode;
395
396 /* a multicast address has bit 0 in the first byte set */
397
398 if ((address[0] & 1) == 0)
399 return -1;
400
401 /* initialize CRC */
402
403 memset(CRC, 1, sizeof(CRC));
404
405 /* loop through address bits */
406
407 for (byte = 0; byte < 6; byte++)
408 for (i = 0; i < 8; i++)
409 UpdateCRC(CRC, (address[byte] >> i) & 1);
410
411 /* hashcode is the 6 least significant bits of the CRC */
412
413 hashcode = 0;
414 for (i = 0; i < 6; i++)
415 hashcode = (hashcode << 1) + CRC[i];
416 return hashcode;
417}
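/* For reference: the 6-bit hashcode returned above is consumed by
   skmca_set_multicast_list(), which sets bit (code & 7) of byte (code >> 3)
   in the 64-bit logical address filter LAdrF; e.g. an illustrative hashcode
   of 0x2a would select byte 5, bit 2. */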
418
419/* feed ready-built initialization block into LANCE */
420
421static void InitLANCE(struct net_device *dev)
422{
423 skmca_priv *priv = netdev_priv(dev);
424
425 /* build up descriptors. */
426
427 InitDscrs(dev);
428
429 /* next RX descriptor to be read is the first one. Since the LANCE
430 will start from the beginning after initialization, we have to
431	   reset our pointers too. */
432
433 priv->nextrx = 0;
434
435 /* no TX descriptors active */
436
437 priv->nexttxput = priv->nexttxdone = priv->txbusy = 0;
438
439 /* set up the LANCE bus control register - constant for SKnet boards */
440
441 SetLANCE(dev, LANCE_CSR3,
442 CSR3_BSWAP_OFF | CSR3_ALE_LOW | CSR3_BCON_HOLD);
443
444 /* write address of initialization block into LANCE */
445
446 SetLANCE(dev, LANCE_CSR1, RAM_INITBASE & 0xffff);
447 SetLANCE(dev, LANCE_CSR2, (RAM_INITBASE >> 16) & 0xff);
448
449 /* we don't get ready until the LANCE has read the init block */
450
451 netif_stop_queue(dev);
452
453 /* let LANCE read the initialization block. LANCE is ready
454 when we receive the corresponding interrupt. */
455
456 SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_INIT);
457}
458
459/* stop the LANCE so we can reinitialize it */
460
461static void StopLANCE(struct net_device *dev)
462{
463 /* can't take frames any more */
464
465 netif_stop_queue(dev);
466
467 /* disable interrupts, stop it */
468
469 SetLANCE(dev, LANCE_CSR0, CSR0_STOP);
470}
471
472/* initialize card and LANCE for proper operation */
473
474static void InitBoard(struct net_device *dev)
475{
476 skmca_priv *priv = netdev_priv(dev);
477 LANCE_InitBlock block;
478
479 /* Lay out the shared RAM - first we create the init block for the LANCE.
480 We do not overwrite it later because we need it again when we switch
481	   promiscuous mode on/off. */
482
483 block.Mode = 0;
484 if (dev->flags & IFF_PROMISC)
485 block.Mode |= LANCE_INIT_PROM;
486 memcpy(block.PAdr, dev->dev_addr, 6);
487 memset(block.LAdrF, 0, sizeof(block.LAdrF));
488 block.RdrP = (RAM_RXBASE & 0xffffff) | (LRXCOUNT << 29);
489 block.TdrP = (RAM_TXBASE & 0xffffff) | (LTXCOUNT << 29);
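	/* In the LANCE init block the low 24 bits of RdrP/TdrP hold the ring
	   base address and bits 29..31 hold log2 of the ring length;
	   LRXCOUNT/LTXCOUNT are defined in sk_mca.h as 2, i.e. four
	   descriptors each. */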
490
491 memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));
492
493 /* initialize LANCE. Implicitly sets up other structures in RAM. */
494
495 InitLANCE(dev);
496}
497
498/* deinitialize card and LANCE */
499
500static void DeinitBoard(struct net_device *dev)
501{
502 /* stop LANCE */
503
504 StopLANCE(dev);
505
506 /* reset board */
507
508 ResetBoard(dev);
509}
510
511/* probe for device's irq */
512
513static int __init ProbeIRQ(struct net_device *dev)
514{
515 unsigned long imaskval, njiffies, irq;
516 u16 csr0val;
517
518 /* enable all interrupts */
519
520 imaskval = probe_irq_on();
521
522 /* initialize the board. Wait for interrupt 'Initialization done'. */
523
524 ResetBoard(dev);
525 InitBoard(dev);
526
527 njiffies = jiffies + HZ;
528 do {
529 csr0val = GetLANCE(dev, LANCE_CSR0);
530 }
531 while (((csr0val & CSR0_IDON) == 0) && (jiffies != njiffies));
532
533	/* turn off interrupts again */
534
535 irq = probe_irq_off(imaskval);
536
537 /* if we found something, ack the interrupt */
538
539 if (irq)
540 SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_IDON);
541
542 /* back to idle state */
543
544 DeinitBoard(dev);
545
546 return irq;
547}
548
549/* ------------------------------------------------------------------------
550 * interrupt handler(s)
551 * ------------------------------------------------------------------------ */
552
553/* LANCE has read initialization block -> start it */
554
555static u16 irqstart_handler(struct net_device *dev, u16 oldcsr0)
556{
557 /* now we're ready to transmit */
558
559 netif_wake_queue(dev);
560
561 /* reset IDON bit, start LANCE */
562
563 SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_IDON | CSR0_STRT);
564 return GetLANCE(dev, LANCE_CSR0);
565}
566
567/* did we lose blocks due to a FIFO overrun ? */
568
569static u16 irqmiss_handler(struct net_device *dev, u16 oldcsr0)
570{
571 skmca_priv *priv = netdev_priv(dev);
572
573 /* update statistics */
574
575 priv->stat.rx_fifo_errors++;
576
577 /* reset MISS bit */
578
579 SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_MISS);
580 return GetLANCE(dev, LANCE_CSR0);
581}
582
583/* receive interrupt */
584
585static u16 irqrx_handler(struct net_device *dev, u16 oldcsr0)
586{
587 skmca_priv *priv = netdev_priv(dev);
588 LANCE_RxDescr descr;
589 unsigned int descraddr;
590
591 /* run through queue until we reach a descriptor we do not own */
592
593 descraddr = RAM_RXBASE + (priv->nextrx * sizeof(LANCE_RxDescr));
594 while (1) {
595 /* read descriptor */
596 memcpy_fromio(&descr, priv->base + descraddr,
597 sizeof(LANCE_RxDescr));
598
599 /* if we reach a descriptor we do not own, we're done */
600 if ((descr.Flags & RXDSCR_FLAGS_OWN) != 0)
601 break;
602
603#ifdef DEBUG
604 PrTime();
605 printk("Receive packet on descr %d len %d\n", priv->nextrx,
606 descr.Len);
607#endif
608
609 /* erroneous packet ? */
610 if ((descr.Flags & RXDSCR_FLAGS_ERR) != 0) {
611 priv->stat.rx_errors++;
612 if ((descr.Flags & RXDSCR_FLAGS_CRC) != 0)
613 priv->stat.rx_crc_errors++;
614			else if ((descr.Flags & RXDSCR_FLAGS_FRAM) != 0)
615 priv->stat.rx_frame_errors++;
616 else if ((descr.Flags & RXDSCR_FLAGS_OFLO) != 0)
617 priv->stat.rx_fifo_errors++;
618 }
619
620 /* good packet ? */
621 else {
622 struct sk_buff *skb;
623
624 skb = dev_alloc_skb(descr.Len + 2);
625 if (skb == NULL)
626 priv->stat.rx_dropped++;
627 else {
628 memcpy_fromio(skb_put(skb, descr.Len),
629 priv->base +
630 descr.LowAddr, descr.Len);
631 skb->dev = dev;
632 skb->protocol = eth_type_trans(skb, dev);
633 skb->ip_summed = CHECKSUM_NONE;
634 priv->stat.rx_packets++;
635 priv->stat.rx_bytes += descr.Len;
636 netif_rx(skb);
637 dev->last_rx = jiffies;
638 }
639 }
640
641 /* give descriptor back to LANCE */
642 descr.Len = 0;
643 descr.Flags |= RXDSCR_FLAGS_OWN;
644
645 /* update descriptor in shared RAM */
646 memcpy_toio(priv->base + descraddr, &descr,
647 sizeof(LANCE_RxDescr));
648
649 /* go to next descriptor */
650 priv->nextrx++;
651 descraddr += sizeof(LANCE_RxDescr);
652 if (priv->nextrx >= RXCOUNT) {
653 priv->nextrx = 0;
654 descraddr = RAM_RXBASE;
655 }
656 }
657
658 /* reset RINT bit */
659
660 SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_RINT);
661 return GetLANCE(dev, LANCE_CSR0);
662}
663
664/* transmit interrupt */
665
666static u16 irqtx_handler(struct net_device *dev, u16 oldcsr0)
667{
668 skmca_priv *priv = netdev_priv(dev);
669 LANCE_TxDescr descr;
670 unsigned int descraddr;
671
672 /* check descriptors at most until no busy one is left */
673
674 descraddr =
675 RAM_TXBASE + (priv->nexttxdone * sizeof(LANCE_TxDescr));
676 while (priv->txbusy > 0) {
677 /* read descriptor */
678 memcpy_fromio(&descr, priv->base + descraddr,
679 sizeof(LANCE_TxDescr));
680
681 /* if the LANCE still owns this one, we've worked out all sent packets */
682 if ((descr.Flags & TXDSCR_FLAGS_OWN) != 0)
683 break;
684
685#ifdef DEBUG
686 PrTime();
687 printk("Send packet done on descr %d\n", priv->nexttxdone);
688#endif
689
690 /* update statistics */
691 if ((descr.Flags & TXDSCR_FLAGS_ERR) == 0) {
692 priv->stat.tx_packets++;
693 priv->stat.tx_bytes++;
694 } else {
695 priv->stat.tx_errors++;
696 if ((descr.Status & TXDSCR_STATUS_UFLO) != 0) {
697 priv->stat.tx_fifo_errors++;
698 InitLANCE(dev);
699 }
700			else if ((descr.Status & TXDSCR_STATUS_LCOL)
701					!= 0)
702				priv->stat.tx_window_errors++;
703 else if ((descr.Status & TXDSCR_STATUS_LCAR) != 0)
704 priv->stat.tx_carrier_errors++;
705 else if ((descr.Status & TXDSCR_STATUS_RTRY) != 0)
706 priv->stat.tx_aborted_errors++;
707 }
708
709 /* go to next descriptor */
710 priv->nexttxdone++;
711 descraddr += sizeof(LANCE_TxDescr);
712 if (priv->nexttxdone >= TXCOUNT) {
713 priv->nexttxdone = 0;
714 descraddr = RAM_TXBASE;
715 }
716 priv->txbusy--;
717 }
718
719 /* reset TX interrupt bit */
720
721 SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_TINT);
722 oldcsr0 = GetLANCE(dev, LANCE_CSR0);
723
724 /* at least one descriptor is freed. Therefore we can accept
725 a new one */
726 /* inform upper layers we're in business again */
727
728 netif_wake_queue(dev);
729
730 return oldcsr0;
731}
732
733/* general interrupt entry */
734
735static irqreturn_t irq_handler(int irq, void *device)
736{
737 struct net_device *dev = (struct net_device *) device;
738 u16 csr0val;
739
740 /* read CSR0 to get interrupt cause */
741
742 csr0val = GetLANCE(dev, LANCE_CSR0);
743
744 /* in case we're not meant... */
745
746 if ((csr0val & CSR0_INTR) == 0)
747 return IRQ_NONE;
748
749#if 0
750 set_bit(LINK_STATE_RXSEM, &dev->state);
751#endif
752
753 /* loop through the interrupt bits until everything is clear */
754
755 do {
756 if ((csr0val & CSR0_IDON) != 0)
757 csr0val = irqstart_handler(dev, csr0val);
758 if ((csr0val & CSR0_RINT) != 0)
759 csr0val = irqrx_handler(dev, csr0val);
760 if ((csr0val & CSR0_MISS) != 0)
761 csr0val = irqmiss_handler(dev, csr0val);
762 if ((csr0val & CSR0_TINT) != 0)
763 csr0val = irqtx_handler(dev, csr0val);
764 if ((csr0val & CSR0_MERR) != 0) {
765 SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_MERR);
766 csr0val = GetLANCE(dev, LANCE_CSR0);
767 }
768 if ((csr0val & CSR0_BABL) != 0) {
769 SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_BABL);
770 csr0val = GetLANCE(dev, LANCE_CSR0);
771 }
772 }
773 while ((csr0val & CSR0_INTR) != 0);
774
775#if 0
776 clear_bit(LINK_STATE_RXSEM, &dev->state);
777#endif
778 return IRQ_HANDLED;
779}
780
781/* ------------------------------------------------------------------------
782 * driver methods
783 * ------------------------------------------------------------------------ */
784
785/* MCA info */
786
787static int skmca_getinfo(char *buf, int slot, void *d)
788{
789 int len = 0, i;
790 struct net_device *dev = (struct net_device *) d;
791 skmca_priv *priv;
792
793 /* can't say anything about an uninitialized device... */
794
795 if (dev == NULL)
796 return len;
797 priv = netdev_priv(dev);
798
799 /* print info */
800
801 len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
802 len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
803 dev->mem_end - 1);
804 len +=
805 sprintf(buf + len, "Transceiver: %s\n",
806 MediaNames[priv->medium]);
807 len += sprintf(buf + len, "Device: %s\n", dev->name);
808 len += sprintf(buf + len, "MAC address:");
809 for (i = 0; i < 6; i++)
810 len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
811 buf[len++] = '\n';
812 buf[len] = 0;
813
814 return len;
815}
816
817/* open driver. Means also initialization and start of LANCE */
818
819static int skmca_open(struct net_device *dev)
820{
821 int result;
822 skmca_priv *priv = netdev_priv(dev);
823
824 /* register resources - only necessary for IRQ */
825 result =
826 request_irq(priv->realirq, irq_handler,
827 IRQF_SHARED | IRQF_SAMPLE_RANDOM, "sk_mca", dev);
828 if (result != 0) {
829 printk("%s: failed to register irq %d\n", dev->name,
830 dev->irq);
831 return result;
832 }
833 dev->irq = priv->realirq;
834
835 /* set up the card and LANCE */
836
837 InitBoard(dev);
838
839 /* set up flags */
840
841 netif_start_queue(dev);
842
843 return 0;
844}
845
846/* close driver. Shut down board and free allocated resources */
847
848static int skmca_close(struct net_device *dev)
849{
850 /* turn off board */
851 DeinitBoard(dev);
852
853 /* release resources */
854 if (dev->irq != 0)
855 free_irq(dev->irq, dev);
856 dev->irq = 0;
857
858 return 0;
859}
860
861/* transmit a block. */
862
863static int skmca_tx(struct sk_buff *skb, struct net_device *dev)
864{
865 skmca_priv *priv = netdev_priv(dev);
866 LANCE_TxDescr descr;
867 unsigned int address;
868 int tmplen, retval = 0;
869 unsigned long flags;
870
871 /* if we get called with a NULL descriptor, the Ethernet layer thinks
872	   our card is stuck and we should reset it. We'll do this completely: */
873
874 if (skb == NULL) {
875 DeinitBoard(dev);
876 InitBoard(dev);
877 return 0; /* don't try to free the block here ;-) */
878 }
879
880 /* is there space in the Tx queue ? If no, the upper layer gave us a
881 packet in spite of us not being ready and is really in trouble.
882 We'll do the dropping for him: */
883 if (priv->txbusy >= TXCOUNT) {
884 priv->stat.tx_dropped++;
885 retval = -EIO;
886 goto tx_done;
887 }
888
889 /* get TX descriptor */
890 address = RAM_TXBASE + (priv->nexttxput * sizeof(LANCE_TxDescr));
891 memcpy_fromio(&descr, priv->base + address, sizeof(LANCE_TxDescr));
892
893 /* enter packet length as 2s complement - assure minimum length */
894 tmplen = skb->len;
895 if (tmplen < 60)
896 tmplen = 60;
897 descr.Len = 65536 - tmplen;
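	/* e.g. the 60-byte minimum frame is stored as 65536 - 60 = 0xffc4,
	   the two's-complement form the LANCE expects in the Len field */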
898
899 /* copy filler into RAM - in case we're filling up...
900 we're filling a bit more than necessary, but that doesn't harm
901 since the buffer is far larger... */
902 if (tmplen > skb->len) {
903 char *fill = "NetBSD is a nice OS too! ";
904 unsigned int destoffs = 0, l = strlen(fill);
905
906 while (destoffs < tmplen) {
907 memcpy_toio(priv->base + descr.LowAddr +
908 destoffs, fill, l);
909 destoffs += l;
910 }
911 }
912
913 /* do the real data copying */
914 memcpy_toio(priv->base + descr.LowAddr, skb->data, skb->len);
915
916 /* hand descriptor over to LANCE - this is the first and last chunk */
917 descr.Flags =
918 TXDSCR_FLAGS_OWN | TXDSCR_FLAGS_STP | TXDSCR_FLAGS_ENP;
919
920#ifdef DEBUG
921 PrTime();
922 printk("Send packet on descr %d len %d\n", priv->nexttxput,
923 skb->len);
924#endif
925
926 /* one more descriptor busy */
927
928 spin_lock_irqsave(&priv->lock, flags);
929
930 priv->nexttxput++;
931 if (priv->nexttxput >= TXCOUNT)
932 priv->nexttxput = 0;
933 priv->txbusy++;
934
935 /* are we saturated ? */
936
937 if (priv->txbusy >= TXCOUNT)
938 netif_stop_queue(dev);
939
940 /* write descriptor back to RAM */
941 memcpy_toio(priv->base + address, &descr, sizeof(LANCE_TxDescr));
942
943 /* if no descriptors were active, give the LANCE a hint to read it
944 immediately */
945
946	if (priv->txbusy == 1)
947 SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_TDMD);
948
949 spin_unlock_irqrestore(&priv->lock, flags);
950
951 tx_done:
952
953 dev_kfree_skb(skb);
954
955 return retval;
956}
957
958/* return pointer to Ethernet statistics */
959
960static struct net_device_stats *skmca_stats(struct net_device *dev)
961{
962 skmca_priv *priv = netdev_priv(dev);
963
964 return &(priv->stat);
965}
966
967/* switch receiver mode. We use the LANCE's multicast filter to prefilter
968 multicast addresses. */
969
970static void skmca_set_multicast_list(struct net_device *dev)
971{
972 skmca_priv *priv = netdev_priv(dev);
973 LANCE_InitBlock block;
974
975 /* first stop the LANCE... */
976 StopLANCE(dev);
977
978 /* ...then modify the initialization block... */
979 memcpy_fromio(&block, priv->base + RAM_INITBASE, sizeof(block));
980 if (dev->flags & IFF_PROMISC)
981 block.Mode |= LANCE_INIT_PROM;
982 else
983 block.Mode &= ~LANCE_INIT_PROM;
984
985 if (dev->flags & IFF_ALLMULTI) { /* get all multicasts */
986 memset(block.LAdrF, 0xff, sizeof(block.LAdrF));
987 } else { /* get selected/no multicasts */
988
989 struct dev_mc_list *mptr;
990 int code;
991
992 memset(block.LAdrF, 0, sizeof(block.LAdrF));
993 for (mptr = dev->mc_list; mptr != NULL; mptr = mptr->next) {
994 code = GetHash(mptr->dmi_addr);
995 block.LAdrF[(code >> 3) & 7] |= 1 << (code & 7);
996 }
997 }
998
999 memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));
1000
1001 /* ...then reinit LANCE with the correct flags */
1002 InitLANCE(dev);
1003}
1004
1005/* ------------------------------------------------------------------------
1006 * hardware check
1007 * ------------------------------------------------------------------------ */
1008
1009static int startslot; /* counts through slots when probing multiple devices */
1010
1011static void cleanup_card(struct net_device *dev)
1012{
1013 skmca_priv *priv = netdev_priv(dev);
1014 DeinitBoard(dev);
1015 if (dev->irq != 0)
1016 free_irq(dev->irq, dev);
1017 iounmap(priv->base);
1018 mca_mark_as_unused(priv->slot);
1019 mca_set_adapter_procfn(priv->slot, NULL, NULL);
1020}
1021
1022struct net_device * __init skmca_probe(int unit)
1023{
1024 struct net_device *dev;
1025 int force_detect = 0;
1026 int junior, slot, i;
1027 int base = 0, irq = 0;
1028 skmca_priv *priv;
1029 skmca_medium medium;
1030 int err;
1031
1032 /* can't work without an MCA bus ;-) */
1033
1034 if (MCA_bus == 0)
1035 return ERR_PTR(-ENODEV);
1036
1037 dev = alloc_etherdev(sizeof(skmca_priv));
1038 if (!dev)
1039 return ERR_PTR(-ENOMEM);
1040
1041 if (unit >= 0) {
1042 sprintf(dev->name, "eth%d", unit);
1043 netdev_boot_setup_check(dev);
1044 }
1045
1046 SET_MODULE_OWNER(dev);
1047
1048 /* start address of 1 --> forced detection */
1049
1050 if (dev->mem_start == 1)
1051 force_detect = 1;
1052
1053 /* search through slots */
1054
1055 base = dev->mem_start;
1056 irq = dev->base_addr;
1057 for (slot = startslot; (slot = dofind(&junior, slot)) != -1; slot++) {
1058 /* deduce card addresses */
1059
1060 getaddrs(slot, junior, &base, &irq, &medium);
1061
1062 /* slot already in use ? */
1063
1064 if (mca_is_adapter_used(slot))
1065 continue;
1066
1067 /* were we looking for something different ? */
1068
1069 if (dev->irq && dev->irq != irq)
1070 continue;
1071 if (dev->mem_start && dev->mem_start != base)
1072 continue;
1073
1074 /* found something that matches */
1075
1076 break;
1077 }
1078
1079 /* nothing found ? */
1080
1081 if (slot == -1) {
1082 free_netdev(dev);
1083 return (base || irq) ? ERR_PTR(-ENXIO) : ERR_PTR(-ENODEV);
1084 }
1085
1086 /* make procfs entries */
1087
1088 if (junior)
1089 mca_set_adapter_name(slot,
1090 "SKNET junior MC2 Ethernet Adapter");
1091 else
1092 mca_set_adapter_name(slot, "SKNET MC2+ Ethernet Adapter");
1093 mca_set_adapter_procfn(slot, (MCA_ProcFn) skmca_getinfo, dev);
1094
1095 mca_mark_as_used(slot);
1096
1097 /* announce success */
1098 printk("%s: SKNet %s adapter found in slot %d\n", dev->name,
1099 junior ? "Junior MC2" : "MC2+", slot + 1);
1100
1101 priv = netdev_priv(dev);
1102 priv->base = ioremap(base, 0x4000);
1103 if (!priv->base) {
1104 mca_set_adapter_procfn(slot, NULL, NULL);
1105 mca_mark_as_unused(slot);
1106 free_netdev(dev);
1107 return ERR_PTR(-ENOMEM);
1108 }
1109
1110 priv->slot = slot;
1111 priv->macbase = priv->base + 0x3fc0;
1112 priv->ioregaddr = priv->base + 0x3ff0;
1113 priv->ctrladdr = priv->base + 0x3ff2;
1114 priv->cmdaddr = priv->base + 0x3ff3;
1115 priv->medium = medium;
1116 memset(&priv->stat, 0, sizeof(struct net_device_stats));
1117 spin_lock_init(&priv->lock);
1118
1119 /* set base + irq for this device (irq not allocated so far) */
1120 dev->irq = 0;
1121 dev->mem_start = base;
1122 dev->mem_end = base + 0x4000;
1123
1124 /* autoprobe ? */
1125 if (irq < 0) {
1126 int nirq;
1127
1128 printk
1129 ("%s: ambigous POS bit combination, must probe for IRQ...\n",
1130 dev->name);
1131 nirq = ProbeIRQ(dev);
1132 if (nirq <= 0)
1133 printk("%s: IRQ probe failed, assuming IRQ %d",
1134 dev->name, priv->realirq = -irq);
1135 else
1136 priv->realirq = nirq;
1137 } else
1138 priv->realirq = irq;
1139
1140 /* set methods */
1141 dev->open = skmca_open;
1142 dev->stop = skmca_close;
1143 dev->hard_start_xmit = skmca_tx;
1144 dev->do_ioctl = NULL;
1145 dev->get_stats = skmca_stats;
1146 dev->set_multicast_list = skmca_set_multicast_list;
1147 dev->flags |= IFF_MULTICAST;
1148
1149 /* copy out MAC address */
1150 for (i = 0; i < 6; i++)
1151 dev->dev_addr[i] = readb(priv->macbase + (i << 1));
1152
1153 /* print config */
1154 printk("%s: IRQ %d, memory %#lx-%#lx, "
1155 "MAC address %02x:%02x:%02x:%02x:%02x:%02x.\n",
1156 dev->name, priv->realirq, dev->mem_start, dev->mem_end - 1,
1157 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1158 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1159 printk("%s: %s medium\n", dev->name, MediaNames[priv->medium]);
1160
1161 /* reset board */
1162
1163 ResetBoard(dev);
1164
1165 startslot = slot + 1;
1166
1167 err = register_netdev(dev);
1168 if (err) {
1169 cleanup_card(dev);
1170 free_netdev(dev);
1171 dev = ERR_PTR(err);
1172 }
1173 return dev;
1174}
1175
1176/* ------------------------------------------------------------------------
1177 * modularization support
1178 * ------------------------------------------------------------------------ */
1179
1180#ifdef MODULE
1181MODULE_LICENSE("GPL");
1182
1183#define DEVMAX 5
1184
1185static struct net_device *moddevs[DEVMAX];
1186
1187int init_module(void)
1188{
1189 int z;
1190
1191 startslot = 0;
1192 for (z = 0; z < DEVMAX; z++) {
1193 struct net_device *dev = skmca_probe(-1);
1194 if (IS_ERR(dev))
1195 break;
1196 moddevs[z] = dev;
1197 }
1198 if (!z)
1199 return -EIO;
1200 return 0;
1201}
1202
1203void cleanup_module(void)
1204{
1205 int z;
1206
1207 for (z = 0; z < DEVMAX; z++) {
1208 struct net_device *dev = moddevs[z];
1209 if (dev) {
1210 unregister_netdev(dev);
1211 cleanup_card(dev);
1212 free_netdev(dev);
1213 }
1214 }
1215}
1216#endif /* MODULE */
diff --git a/drivers/net/sk_mca.h b/drivers/net/sk_mca.h
deleted file mode 100644
index 0dae056fed99..000000000000
--- a/drivers/net/sk_mca.h
+++ /dev/null
@@ -1,170 +0,0 @@
1#ifndef _SK_MCA_INCLUDE_
2#define _SK_MCA_INCLUDE_
3
4#ifdef _SK_MCA_DRIVER_
5
6/* Adapter ID's */
7#define SKNET_MCA_ID 0x6afd
8#define SKNET_JUNIOR_MCA_ID 0x6be9
9
10/* media enumeration - defined in a way that it fits onto the MC2+'s
11 POS registers... */
12
13typedef enum { Media_10Base2, Media_10BaseT,
14 Media_10Base5, Media_Unknown, Media_Count
15} skmca_medium;
16
17/* private structure */
18typedef struct {
19 unsigned int slot; /* MCA-Slot-# */
20 void __iomem *base;
21 void __iomem *macbase; /* base address of MAC address PROM */
22 void __iomem *ioregaddr;/* address of I/O-register (Lo) */
23 void __iomem *ctrladdr; /* address of control/stat register */
24 void __iomem *cmdaddr; /* address of I/O-command register */
25 int nextrx; /* index of next RX descriptor to
26 be read */
27 int nexttxput; /* index of next free TX descriptor */
28 int nexttxdone; /* index of next TX descriptor to
29 be finished */
30 int txbusy; /* # of busy TX descriptors */
31 struct net_device_stats stat; /* packet statistics */
32 int realirq; /* memorizes actual IRQ, even when
33 currently not allocated */
34	skmca_medium medium;	/* physical connector */
35 spinlock_t lock;
36} skmca_priv;
37
38/* card registers: control/status register bits */
39
40#define CTRL_ADR_DATA 0 /* Bit 0 = 0 ->access data register */
41#define CTRL_ADR_RAP 1 /* Bit 0 = 1 ->access RAP register */
42#define CTRL_RW_WRITE 0 /* Bit 1 = 0 ->write register */
43#define CTRL_RW_READ 2 /* Bit 1 = 1 ->read register */
44#define CTRL_RESET_ON 0 /* Bit 3 = 0 ->reset board */
45#define CTRL_RESET_OFF 8 /* Bit 3 = 1 ->no reset of board */
46
47#define STAT_ADR_DATA 0 /* Bit 0 of ctrl register read back */
48#define STAT_ADR_RAP 1
49#define STAT_RW_WRITE 0 /* Bit 1 of ctrl register read back */
50#define STAT_RW_READ 2
51#define STAT_RESET_ON 0 /* Bit 3 of ctrl register read back */
52#define STAT_RESET_OFF 8
53#define STAT_IRQ_ACT 0 /* interrupt pending */
54#define STAT_IRQ_NOACT 16 /* no interrupt pending */
55#define STAT_IO_NOBUSY 0 /* no transfer busy */
56#define STAT_IO_BUSY 32 /* transfer busy */
57
58/* I/O command register bits */
59
60#define IOCMD_GO 128 /* Bit 7 = 1 -> start register xfer */
61
62/* LANCE registers */
63
64#define LANCE_CSR0 0 /* Status/Control */
65
66#define CSR0_ERR 0x8000 /* general error flag */
67#define CSR0_BABL 0x4000 /* transmitter timeout */
68#define CSR0_CERR 0x2000 /* collision error */
69#define CSR0_MISS 0x1000 /* lost Rx block */
70#define CSR0_MERR 0x0800 /* memory access error */
71#define CSR0_RINT 0x0400 /* receiver interrupt */
72#define CSR0_TINT 0x0200 /* transmitter interrupt */
73#define CSR0_IDON 0x0100 /* initialization done */
74#define CSR0_INTR 0x0080 /* general interrupt flag */
75#define CSR0_INEA 0x0040 /* interrupt enable */
76#define CSR0_RXON 0x0020 /* receiver enabled */
77#define CSR0_TXON 0x0010 /* transmitter enabled */
78#define CSR0_TDMD 0x0008 /* force transmission now */
79#define CSR0_STOP 0x0004 /* stop LANCE */
80#define CSR0_STRT 0x0002 /* start LANCE */
81#define CSR0_INIT 0x0001 /* read initialization block */
82
83#define LANCE_CSR1 1 /* addr bit 0..15 of initialization */
84#define LANCE_CSR2 2 /* 16..23 block */
85
86#define LANCE_CSR3 3 /* Bus control */
87#define CSR3_BCON_HOLD 0 /* Bit 0 = 0 -> BM1,BM0,HOLD */
88#define CSR3_BCON_BUSRQ 1 /* Bit 0 = 1 -> BUSAK0,BYTE,BUSRQ */
89#define CSR3_ALE_HIGH 0 /* Bit 1 = 0 -> ALE asserted high */
90#define CSR3_ALE_LOW 2 /* Bit 1 = 1 -> ALE asserted low */
91#define CSR3_BSWAP_OFF 0 /* Bit 2 = 0 -> no byte swap */
92#define CSR3_BSWAP_ON 4 /* Bit 2 = 1 -> byte swap */
93
94/* LANCE structures */
95
96typedef struct { /* LANCE initialization block */
97 u16 Mode; /* mode flags */
98 u8 PAdr[6]; /* MAC address */
99 u8 LAdrF[8]; /* Multicast filter */
100 u32 RdrP; /* Receive descriptor */
101 u32 TdrP; /* Transmit descriptor */
102} LANCE_InitBlock;
103
104/* Mode flags init block */
105
106#define LANCE_INIT_PROM  0x8000	/* enable promiscuous mode */
107#define LANCE_INIT_INTL 0x0040 /* internal loopback */
108#define LANCE_INIT_DRTY 0x0020 /* disable retry */
109#define LANCE_INIT_COLL 0x0010 /* force collision */
110#define LANCE_INIT_DTCR 0x0008 /* disable transmit CRC */
111#define LANCE_INIT_LOOP 0x0004 /* loopback */
112#define LANCE_INIT_DTX 0x0002 /* disable transmitter */
113#define LANCE_INIT_DRX 0x0001 /* disable receiver */
114
115typedef struct { /* LANCE Tx descriptor */
116 u16 LowAddr; /* bit 0..15 of address */
117 u16 Flags; /* bit 16..23 of address + Flags */
118 u16 Len; /* 2s complement of packet length */
119 u16 Status; /* Result of transmission */
120} LANCE_TxDescr;
121
122#define TXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
123#define TXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
124#define TXDSCR_FLAGS_MORE 0x1000 /* more than one retry needed? */
125#define TXDSCR_FLAGS_ONE 0x0800 /* one retry? */
126#define TXDSCR_FLAGS_DEF 0x0400 /* transmission deferred? */
127#define TXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
128#define TXDSCR_FLAGS_ENP 0x0100 /* last packet in chain? */
129
130#define TXDSCR_STATUS_BUFF 0x8000 /* buffer error? */
131#define TXDSCR_STATUS_UFLO 0x4000 /* silo underflow during transmit? */
132#define TXDSCR_STATUS_LCOL 0x1000 /* late collision? */
133#define TXDSCR_STATUS_LCAR 0x0800 /* loss of carrier? */
134#define TXDSCR_STATUS_RTRY 0x0400 /* retry error? */
135
136typedef struct { /* LANCE Rx descriptor */
137 u16 LowAddr; /* bit 0..15 of address */
138 u16 Flags; /* bit 16..23 of address + Flags */
139 u16 MaxLen; /* 2s complement of buffer length */
140 u16 Len; /* packet length */
141} LANCE_RxDescr;
142
143#define RXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
144#define RXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
145#define RXDSCR_FLAGS_FRAM 0x2000 /* framing error flag */
146#define RXDSCR_FLAGS_OFLO 0x1000 /* FIFO overflow? */
147#define RXDSCR_FLAGS_CRC 0x0800 /* CRC error? */
148#define RXDSCR_FLAGS_BUFF 0x0400 /* buffer error? */
149#define RXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
150#define RXDCSR_FLAGS_ENP 0x0100 /* last packet in chain? */
151
152/* RAM layout */
153
154#define TXCOUNT 4 /* length of TX descriptor queue */
155#define LTXCOUNT 2 /* log2 of it */
156#define RXCOUNT 4 /* length of RX descriptor queue */
157#define LRXCOUNT 2 /* log2 of it */
158
159#define RAM_INITBASE 0 /* LANCE init block */
160#define RAM_TXBASE 24 /* Start of TX descriptor queue */
161#define RAM_RXBASE \
162(RAM_TXBASE + (TXCOUNT * 8)) /* Start of RX descriptor queue */
163#define RAM_DATABASE \
164(RAM_RXBASE + (RXCOUNT * 8)) /* Start of data area for frames */
165#define RAM_BUFSIZE 1580 /* max. frame size - should never be
166 reached */
167
168#endif /* _SK_MCA_DRIVER_ */
169
170#endif /* _SK_MCA_INCLUDE_ */
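
A quick aside on the sk_mca.h RAM layout above: each LANCE descriptor is four u16 fields (8 bytes), so the RX ring starts immediately after the four TX descriptors and the frame data area immediately after the four RX descriptors. A minimal standalone sketch (not part of the patch) that just evaluates those offsets:

    /* standalone illustration of the RAM_* layout macros above */
    #include <stdio.h>

    #define TXCOUNT      4
    #define RXCOUNT      4
    #define RAM_INITBASE 0
    #define RAM_TXBASE   24
    #define RAM_RXBASE   (RAM_TXBASE + (TXCOUNT * 8))   /* 24 + 32 = 56 */
    #define RAM_DATABASE (RAM_RXBASE + (RXCOUNT * 8))   /* 56 + 32 = 88 */

    int main(void)
    {
            printf("init=%d tx=%d rx=%d data=%d\n",
                   RAM_INITBASE, RAM_TXBASE, RAM_RXBASE, RAM_DATABASE);
            return 0;       /* prints init=0 tx=24 rx=56 data=88 */
    }
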
diff --git a/drivers/net/skfp/can.c b/drivers/net/skfp/can.c
deleted file mode 100644
index 8a49abce7961..000000000000
--- a/drivers/net/skfp/can.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/******************************************************************************
2 *
3 * (C)Copyright 1998,1999 SysKonnect,
4 * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
5 *
6 * See the file "skfddi.c" for further information.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * The information in this file is provided "AS IS" without warranty.
14 *
15 ******************************************************************************/
16
17#ifndef lint
18static const char xID_sccs[] = "@(#)can.c 1.5 97/04/07 (C) SK " ;
19#endif
20
21/*
22 * canonical bit order
23 */
24const u_char canonical[256] = {
25 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0,
26 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0,
27 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8,
28 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8,
29 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4,
30 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4,
31 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec,
32 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc,
33 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2,
34 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2,
35 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea,
36 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa,
37 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6,
38 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6,
39 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee,
40 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe,
41 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1,
42 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1,
43 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9,
44 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9,
45 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5,
46 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5,
47 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed,
48 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd,
49 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3,
50 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3,
51 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb,
52 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb,
53 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7,
54 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7,
55 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef,
56 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
57} ;
58
59#ifdef MAKE_TABLE
60int byte_reverse(x)
61int x ;
62{
63 int y = 0 ;
64
65 if (x & 0x01)
66 y |= 0x80 ;
67 if (x & 0x02)
68 y |= 0x40 ;
69 if (x & 0x04)
70 y |= 0x20 ;
71 if (x & 0x08)
72 y |= 0x10 ;
73 if (x & 0x10)
74 y |= 0x08 ;
75 if (x & 0x20)
76 y |= 0x04 ;
77 if (x & 0x40)
78 y |= 0x02 ;
79 if (x & 0x80)
80 y |= 0x01 ;
81 return(y) ;
82}
83#endif
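
The canonical[] table deleted here is nothing more than a per-byte bit reversal (FDDI uses canonical, LSB-first bit ordering), which is why the follow-up hunks in this series can replace every table lookup with the kernel's bitrev8() helper from <linux/bitrev.h>. A standalone sketch, not part of the patch, that reproduces a few table entries with a loop instead of the unrolled byte_reverse() above; reverse8() here is a local stand-in, not the kernel implementation:

    #include <stdio.h>

    static unsigned char reverse8(unsigned char x)
    {
            unsigned char y = 0;
            int i;

            for (i = 0; i < 8; i++)
                    if (x & (1u << i))
                            y |= 0x80u >> i;
            return y;
    }

    int main(void)
    {
            /* spot-check against the table above:
             * canonical[0x01] = 0x80, canonical[0x80] = 0x01, canonical[0x1f] = 0xf8 */
            printf("%02x %02x %02x\n",
                   reverse8(0x01), reverse8(0x80), reverse8(0x1f));
            return 0;
    }
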
diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/skfp/drvfbi.c
index 5b475833f645..4fe624b0dd25 100644
--- a/drivers/net/skfp/drvfbi.c
+++ b/drivers/net/skfp/drvfbi.c
@@ -23,6 +23,7 @@
23#include "h/smc.h" 23#include "h/smc.h"
24#include "h/supern_2.h" 24#include "h/supern_2.h"
25#include "h/skfbiinc.h" 25#include "h/skfbiinc.h"
26#include <linux/bitrev.h>
26 27
27#ifndef lint 28#ifndef lint
28static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ; 29static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
@@ -445,16 +446,14 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
445 char PmdType ; 446 char PmdType ;
446 int i ; 447 int i ;
447 448
448 extern const u_char canonical[256] ;
449
450#if (defined(ISA) || defined(MCA)) 449#if (defined(ISA) || defined(MCA))
451 for (i = 0; i < 4 ;i++) { /* read mac address from board */ 450 for (i = 0; i < 4 ;i++) { /* read mac address from board */
452 smc->hw.fddi_phys_addr.a[i] = 451 smc->hw.fddi_phys_addr.a[i] =
453 canonical[(inpw(PR_A(i+SA_MAC))&0xff)] ; 452 bitrev8(inpw(PR_A(i+SA_MAC)));
454 } 453 }
455 for (i = 4; i < 6; i++) { 454 for (i = 4; i < 6; i++) {
456 smc->hw.fddi_phys_addr.a[i] = 455 smc->hw.fddi_phys_addr.a[i] =
457 canonical[(inpw(PR_A(i+SA_MAC+PRA_OFF))&0xff)] ; 456 bitrev8(inpw(PR_A(i+SA_MAC+PRA_OFF)));
458 } 457 }
459#endif 458#endif
460#ifdef EISA 459#ifdef EISA
@@ -464,17 +463,17 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
464 */ 463 */
465 for (i = 0; i < 4 ;i++) { /* read mac address from board */ 464 for (i = 0; i < 4 ;i++) { /* read mac address from board */
466 smc->hw.fddi_phys_addr.a[i] = 465 smc->hw.fddi_phys_addr.a[i] =
467 canonical[inp(PR_A(i+SA_MAC))] ; 466 bitrev8(inp(PR_A(i+SA_MAC)));
468 } 467 }
469 for (i = 4; i < 6; i++) { 468 for (i = 4; i < 6; i++) {
470 smc->hw.fddi_phys_addr.a[i] = 469 smc->hw.fddi_phys_addr.a[i] =
471 canonical[inp(PR_A(i+SA_MAC+PRA_OFF))] ; 470 bitrev8(inp(PR_A(i+SA_MAC+PRA_OFF)));
472 } 471 }
473#endif 472#endif
474#ifdef PCI 473#ifdef PCI
475 for (i = 0; i < 6; i++) { /* read mac address from board */ 474 for (i = 0; i < 6; i++) { /* read mac address from board */
476 smc->hw.fddi_phys_addr.a[i] = 475 smc->hw.fddi_phys_addr.a[i] =
477 canonical[inp(ADDR(B2_MAC_0+i))] ; 476 bitrev8(inp(ADDR(B2_MAC_0+i)));
478 } 477 }
479#endif 478#endif
480#ifndef PCI 479#ifndef PCI
@@ -493,7 +492,7 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
493 if (mac_addr) { 492 if (mac_addr) {
494 for (i = 0; i < 6 ;i++) { 493 for (i = 0; i < 6 ;i++) {
495 smc->hw.fddi_canon_addr.a[i] = mac_addr[i] ; 494 smc->hw.fddi_canon_addr.a[i] = mac_addr[i] ;
496 smc->hw.fddi_home_addr.a[i] = canonical[mac_addr[i]] ; 495 smc->hw.fddi_home_addr.a[i] = bitrev8(mac_addr[i]);
497 } 496 }
498 return ; 497 return ;
499 } 498 }
@@ -501,7 +500,7 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
501 500
502 for (i = 0; i < 6 ;i++) { 501 for (i = 0; i < 6 ;i++) {
503 smc->hw.fddi_canon_addr.a[i] = 502 smc->hw.fddi_canon_addr.a[i] =
504 canonical[smc->hw.fddi_phys_addr.a[i]] ; 503 bitrev8(smc->hw.fddi_phys_addr.a[i]);
505 } 504 }
506} 505}
507 506
@@ -1269,11 +1268,8 @@ void driver_get_bia(struct s_smc *smc, struct fddi_addr *bia_addr)
1269{ 1268{
1270 int i ; 1269 int i ;
1271 1270
1272 extern const u_char canonical[256] ; 1271 for (i = 0 ; i < 6 ; i++)
1273 1272 bia_addr->a[i] = bitrev8(smc->hw.fddi_phys_addr.a[i]);
1274 for (i = 0 ; i < 6 ; i++) {
1275 bia_addr->a[i] = canonical[smc->hw.fddi_phys_addr.a[i]] ;
1276 }
1277} 1273}
1278 1274
1279void smt_start_watchdog(struct s_smc *smc) 1275void smt_start_watchdog(struct s_smc *smc)
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 0784f558ca9a..a45205da8033 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -22,7 +22,7 @@
22#include "h/fddi.h" 22#include "h/fddi.h"
23#include "h/smc.h" 23#include "h/smc.h"
24#include "h/supern_2.h" 24#include "h/supern_2.h"
25#include "can.c" 25#include <linux/bitrev.h>
26 26
27#ifndef lint 27#ifndef lint
28static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ; 28static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ;
@@ -1073,7 +1073,7 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
1073 if (can) { 1073 if (can) {
1074 p = own->a ; 1074 p = own->a ;
1075 for (i = 0 ; i < 6 ; i++, p++) 1075 for (i = 0 ; i < 6 ; i++, p++)
1076 *p = canonical[*p] ; 1076 *p = bitrev8(*p);
1077 } 1077 }
1078 slot = NULL; 1078 slot = NULL;
1079 for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){ 1079 for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 99a776a51fb5..fe847800acdc 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -18,6 +18,7 @@
18#include "h/fddi.h" 18#include "h/fddi.h"
19#include "h/smc.h" 19#include "h/smc.h"
20#include "h/smt_p.h" 20#include "h/smt_p.h"
21#include <linux/bitrev.h>
21 22
22#define KERNEL 23#define KERNEL
23#include "h/smtstate.h" 24#include "h/smtstate.h"
@@ -26,8 +27,6 @@
26static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ; 27static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ;
27#endif 28#endif
28 29
29extern const u_char canonical[256] ;
30
31/* 30/*
32 * FC in SMbuf 31 * FC in SMbuf
33 */ 32 */
@@ -180,7 +179,7 @@ void smt_agent_init(struct s_smc *smc)
180 driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ; 179 driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ;
181 for (i = 0 ; i < 6 ; i ++) { 180 for (i = 0 ; i < 6 ; i ++) {
182 smc->mib.fddiSMTStationId.sid_node.a[i] = 181 smc->mib.fddiSMTStationId.sid_node.a[i] =
183 canonical[smc->mib.fddiSMTStationId.sid_node.a[i]] ; 182 bitrev8(smc->mib.fddiSMTStationId.sid_node.a[i]);
184 } 183 }
185 smc->mib.fddiSMTManufacturerData[0] = 184 smc->mib.fddiSMTManufacturerData[0] =
186 smc->mib.fddiSMTStationId.sid_node.a[0] ; 185 smc->mib.fddiSMTStationId.sid_node.a[0] ;
@@ -2049,9 +2048,8 @@ static void hwm_conv_can(struct s_smc *smc, char *data, int len)
2049 2048
2050 SK_UNUSED(smc) ; 2049 SK_UNUSED(smc) ;
2051 2050
2052 for (i = len; i ; i--, data++) { 2051 for (i = len; i ; i--, data++)
2053 *data = canonical[*(u_char *)data] ; 2052 *data = bitrev8(*data);
2054 }
2055} 2053}
2056#endif 2054#endif
2057 2055
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 45283f3f95e4..e482e7fcbb2b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "1.9" 45#define DRV_VERSION "1.10"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -132,18 +132,93 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
132} 132}
133 133
134/* Wake on Lan only supported on Yukon chips with rev 1 or above */ 134/* Wake on Lan only supported on Yukon chips with rev 1 or above */
135static int wol_supported(const struct skge_hw *hw) 135static u32 wol_supported(const struct skge_hw *hw)
136{ 136{
137 return !((hw->chip_id == CHIP_ID_GENESIS || 137 if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev != 0)
138 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0))); 138 return WAKE_MAGIC | WAKE_PHY;
139 else
140 return 0;
141}
142
143static u32 pci_wake_enabled(struct pci_dev *dev)
144{
145 int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
146 u16 value;
147
148 /* If device doesn't support PM Capabilities, but request is to disable
149 * wake events, it's a nop; otherwise fail */
150 if (!pm)
151 return 0;
152
153 pci_read_config_word(dev, pm + PCI_PM_PMC, &value);
154
155 value &= PCI_PM_CAP_PME_MASK;
156 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */
157
158 return value != 0;
159}
160
161static void skge_wol_init(struct skge_port *skge)
162{
163 struct skge_hw *hw = skge->hw;
164 int port = skge->port;
165 enum pause_control save_mode;
166 u32 ctrl;
167
168 /* Bring hardware out of reset */
169 skge_write16(hw, B0_CTST, CS_RST_CLR);
170 skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
171
172 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
173 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
174
175 /* Force to 10/100 skge_reset will re-enable on resume */
176 save_mode = skge->flow_control;
177 skge->flow_control = FLOW_MODE_SYMMETRIC;
178
179 ctrl = skge->advertising;
180 skge->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
181
182 skge_phy_reset(skge);
183
184 skge->flow_control = save_mode;
185 skge->advertising = ctrl;
186
187 /* Set GMAC to no flow control and auto update for speed/duplex */
188 gma_write16(hw, port, GM_GP_CTRL,
189 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
190 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
191
192 /* Set WOL address */
193 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
194 skge->netdev->dev_addr, ETH_ALEN);
195
196 /* Turn on appropriate WOL control bits */
197 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
198 ctrl = 0;
199 if (skge->wol & WAKE_PHY)
200 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
201 else
202 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
203
204 if (skge->wol & WAKE_MAGIC)
205 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
206 else
207 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;;
208
209 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
210 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
211
212 /* block receiver */
213 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
139} 214}
140 215
141static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 216static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
142{ 217{
143 struct skge_port *skge = netdev_priv(dev); 218 struct skge_port *skge = netdev_priv(dev);
144 219
145 wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0; 220 wol->supported = wol_supported(skge->hw);
146 wol->wolopts = skge->wol ? WAKE_MAGIC : 0; 221 wol->wolopts = skge->wol;
147} 222}
148 223
149static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 224static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -151,23 +226,12 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
151 struct skge_port *skge = netdev_priv(dev); 226 struct skge_port *skge = netdev_priv(dev);
152 struct skge_hw *hw = skge->hw; 227 struct skge_hw *hw = skge->hw;
153 228
154 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) 229 if (wol->wolopts & wol_supported(hw))
155 return -EOPNOTSUPP; 230 return -EOPNOTSUPP;
156 231
157 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw)) 232 skge->wol = wol->wolopts;
158 return -EOPNOTSUPP; 233 if (!netif_running(dev))
159 234 skge_wol_init(skge);
160 skge->wol = wol->wolopts == WAKE_MAGIC;
161
162 if (skge->wol) {
163 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
164
165 skge_write16(hw, WOL_CTRL_STAT,
166 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
167 WOL_CTL_ENA_MAGIC_PKT_UNIT);
168 } else
169 skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
170
171 return 0; 235 return 0;
172} 236}
173 237
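
For context on the new pci_wake_enabled() helper above: it reads the PMC register of the PCI power-management capability and tests the PME-support field by masking and shifting with ffs(). The same mask/shift arithmetic can be sketched standalone; the 0xf800 value used for the PME mask and the sample PMC value are assumptions for illustration only:

    #include <stdio.h>
    #include <strings.h>                    /* ffs() */

    #define PME_MASK 0xf800                 /* assumed: PME-support bits 11..15 of PMC */

    int main(void)
    {
            unsigned int pmc = 0x7e02;      /* example PMC register value */
            unsigned int pme = pmc & PME_MASK;

            pme >>= ffs(PME_MASK) - 1;      /* shift the field down to bit 0 */
            printf("PME support field = 0x%x, wake capable = %d\n", pme, pme != 0);
            return 0;                       /* prints 0xf, 1 for this sample value */
    }
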
@@ -2373,6 +2437,9 @@ static int skge_up(struct net_device *dev)
2373 size_t rx_size, tx_size; 2437 size_t rx_size, tx_size;
2374 int err; 2438 int err;
2375 2439
2440 if (!is_valid_ether_addr(dev->dev_addr))
2441 return -EINVAL;
2442
2376 if (netif_msg_ifup(skge)) 2443 if (netif_msg_ifup(skge))
2377 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 2444 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2378 2445
@@ -2392,7 +2459,7 @@ static int skge_up(struct net_device *dev)
2392 BUG_ON(skge->dma & 7); 2459 BUG_ON(skge->dma & 7);
2393 2460
2394 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { 2461 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
2395 printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n"); 2462 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
2396 err = -EINVAL; 2463 err = -EINVAL;
2397 goto free_pci_mem; 2464 goto free_pci_mem;
2398 } 2465 }
@@ -3001,6 +3068,7 @@ static void skge_mac_intr(struct skge_hw *hw, int port)
3001/* Handle device specific framing and timeout interrupts */ 3068/* Handle device specific framing and timeout interrupts */
3002static void skge_error_irq(struct skge_hw *hw) 3069static void skge_error_irq(struct skge_hw *hw)
3003{ 3070{
3071 struct pci_dev *pdev = hw->pdev;
3004 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3072 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
3005 3073
3006 if (hw->chip_id == CHIP_ID_GENESIS) { 3074 if (hw->chip_id == CHIP_ID_GENESIS) {
@@ -3016,12 +3084,12 @@ static void skge_error_irq(struct skge_hw *hw)
3016 } 3084 }
3017 3085
3018 if (hwstatus & IS_RAM_RD_PAR) { 3086 if (hwstatus & IS_RAM_RD_PAR) {
3019 printk(KERN_ERR PFX "Ram read data parity error\n"); 3087 dev_err(&pdev->dev, "Ram read data parity error\n");
3020 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); 3088 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
3021 } 3089 }
3022 3090
3023 if (hwstatus & IS_RAM_WR_PAR) { 3091 if (hwstatus & IS_RAM_WR_PAR) {
3024 printk(KERN_ERR PFX "Ram write data parity error\n"); 3092 dev_err(&pdev->dev, "Ram write data parity error\n");
3025 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); 3093 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
3026 } 3094 }
3027 3095
@@ -3032,38 +3100,38 @@ static void skge_error_irq(struct skge_hw *hw)
3032 skge_mac_parity(hw, 1); 3100 skge_mac_parity(hw, 1);
3033 3101
3034 if (hwstatus & IS_R1_PAR_ERR) { 3102 if (hwstatus & IS_R1_PAR_ERR) {
3035 printk(KERN_ERR PFX "%s: receive queue parity error\n", 3103 dev_err(&pdev->dev, "%s: receive queue parity error\n",
3036 hw->dev[0]->name); 3104 hw->dev[0]->name);
3037 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); 3105 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
3038 } 3106 }
3039 3107
3040 if (hwstatus & IS_R2_PAR_ERR) { 3108 if (hwstatus & IS_R2_PAR_ERR) {
3041 printk(KERN_ERR PFX "%s: receive queue parity error\n", 3109 dev_err(&pdev->dev, "%s: receive queue parity error\n",
3042 hw->dev[1]->name); 3110 hw->dev[1]->name);
3043 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); 3111 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
3044 } 3112 }
3045 3113
3046 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { 3114 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
3047 u16 pci_status, pci_cmd; 3115 u16 pci_status, pci_cmd;
3048 3116
3049 pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd); 3117 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
3050 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); 3118 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
3051 3119
3052 printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n", 3120 dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
3053 pci_name(hw->pdev), pci_cmd, pci_status); 3121 pci_cmd, pci_status);
3054 3122
3055 /* Write the error bits back to clear them. */ 3123 /* Write the error bits back to clear them. */
3056 pci_status &= PCI_STATUS_ERROR_BITS; 3124 pci_status &= PCI_STATUS_ERROR_BITS;
3057 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3125 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3058 pci_write_config_word(hw->pdev, PCI_COMMAND, 3126 pci_write_config_word(pdev, PCI_COMMAND,
3059 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); 3127 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
3060 pci_write_config_word(hw->pdev, PCI_STATUS, pci_status); 3128 pci_write_config_word(pdev, PCI_STATUS, pci_status);
3061 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3129 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3062 3130
3063 /* if error still set then just ignore it */ 3131 /* if error still set then just ignore it */
3064 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3132 hwstatus = skge_read32(hw, B0_HWE_ISRC);
3065 if (hwstatus & IS_IRQ_STAT) { 3133 if (hwstatus & IS_IRQ_STAT) {
3066 printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n"); 3134 dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
3067 hw->intr_mask &= ~IS_HW_ERR; 3135 hw->intr_mask &= ~IS_HW_ERR;
3068 } 3136 }
3069 } 3137 }
@@ -3277,8 +3345,8 @@ static int skge_reset(struct skge_hw *hw)
3277 hw->phy_addr = PHY_ADDR_BCOM; 3345 hw->phy_addr = PHY_ADDR_BCOM;
3278 break; 3346 break;
3279 default: 3347 default:
3280 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n", 3348 dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
3281 pci_name(hw->pdev), hw->phy_type); 3349 hw->phy_type);
3282 return -EOPNOTSUPP; 3350 return -EOPNOTSUPP;
3283 } 3351 }
3284 break; 3352 break;
@@ -3293,8 +3361,8 @@ static int skge_reset(struct skge_hw *hw)
3293 break; 3361 break;
3294 3362
3295 default: 3363 default:
3296 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n", 3364 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
3297 pci_name(hw->pdev), hw->chip_id); 3365 hw->chip_id);
3298 return -EOPNOTSUPP; 3366 return -EOPNOTSUPP;
3299 } 3367 }
3300 3368
@@ -3334,7 +3402,7 @@ static int skge_reset(struct skge_hw *hw)
3334 /* avoid boards with stuck Hardware error bits */ 3402 /* avoid boards with stuck Hardware error bits */
3335 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && 3403 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
3336 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { 3404 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
3337 printk(KERN_WARNING PFX "stuck hardware sensor bit\n"); 3405 dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
3338 hw->intr_mask &= ~IS_HW_ERR; 3406 hw->intr_mask &= ~IS_HW_ERR;
3339 } 3407 }
3340 3408
@@ -3408,7 +3476,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3408 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3476 struct net_device *dev = alloc_etherdev(sizeof(*skge));
3409 3477
3410 if (!dev) { 3478 if (!dev) {
3411 printk(KERN_ERR "skge etherdev alloc failed"); 3479 dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
3412 return NULL; 3480 return NULL;
3413 } 3481 }
3414 3482
@@ -3452,6 +3520,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3452 skge->duplex = -1; 3520 skge->duplex = -1;
3453 skge->speed = -1; 3521 skge->speed = -1;
3454 skge->advertising = skge_supported_modes(hw); 3522 skge->advertising = skge_supported_modes(hw);
3523 skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0;
3455 3524
3456 hw->dev[port] = dev; 3525 hw->dev[port] = dev;
3457 3526
@@ -3496,15 +3565,13 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3496 3565
3497 err = pci_enable_device(pdev); 3566 err = pci_enable_device(pdev);
3498 if (err) { 3567 if (err) {
3499 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3568 dev_err(&pdev->dev, "cannot enable PCI device\n");
3500 pci_name(pdev));
3501 goto err_out; 3569 goto err_out;
3502 } 3570 }
3503 3571
3504 err = pci_request_regions(pdev, DRV_NAME); 3572 err = pci_request_regions(pdev, DRV_NAME);
3505 if (err) { 3573 if (err) {
3506 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3574 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
3507 pci_name(pdev));
3508 goto err_out_disable_pdev; 3575 goto err_out_disable_pdev;
3509 } 3576 }
3510 3577
@@ -3519,8 +3586,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3519 } 3586 }
3520 3587
3521 if (err) { 3588 if (err) {
3522 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3589 dev_err(&pdev->dev, "no usable DMA configuration\n");
3523 pci_name(pdev));
3524 goto err_out_free_regions; 3590 goto err_out_free_regions;
3525 } 3591 }
3526 3592
@@ -3538,8 +3604,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3538 err = -ENOMEM; 3604 err = -ENOMEM;
3539 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3605 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3540 if (!hw) { 3606 if (!hw) {
3541 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", 3607 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3542 pci_name(pdev));
3543 goto err_out_free_regions; 3608 goto err_out_free_regions;
3544 } 3609 }
3545 3610
@@ -3550,8 +3615,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3550 3615
3551 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3616 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3552 if (!hw->regs) { 3617 if (!hw->regs) {
3553 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3618 dev_err(&pdev->dev, "cannot map device registers\n");
3554 pci_name(pdev));
3555 goto err_out_free_hw; 3619 goto err_out_free_hw;
3556 } 3620 }
3557 3621
@@ -3567,23 +3631,19 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3567 if (!dev) 3631 if (!dev)
3568 goto err_out_led_off; 3632 goto err_out_led_off;
3569 3633
3570 if (!is_valid_ether_addr(dev->dev_addr)) { 3634 /* Some motherboards are broken and have zero in ROM. */
3571 printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n", 3635 if (!is_valid_ether_addr(dev->dev_addr))
3572 pci_name(pdev)); 3636 dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
3573 err = -EIO;
3574 goto err_out_free_netdev;
3575 }
3576 3637
3577 err = register_netdev(dev); 3638 err = register_netdev(dev);
3578 if (err) { 3639 if (err) {
3579 printk(KERN_ERR PFX "%s: cannot register net device\n", 3640 dev_err(&pdev->dev, "cannot register net device\n");
3580 pci_name(pdev));
3581 goto err_out_free_netdev; 3641 goto err_out_free_netdev;
3582 } 3642 }
3583 3643
3584 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3644 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
3585 if (err) { 3645 if (err) {
3586 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3646 dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
3587 dev->name, pdev->irq); 3647 dev->name, pdev->irq);
3588 goto err_out_unregister; 3648 goto err_out_unregister;
3589 } 3649 }
@@ -3594,7 +3654,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3594 skge_show_addr(dev1); 3654 skge_show_addr(dev1);
3595 else { 3655 else {
3596 /* Failure to register second port need not be fatal */ 3656 /* Failure to register second port need not be fatal */
3597 printk(KERN_WARNING PFX "register of second port failed\n"); 3657 dev_warn(&pdev->dev, "register of second port failed\n");
3598 hw->dev[1] = NULL; 3658 hw->dev[1] = NULL;
3599 free_netdev(dev1); 3659 free_netdev(dev1);
3600 } 3660 }
@@ -3659,28 +3719,46 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3659} 3719}
3660 3720
3661#ifdef CONFIG_PM 3721#ifdef CONFIG_PM
3722static int vaux_avail(struct pci_dev *pdev)
3723{
3724 int pm_cap;
3725
3726 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3727 if (pm_cap) {
3728 u16 ctl;
3729 pci_read_config_word(pdev, pm_cap + PCI_PM_PMC, &ctl);
3730 if (ctl & PCI_PM_CAP_AUX_POWER)
3731 return 1;
3732 }
3733 return 0;
3734}
3735
3736
3662static int skge_suspend(struct pci_dev *pdev, pm_message_t state) 3737static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
3663{ 3738{
3664 struct skge_hw *hw = pci_get_drvdata(pdev); 3739 struct skge_hw *hw = pci_get_drvdata(pdev);
3665 int i, wol = 0; 3740 int i, err, wol = 0;
3741
3742 err = pci_save_state(pdev);
3743 if (err)
3744 return err;
3666 3745
3667 pci_save_state(pdev);
3668 for (i = 0; i < hw->ports; i++) { 3746 for (i = 0; i < hw->ports; i++) {
3669 struct net_device *dev = hw->dev[i]; 3747 struct net_device *dev = hw->dev[i];
3748 struct skge_port *skge = netdev_priv(dev);
3670 3749
3671 if (netif_running(dev)) { 3750 if (netif_running(dev))
3672 struct skge_port *skge = netdev_priv(dev); 3751 skge_down(dev);
3752 if (skge->wol)
3753 skge_wol_init(skge);
3673 3754
3674 netif_carrier_off(dev); 3755 wol |= skge->wol;
3675 if (skge->wol)
3676 netif_stop_queue(dev);
3677 else
3678 skge_down(dev);
3679 wol |= skge->wol;
3680 }
3681 netif_device_detach(dev);
3682 } 3756 }
3683 3757
3758 if (wol && vaux_avail(pdev))
3759 skge_write8(hw, B0_POWER_CTRL,
3760 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
3761
3684 skge_write32(hw, B0_IMSK, 0); 3762 skge_write32(hw, B0_IMSK, 0);
3685 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 3763 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
3686 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3764 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3693,8 +3771,14 @@ static int skge_resume(struct pci_dev *pdev)
3693 struct skge_hw *hw = pci_get_drvdata(pdev); 3771 struct skge_hw *hw = pci_get_drvdata(pdev);
3694 int i, err; 3772 int i, err;
3695 3773
3696 pci_set_power_state(pdev, PCI_D0); 3774 err = pci_set_power_state(pdev, PCI_D0);
3697 pci_restore_state(pdev); 3775 if (err)
3776 goto out;
3777
3778 err = pci_restore_state(pdev);
3779 if (err)
3780 goto out;
3781
3698 pci_enable_wake(pdev, PCI_D0, 0); 3782 pci_enable_wake(pdev, PCI_D0, 0);
3699 3783
3700 err = skge_reset(hw); 3784 err = skge_reset(hw);
@@ -3704,7 +3788,6 @@ static int skge_resume(struct pci_dev *pdev)
3704 for (i = 0; i < hw->ports; i++) { 3788 for (i = 0; i < hw->ports; i++) {
3705 struct net_device *dev = hw->dev[i]; 3789 struct net_device *dev = hw->dev[i];
3706 3790
3707 netif_device_attach(dev);
3708 if (netif_running(dev)) { 3791 if (netif_running(dev)) {
3709 err = skge_up(dev); 3792 err = skge_up(dev);
3710 3793
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index f6223c533c01..17b1b479dff5 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -876,11 +876,13 @@ enum {
876 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ 876 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
877 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ 877 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
878}; 878};
879#define WOL_REGS(port, x) (x + (port)*0x80)
879 880
880enum { 881enum {
881 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ 882 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
882 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ 883 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
883}; 884};
885#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
884 886
885enum { 887enum {
886 BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */ 888 BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */
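
The two macros added to skge.h here fold the per-port offsets into the existing single-port WOL definitions: the second MAC's WOL register block sits 0x80 bytes above the first, and its pattern RAM 0x400 bytes above. A standalone sketch, not part of the patch, using only constants visible in this hunk:

    #include <stdio.h>

    #define WOL_PATT_CNT_0          0x0f38
    #define WOL_PATT_RAM_1          0x1000
    #define WOL_REGS(port, x)       (x + (port)*0x80)
    #define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)

    int main(void)
    {
            printf("port 0: cnt=0x%04x ram=0x%04x\n",
                   WOL_REGS(0, WOL_PATT_CNT_0), WOL_PATT_RAM_BASE(0));
            printf("port 1: cnt=0x%04x ram=0x%04x\n",
                   WOL_REGS(1, WOL_PATT_CNT_0), WOL_PATT_RAM_BASE(1));
            return 0;   /* port 1 pattern RAM resolves to 0x1400, i.e. WOL_PATT_RAM_2 */
    }
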
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 822dd0b13133..f2ab3d56e565 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -49,7 +49,7 @@
49#include "sky2.h" 49#include "sky2.h"
50 50
51#define DRV_NAME "sky2" 51#define DRV_NAME "sky2"
52#define DRV_VERSION "1.10" 52#define DRV_VERSION "1.12"
53#define PFX DRV_NAME " " 53#define PFX DRV_NAME " "
54 54
55/* 55/*
@@ -105,6 +105,7 @@ static const struct pci_device_id sky2_id_table[] = {
105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ 105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ 106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ 107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */
108 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */ 109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */ 110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */ 111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
@@ -126,6 +127,9 @@ static const struct pci_device_id sky2_id_table[] = {
126 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ 127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ 128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ 129 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
130 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
131 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
132 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
129 { 0 } 133 { 0 }
130}; 134};
131 135
@@ -140,7 +144,7 @@ static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
140static const char *yukon2_name[] = { 144static const char *yukon2_name[] = {
141 "XL", /* 0xb3 */ 145 "XL", /* 0xb3 */
142 "EC Ultra", /* 0xb4 */ 146 "EC Ultra", /* 0xb4 */
143 "UNKNOWN", /* 0xb5 */ 147 "Extreme", /* 0xb5 */
144 "EC", /* 0xb6 */ 148 "EC", /* 0xb6 */
145 "FE", /* 0xb7 */ 149 "FE", /* 0xb7 */
146}; 150};
@@ -192,76 +196,52 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
192 return v; 196 return v;
193} 197}
194 198
195static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
196{
197 u16 power_control;
198 int vaux;
199
200 pr_debug("sky2_set_power_state %d\n", state);
201 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
202
203 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
204 vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
205 (power_control & PCI_PM_CAP_PME_D3cold);
206
207 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
208
209 power_control |= PCI_PM_CTRL_PME_STATUS;
210 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
211 199
212 switch (state) { 200static void sky2_power_on(struct sky2_hw *hw)
213 case PCI_D0: 201{
214 /* switch power to VCC (WA for VAUX problem) */ 202 /* switch power to VCC (WA for VAUX problem) */
215 sky2_write8(hw, B0_POWER_CTRL, 203 sky2_write8(hw, B0_POWER_CTRL,
216 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 204 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
217
218 /* disable Core Clock Division, */
219 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
220
221 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
222 /* enable bits are inverted */
223 sky2_write8(hw, B2_Y2_CLK_GATE,
224 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
225 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
226 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
227 else
228 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
229 205
230 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 206 /* disable Core Clock Division, */
231 u32 reg1; 207 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
232 208
233 sky2_pci_write32(hw, PCI_DEV_REG3, 0); 209 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
234 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); 210 /* enable bits are inverted */
235 reg1 &= P_ASPM_CONTROL_MSK; 211 sky2_write8(hw, B2_Y2_CLK_GATE,
236 sky2_pci_write32(hw, PCI_DEV_REG4, reg1); 212 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
237 sky2_pci_write32(hw, PCI_DEV_REG5, 0); 213 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
238 } 214 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
215 else
216 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
239 217
240 break; 218 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
219 u32 reg1;
241 220
242 case PCI_D3hot: 221 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
243 case PCI_D3cold: 222 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
244 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 223 reg1 &= P_ASPM_CONTROL_MSK;
245 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 224 sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
246 else 225 sky2_pci_write32(hw, PCI_DEV_REG5, 0);
247 /* enable bits are inverted */
248 sky2_write8(hw, B2_Y2_CLK_GATE,
249 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
250 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
251 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
252
253 /* switch power to VAUX */
254 if (vaux && state != PCI_D3cold)
255 sky2_write8(hw, B0_POWER_CTRL,
256 (PC_VAUX_ENA | PC_VCC_ENA |
257 PC_VAUX_ON | PC_VCC_OFF));
258 break;
259 default:
260 printk(KERN_ERR PFX "Unknown power state %d\n", state);
261 } 226 }
227}
262 228
263 sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control); 229static void sky2_power_aux(struct sky2_hw *hw)
264 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 230{
231 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
232 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
233 else
234 /* enable bits are inverted */
235 sky2_write8(hw, B2_Y2_CLK_GATE,
236 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
237 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
238 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
239
240 /* switch power to VAUX */
241 if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
242 sky2_write8(hw, B0_POWER_CTRL,
243 (PC_VAUX_ENA | PC_VCC_ENA |
244 PC_VAUX_ON | PC_VCC_OFF));
265} 245}
266 246
267static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 247static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
@@ -313,8 +293,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
313 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 293 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
314 u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; 294 u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
315 295
316 if (sky2->autoneg == AUTONEG_ENABLE && 296 if (sky2->autoneg == AUTONEG_ENABLE
317 !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { 297 && !(hw->chip_id == CHIP_ID_YUKON_XL
298 || hw->chip_id == CHIP_ID_YUKON_EC_U
299 || hw->chip_id == CHIP_ID_YUKON_EX)) {
318 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 300 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
319 301
320 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 302 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -341,8 +323,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
341 /* enable automatic crossover */ 323 /* enable automatic crossover */
342 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); 324 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
343 325
344 if (sky2->autoneg == AUTONEG_ENABLE && 326 if (sky2->autoneg == AUTONEG_ENABLE
345 (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { 327 && (hw->chip_id == CHIP_ID_YUKON_XL
328 || hw->chip_id == CHIP_ID_YUKON_EC_U
329 || hw->chip_id == CHIP_ID_YUKON_EX)) {
346 ctrl &= ~PHY_M_PC_DSC_MSK; 330 ctrl &= ~PHY_M_PC_DSC_MSK;
347 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; 331 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
348 } 332 }
@@ -497,7 +481,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
497 /* restore page register */ 481 /* restore page register */
498 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 482 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
499 break; 483 break;
484
500 case CHIP_ID_YUKON_EC_U: 485 case CHIP_ID_YUKON_EC_U:
486 case CHIP_ID_YUKON_EX:
501 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 487 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
502 488
503 /* select page 3 to access LED control register */ 489 /* select page 3 to access LED control register */
@@ -539,7 +525,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
539 525
540 /* set page register to 0 */ 526 /* set page register to 0 */
541 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 527 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
542 } else { 528 } else if (hw->chip_id != CHIP_ID_YUKON_EX) {
543 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 529 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
544 530
545 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { 531 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
@@ -591,6 +577,73 @@ static void sky2_phy_reinit(struct sky2_port *sky2)
591 spin_unlock_bh(&sky2->phy_lock); 577 spin_unlock_bh(&sky2->phy_lock);
592} 578}
593 579
580/* Put device in state to listen for Wake On Lan */
581static void sky2_wol_init(struct sky2_port *sky2)
582{
583 struct sky2_hw *hw = sky2->hw;
584 unsigned port = sky2->port;
585 enum flow_control save_mode;
586 u16 ctrl;
587 u32 reg1;
588
589 /* Bring hardware out of reset */
590 sky2_write16(hw, B0_CTST, CS_RST_CLR);
591 sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
592
593 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
594 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
595
596 /* Force to 10/100
597 * sky2_reset will re-enable on resume
598 */
599 save_mode = sky2->flow_mode;
600 ctrl = sky2->advertising;
601
602 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
603 sky2->flow_mode = FC_NONE;
604 sky2_phy_power(hw, port, 1);
605 sky2_phy_reinit(sky2);
606
607 sky2->flow_mode = save_mode;
608 sky2->advertising = ctrl;
609
610 /* Set GMAC to no flow control and auto update for speed/duplex */
611 gma_write16(hw, port, GM_GP_CTRL,
612 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
613 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
614
615 /* Set WOL address */
616 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
617 sky2->netdev->dev_addr, ETH_ALEN);
618
619 /* Turn on appropriate WOL control bits */
620 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
621 ctrl = 0;
622 if (sky2->wol & WAKE_PHY)
623 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
624 else
625 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
626
627 if (sky2->wol & WAKE_MAGIC)
628 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
629 else
630 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;;
631
632 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
633 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
634
635 /* Turn on legacy PCI-Express PME mode */
636 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
637 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
638 reg1 |= PCI_Y2_PME_LEGACY;
639 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
640 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
641
642 /* block receiver */
643 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
644
645}
646
594static void sky2_mac_init(struct sky2_hw *hw, unsigned port) 647static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
595{ 648{
596 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 649 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
@@ -684,7 +737,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
684 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 737 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
685 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 738 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
686 739
687 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 740 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
688 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 741 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
689 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 742 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
690 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 743 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
@@ -1467,6 +1520,9 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1467 if (unlikely(netif_msg_tx_done(sky2))) 1520 if (unlikely(netif_msg_tx_done(sky2)))
1468 printk(KERN_DEBUG "%s: tx done %u\n", 1521 printk(KERN_DEBUG "%s: tx done %u\n",
1469 dev->name, idx); 1522 dev->name, idx);
1523 sky2->net_stats.tx_packets++;
1524 sky2->net_stats.tx_bytes += re->skb->len;
1525
1470 dev_kfree_skb_any(re->skb); 1526 dev_kfree_skb_any(re->skb);
1471 } 1527 }
1472 1528
@@ -1641,7 +1697,9 @@ static void sky2_link_up(struct sky2_port *sky2)
1641 sky2_write8(hw, SK_REG(port, LNK_LED_REG), 1697 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1642 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); 1698 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1643 1699
1644 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) { 1700 if (hw->chip_id == CHIP_ID_YUKON_XL
1701 || hw->chip_id == CHIP_ID_YUKON_EC_U
1702 || hw->chip_id == CHIP_ID_YUKON_EX) {
1645 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 1703 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1646 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */ 1704 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */
1647 1705
@@ -1734,14 +1792,16 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1734 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; 1792 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1735 1793
1736 /* Pause bits are offset (9..8) */ 1794 /* Pause bits are offset (9..8) */
1737 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) 1795 if (hw->chip_id == CHIP_ID_YUKON_XL
1796 || hw->chip_id == CHIP_ID_YUKON_EC_U
1797 || hw->chip_id == CHIP_ID_YUKON_EX)
1738 aux >>= 6; 1798 aux >>= 6;
1739 1799
1740 sky2->flow_status = sky2_flow(aux & PHY_M_PS_RX_P_EN, 1800 sky2->flow_status = sky2_flow(aux & PHY_M_PS_RX_P_EN,
1741 aux & PHY_M_PS_TX_P_EN); 1801 aux & PHY_M_PS_TX_P_EN);
1742 1802
1743 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 1803 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
1744 && hw->chip_id != CHIP_ID_YUKON_EC_U) 1804 && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
1745 sky2->flow_status = FC_NONE; 1805 sky2->flow_status = FC_NONE;
1746 1806
1747 if (aux & PHY_M_PS_RX_P_EN) 1807 if (aux & PHY_M_PS_RX_P_EN)
@@ -1794,48 +1854,37 @@ out:
1794} 1854}
1795 1855
1796 1856
1797/* Transmit timeout is only called if we are running, carries is up 1857/* Transmit timeout is only called if we are running, carrier is up
1798 * and tx queue is full (stopped). 1858 * and tx queue is full (stopped).
1859 * Called with netif_tx_lock held.
1799 */ 1860 */
1800static void sky2_tx_timeout(struct net_device *dev) 1861static void sky2_tx_timeout(struct net_device *dev)
1801{ 1862{
1802 struct sky2_port *sky2 = netdev_priv(dev); 1863 struct sky2_port *sky2 = netdev_priv(dev);
1803 struct sky2_hw *hw = sky2->hw; 1864 struct sky2_hw *hw = sky2->hw;
1804 unsigned txq = txqaddr[sky2->port]; 1865 u32 imask;
1805 u16 report, done;
1806 1866
1807 if (netif_msg_timer(sky2)) 1867 if (netif_msg_timer(sky2))
1808 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); 1868 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1809 1869
1810 report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1811 done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));
1812
1813 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n", 1870 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
1814 dev->name, 1871 dev->name, sky2->tx_cons, sky2->tx_prod,
1815 sky2->tx_cons, sky2->tx_prod, report, done); 1872 sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
1873 sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
1816 1874
1817 if (report != done) { 1875 imask = sky2_read32(hw, B0_IMSK); /* block IRQ in hw */
1818 printk(KERN_INFO PFX "status burst pending (irq moderation?)\n"); 1876 sky2_write32(hw, B0_IMSK, 0);
1819 1877 sky2_read32(hw, B0_IMSK);
1820 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1821 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1822 } else if (report != sky2->tx_cons) {
1823 printk(KERN_INFO PFX "status report lost?\n");
1824 1878
1825 netif_tx_lock_bh(dev); 1879 netif_poll_disable(hw->dev[0]); /* stop NAPI poll */
1826 sky2_tx_complete(sky2, report); 1880 synchronize_irq(hw->pdev->irq);
1827 netif_tx_unlock_bh(dev);
1828 } else {
1829 printk(KERN_INFO PFX "hardware hung? flushing\n");
1830 1881
1831 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); 1882 netif_start_queue(dev); /* don't wakeup during flush */
1832 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1883 sky2_tx_complete(sky2, sky2->tx_prod); /* Flush transmit queue */
1833 1884
1834 sky2_tx_clean(dev); 1885 sky2_write32(hw, B0_IMSK, imask);
1835 1886
1836 sky2_qset(hw, txq); 1887 sky2_phy_reinit(sky2); /* this clears flow control etc */
1837 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1838 }
1839} 1888}
1840 1889
1841static int sky2_change_mtu(struct net_device *dev, int new_mtu) 1890static int sky2_change_mtu(struct net_device *dev, int new_mtu)
@@ -1849,8 +1898,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1849 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 1898 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1850 return -EINVAL; 1899 return -EINVAL;
1851 1900
1901 /* TSO on Yukon Ultra and MTU > 1500 not supported */
1852 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN) 1902 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
1853 return -EINVAL; 1903 dev->features &= ~NETIF_F_TSO;
1854 1904
1855 if (!netif_running(dev)) { 1905 if (!netif_running(dev)) {
1856 dev->mtu = new_mtu; 1906 dev->mtu = new_mtu;
@@ -2089,6 +2139,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
2089 goto force_update; 2139 goto force_update;
2090 2140
2091 skb->protocol = eth_type_trans(skb, dev); 2141 skb->protocol = eth_type_trans(skb, dev);
2142 sky2->net_stats.rx_packets++;
2143 sky2->net_stats.rx_bytes += skb->len;
2092 dev->last_rx = jiffies; 2144 dev->last_rx = jiffies;
2093 2145
2094#ifdef SKY2_VLAN_TAG_USED 2146#ifdef SKY2_VLAN_TAG_USED
@@ -2218,8 +2270,8 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2218 2270
2219 pci_err = sky2_pci_read16(hw, PCI_STATUS); 2271 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2220 if (net_ratelimit()) 2272 if (net_ratelimit())
2221 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", 2273 dev_err(&hw->pdev->dev, "PCI hardware error (0x%x)\n",
2222 pci_name(hw->pdev), pci_err); 2274 pci_err);
2223 2275
2224 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2276 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2225 sky2_pci_write16(hw, PCI_STATUS, 2277 sky2_pci_write16(hw, PCI_STATUS,
@@ -2234,8 +2286,8 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2234 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT); 2286 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
2235 2287
2236 if (net_ratelimit()) 2288 if (net_ratelimit())
2237 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 2289 dev_err(&hw->pdev->dev, "PCI Express error (0x%x)\n",
2238 pci_name(hw->pdev), pex_err); 2290 pex_err);
2239 2291
2240 /* clear the interrupt */ 2292 /* clear the interrupt */
2241 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2293 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2404,6 +2456,7 @@ static inline u32 sky2_mhz(const struct sky2_hw *hw)
2404 switch (hw->chip_id) { 2456 switch (hw->chip_id) {
2405 case CHIP_ID_YUKON_EC: 2457 case CHIP_ID_YUKON_EC:
2406 case CHIP_ID_YUKON_EC_U: 2458 case CHIP_ID_YUKON_EC_U:
2459 case CHIP_ID_YUKON_EX:
2407 return 125; /* 125 Mhz */ 2460 return 125; /* 125 Mhz */
2408 case CHIP_ID_YUKON_FE: 2461 case CHIP_ID_YUKON_FE:
2409 return 100; /* 100 Mhz */ 2462 return 100; /* 100 Mhz */
@@ -2423,34 +2476,62 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2423} 2476}
2424 2477
2425 2478
2426static int sky2_reset(struct sky2_hw *hw) 2479static int __devinit sky2_init(struct sky2_hw *hw)
2427{ 2480{
2428 u16 status;
2429 u8 t8; 2481 u8 t8;
2430 int i;
2431 2482
2432 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2483 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2433 2484
2434 hw->chip_id = sky2_read8(hw, B2_CHIP_ID); 2485 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
2435 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) { 2486 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
2436 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n", 2487 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
2437 pci_name(hw->pdev), hw->chip_id); 2488 hw->chip_id);
2438 return -EOPNOTSUPP; 2489 return -EOPNOTSUPP;
2439 } 2490 }
2440 2491
2492 if (hw->chip_id == CHIP_ID_YUKON_EX)
2493 dev_warn(&hw->pdev->dev, "this driver not yet tested on this chip type\n"
2494 "Please report success or failure to <netdev@vger.kernel.org>\n");
2495
2496 /* Make sure and enable all clocks */
2497 if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U)
2498 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
2499
2441 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; 2500 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2442 2501
2443 /* This rev is really old, and requires untested workarounds */ 2502 /* This rev is really old, and requires untested workarounds */
2444 if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) { 2503 if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
2445 printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n", 2504 dev_err(&hw->pdev->dev, "unsupported revision Yukon-%s (0x%x) rev %d\n",
2446 pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], 2505 yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
2447 hw->chip_id, hw->chip_rev); 2506 hw->chip_id, hw->chip_rev);
2448 return -EOPNOTSUPP; 2507 return -EOPNOTSUPP;
2449 } 2508 }
2450 2509
2510 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
2511 hw->ports = 1;
2512 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2513 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2514 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2515 ++hw->ports;
2516 }
2517
2518 return 0;
2519}
2520
2521static void sky2_reset(struct sky2_hw *hw)
2522{
2523 u16 status;
2524 int i;
2525
2451 /* disable ASF */ 2526 /* disable ASF */
2452 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 2527 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2453 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 2528 if (hw->chip_id == CHIP_ID_YUKON_EX) {
2529 status = sky2_read16(hw, HCU_CCSR);
2530 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
2531 HCU_CCSR_UC_STATE_MSK);
2532 sky2_write16(hw, HCU_CCSR, status);
2533 } else
2534 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2454 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); 2535 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2455 } 2536 }
2456 2537
@@ -2472,15 +2553,7 @@ static int sky2_reset(struct sky2_hw *hw)
2472 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); 2553 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
2473 2554
2474 2555
2475 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); 2556 sky2_power_on(hw);
2476 hw->ports = 1;
2477 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2478 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2479 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2480 ++hw->ports;
2481 }
2482
2483 sky2_set_power_state(hw, PCI_D0);
2484 2557
2485 for (i = 0; i < hw->ports; i++) { 2558 for (i = 0; i < hw->ports; i++) {
2486 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2559 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -2563,7 +2636,37 @@ static int sky2_reset(struct sky2_hw *hw)
2563 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); 2636 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2564 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); 2637 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2565 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); 2638 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2639}
2640
2641static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
2642{
2643 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
2644}
2645
2646static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2647{
2648 const struct sky2_port *sky2 = netdev_priv(dev);
2649
2650 wol->supported = sky2_wol_supported(sky2->hw);
2651 wol->wolopts = sky2->wol;
2652}
2653
2654static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2655{
2656 struct sky2_port *sky2 = netdev_priv(dev);
2657 struct sky2_hw *hw = sky2->hw;
2658
2659 if (wol->wolopts & ~sky2_wol_supported(sky2->hw))
2660 return -EOPNOTSUPP;
2661
2662 sky2->wol = wol->wolopts;
2663
2664 if (hw->chip_id == CHIP_ID_YUKON_EC_U)
2665 sky2_write32(hw, B0_CTST, sky2->wol
2666 ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
2566 2667
2668 if (!netif_running(dev))
2669 sky2_wol_init(sky2);
2567 return 0; 2670 return 0;
2568} 2671}
2569 2672
@@ -2814,25 +2917,9 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2814 } 2917 }
2815} 2918}
2816 2919
2817/* Use hardware MIB variables for critical path statistics and
2818 * transmit feedback not reported at interrupt.
2819 * Other errors are accounted for in interrupt handler.
2820 */
2821static struct net_device_stats *sky2_get_stats(struct net_device *dev) 2920static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2822{ 2921{
2823 struct sky2_port *sky2 = netdev_priv(dev); 2922 struct sky2_port *sky2 = netdev_priv(dev);
2824 u64 data[13];
2825
2826 sky2_phy_stats(sky2, data, ARRAY_SIZE(data));
2827
2828 sky2->net_stats.tx_bytes = data[0];
2829 sky2->net_stats.rx_bytes = data[1];
2830 sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
2831 sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
2832 sky2->net_stats.multicast = data[3] + data[5];
2833 sky2->net_stats.collisions = data[10];
2834 sky2->net_stats.tx_aborted_errors = data[12];
2835
2836 return &sky2->net_stats; 2923 return &sky2->net_stats;
2837} 2924}
2838 2925
@@ -3191,7 +3278,9 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3191static const struct ethtool_ops sky2_ethtool_ops = { 3278static const struct ethtool_ops sky2_ethtool_ops = {
3192 .get_settings = sky2_get_settings, 3279 .get_settings = sky2_get_settings,
3193 .set_settings = sky2_set_settings, 3280 .set_settings = sky2_set_settings,
3194 .get_drvinfo = sky2_get_drvinfo, 3281 .get_drvinfo = sky2_get_drvinfo,
3282 .get_wol = sky2_get_wol,
3283 .set_wol = sky2_set_wol,
3195 .get_msglevel = sky2_get_msglevel, 3284 .get_msglevel = sky2_get_msglevel,
3196 .set_msglevel = sky2_set_msglevel, 3285 .set_msglevel = sky2_set_msglevel,
3197 .nway_reset = sky2_nway_reset, 3286 .nway_reset = sky2_nway_reset,
@@ -3221,13 +3310,14 @@ static const struct ethtool_ops sky2_ethtool_ops = {
3221 3310
3222/* Initialize network device */ 3311/* Initialize network device */
3223static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, 3312static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3224 unsigned port, int highmem) 3313 unsigned port,
3314 int highmem, int wol)
3225{ 3315{
3226 struct sky2_port *sky2; 3316 struct sky2_port *sky2;
3227 struct net_device *dev = alloc_etherdev(sizeof(*sky2)); 3317 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
3228 3318
3229 if (!dev) { 3319 if (!dev) {
3230 printk(KERN_ERR "sky2 etherdev alloc failed"); 3320 dev_err(&hw->pdev->dev, "etherdev alloc failed");
3231 return NULL; 3321 return NULL;
3232 } 3322 }
3233 3323
@@ -3269,6 +3359,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3269 sky2->speed = -1; 3359 sky2->speed = -1;
3270 sky2->advertising = sky2_supported_modes(hw); 3360 sky2->advertising = sky2_supported_modes(hw);
3271 sky2->rx_csum = 1; 3361 sky2->rx_csum = 1;
3362 sky2->wol = wol;
3272 3363
3273 spin_lock_init(&sky2->phy_lock); 3364 spin_lock_init(&sky2->phy_lock);
3274 sky2->tx_pending = TX_DEF_PENDING; 3365 sky2->tx_pending = TX_DEF_PENDING;
@@ -3278,11 +3369,9 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3278 3369
3279 sky2->port = port; 3370 sky2->port = port;
3280 3371
3281 if (hw->chip_id != CHIP_ID_YUKON_EC_U) 3372 dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
3282 dev->features |= NETIF_F_TSO;
3283 if (highmem) 3373 if (highmem)
3284 dev->features |= NETIF_F_HIGHDMA; 3374 dev->features |= NETIF_F_HIGHDMA;
3285 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3286 3375
3287#ifdef SKY2_VLAN_TAG_USED 3376#ifdef SKY2_VLAN_TAG_USED
3288 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3377 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -3343,8 +3432,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3343 3432
3344 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); 3433 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
3345 if (err) { 3434 if (err) {
3346 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3435 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
3347 pci_name(pdev), pdev->irq);
3348 return err; 3436 return err;
3349 } 3437 }
3350 3438
@@ -3355,9 +3443,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3355 3443
3356 if (!hw->msi) { 3444 if (!hw->msi) {
3357 /* MSI test failed, go back to INTx mode */ 3445 /* MSI test failed, go back to INTx mode */
3358 printk(KERN_INFO PFX "%s: No interrupt generated using MSI, " 3446 dev_info(&pdev->dev, "No interrupt generated using MSI, "
3359 "switching to INTx mode.\n", 3447 "switching to INTx mode.\n");
3360 pci_name(pdev));
3361 3448
3362 err = -EOPNOTSUPP; 3449 err = -EOPNOTSUPP;
3363 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); 3450 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
@@ -3371,62 +3458,62 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3371 return err; 3458 return err;
3372} 3459}
3373 3460
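Several hunks in this series swap hand-rolled printk(KERN_ERR PFX "%s: ...", pci_name(pdev), ...) messages for dev_err()/dev_info()/dev_warn(), which take the struct device and prepend the driver name and bus address themselves. A small before/after sketch of the idiom (message text is illustrative only):

	/* Old style: caller builds the device prefix by hand. */
	printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
	       pci_name(pdev), pdev->irq);

	/* New style: dev_err() derives the "driver 0000:bb:dd.f:" prefix itself. */
	dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);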
3461static int __devinit pci_wake_enabled(struct pci_dev *dev)
3462{
3463 int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3464 u16 value;
3465
3466 if (!pm)
3467 return 0;
3468 if (pci_read_config_word(dev, pm + PCI_PM_CTRL, &value))
3469 return 0;
3470 return value & PCI_PM_CTRL_PME_ENABLE;
3471}
3472
3374static int __devinit sky2_probe(struct pci_dev *pdev, 3473static int __devinit sky2_probe(struct pci_dev *pdev,
3375 const struct pci_device_id *ent) 3474 const struct pci_device_id *ent)
3376{ 3475{
3377 struct net_device *dev, *dev1 = NULL; 3476 struct net_device *dev;
3378 struct sky2_hw *hw; 3477 struct sky2_hw *hw;
3379 int err, pm_cap, using_dac = 0; 3478 int err, using_dac = 0, wol_default;
3380 3479
3381 err = pci_enable_device(pdev); 3480 err = pci_enable_device(pdev);
3382 if (err) { 3481 if (err) {
3383 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3482 dev_err(&pdev->dev, "cannot enable PCI device\n");
3384 pci_name(pdev));
3385 goto err_out; 3483 goto err_out;
3386 } 3484 }
3387 3485
3388 err = pci_request_regions(pdev, DRV_NAME); 3486 err = pci_request_regions(pdev, DRV_NAME);
3389 if (err) { 3487 if (err) {
3390 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3488 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
3391 pci_name(pdev));
3392 goto err_out; 3489 goto err_out;
3393 } 3490 }
3394 3491
3395 pci_set_master(pdev); 3492 pci_set_master(pdev);
3396 3493
3397 /* Find power-management capability. */
3398 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3399 if (pm_cap == 0) {
3400 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
3401 "aborting.\n");
3402 err = -EIO;
3403 goto err_out_free_regions;
3404 }
3405
3406 if (sizeof(dma_addr_t) > sizeof(u32) && 3494 if (sizeof(dma_addr_t) > sizeof(u32) &&
3407 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 3495 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3408 using_dac = 1; 3496 using_dac = 1;
3409 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3497 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3410 if (err < 0) { 3498 if (err < 0) {
3411 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA " 3499 dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
3412 "for consistent allocations\n", pci_name(pdev)); 3500 "for consistent allocations\n");
3413 goto err_out_free_regions; 3501 goto err_out_free_regions;
3414 } 3502 }
3415
3416 } else { 3503 } else {
3417 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3504 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3418 if (err) { 3505 if (err) {
3419 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3506 dev_err(&pdev->dev, "no usable DMA configuration\n");
3420 pci_name(pdev));
3421 goto err_out_free_regions; 3507 goto err_out_free_regions;
3422 } 3508 }
3423 } 3509 }
3424 3510
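The DMA setup above keeps the usual two-step negotiation: ask for a 64-bit mask when dma_addr_t is wide enough for it to matter, otherwise (or on failure) fall back to 32-bit addressing. Condensed, with error handling reduced to the goto labels already used in this probe function:

	int using_dac = 0;

	if (sizeof(dma_addr_t) > sizeof(u32) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		using_dac = 1;	/* streaming mappings may sit above 4 GB */
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			dev_err(&pdev->dev,
				"unable to obtain 64 bit DMA for consistent allocations\n");
			goto err_out_free_regions;
		}
	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_out_free_regions;
	}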
3511 wol_default = pci_wake_enabled(pdev) ? WAKE_MAGIC : 0;
3512
3425 err = -ENOMEM; 3513 err = -ENOMEM;
3426 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3514 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3427 if (!hw) { 3515 if (!hw) {
3428 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", 3516 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3429 pci_name(pdev));
3430 goto err_out_free_regions; 3517 goto err_out_free_regions;
3431 } 3518 }
3432 3519
@@ -3434,11 +3521,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3434 3521
3435 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3522 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3436 if (!hw->regs) { 3523 if (!hw->regs) {
3437 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3524 dev_err(&pdev->dev, "cannot map device registers\n");
3438 pci_name(pdev));
3439 goto err_out_free_hw; 3525 goto err_out_free_hw;
3440 } 3526 }
3441 hw->pm_cap = pm_cap;
3442 3527
3443#ifdef __BIG_ENDIAN 3528#ifdef __BIG_ENDIAN
3444 /* The sk98lin vendor driver uses hardware byte swapping but 3529 /* The sk98lin vendor driver uses hardware byte swapping but
@@ -3458,18 +3543,22 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3458 if (!hw->st_le) 3543 if (!hw->st_le)
3459 goto err_out_iounmap; 3544 goto err_out_iounmap;
3460 3545
3461 err = sky2_reset(hw); 3546 err = sky2_init(hw);
3462 if (err) 3547 if (err)
3463 goto err_out_iounmap; 3548 goto err_out_iounmap;
3464 3549
3465 printk(KERN_INFO PFX "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n", 3550 dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
3466 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0), 3551 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
3467 pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], 3552 pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
3468 hw->chip_id, hw->chip_rev); 3553 hw->chip_id, hw->chip_rev);
3469 3554
3470 dev = sky2_init_netdev(hw, 0, using_dac); 3555 sky2_reset(hw);
3471 if (!dev) 3556
3557 dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
3558 if (!dev) {
3559 err = -ENOMEM;
3472 goto err_out_free_pci; 3560 goto err_out_free_pci;
3561 }
3473 3562
3474 if (!disable_msi && pci_enable_msi(pdev) == 0) { 3563 if (!disable_msi && pci_enable_msi(pdev) == 0) {
3475 err = sky2_test_msi(hw); 3564 err = sky2_test_msi(hw);
@@ -3481,32 +3570,33 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3481 3570
3482 err = register_netdev(dev); 3571 err = register_netdev(dev);
3483 if (err) { 3572 if (err) {
3484 printk(KERN_ERR PFX "%s: cannot register net device\n", 3573 dev_err(&pdev->dev, "cannot register net device\n");
3485 pci_name(pdev));
3486 goto err_out_free_netdev; 3574 goto err_out_free_netdev;
3487 } 3575 }
3488 3576
3489 err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED, 3577 err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED,
3490 dev->name, hw); 3578 dev->name, hw);
3491 if (err) { 3579 if (err) {
3492 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3580 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
3493 pci_name(pdev), pdev->irq);
3494 goto err_out_unregister; 3581 goto err_out_unregister;
3495 } 3582 }
3496 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3583 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3497 3584
3498 sky2_show_addr(dev); 3585 sky2_show_addr(dev);
3499 3586
3500 if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) { 3587 if (hw->ports > 1) {
3501 if (register_netdev(dev1) == 0) 3588 struct net_device *dev1;
3502 sky2_show_addr(dev1); 3589
3503 else { 3590 dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
3504 /* Failure to register second port need not be fatal */ 3591 if (!dev1)
3505 printk(KERN_WARNING PFX 3592 dev_warn(&pdev->dev, "allocation for second device failed\n");
3506 "register of second port failed\n"); 3593 else if ((err = register_netdev(dev1))) {
3594 dev_warn(&pdev->dev,
3595 "register of second port failed (%d)\n", err);
3507 hw->dev[1] = NULL; 3596 hw->dev[1] = NULL;
3508 free_netdev(dev1); 3597 free_netdev(dev1);
3509 } 3598 } else
3599 sky2_show_addr(dev1);
3510 } 3600 }
3511 3601
3512 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); 3602 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
@@ -3555,7 +3645,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3555 unregister_netdev(dev1); 3645 unregister_netdev(dev1);
3556 unregister_netdev(dev0); 3646 unregister_netdev(dev0);
3557 3647
3558 sky2_set_power_state(hw, PCI_D3hot); 3648 sky2_power_aux(hw);
3649
3559 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); 3650 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
3560 sky2_write8(hw, B0_CTST, CS_RST_SET); 3651 sky2_write8(hw, B0_CTST, CS_RST_SET);
3561 sky2_read8(hw, B0_CTST); 3652 sky2_read8(hw, B0_CTST);
@@ -3580,27 +3671,31 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3580static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) 3671static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
3581{ 3672{
3582 struct sky2_hw *hw = pci_get_drvdata(pdev); 3673 struct sky2_hw *hw = pci_get_drvdata(pdev);
3583 int i; 3674 int i, wol = 0;
3584 pci_power_t pstate = pci_choose_state(pdev, state);
3585
3586 if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
3587 return -EINVAL;
3588 3675
3589 del_timer_sync(&hw->idle_timer); 3676 del_timer_sync(&hw->idle_timer);
3590 netif_poll_disable(hw->dev[0]); 3677 netif_poll_disable(hw->dev[0]);
3591 3678
3592 for (i = 0; i < hw->ports; i++) { 3679 for (i = 0; i < hw->ports; i++) {
3593 struct net_device *dev = hw->dev[i]; 3680 struct net_device *dev = hw->dev[i];
3681 struct sky2_port *sky2 = netdev_priv(dev);
3594 3682
3595 if (netif_running(dev)) { 3683 if (netif_running(dev))
3596 sky2_down(dev); 3684 sky2_down(dev);
3597 netif_device_detach(dev); 3685
3598 } 3686 if (sky2->wol)
3687 sky2_wol_init(sky2);
3688
3689 wol |= sky2->wol;
3599 } 3690 }
3600 3691
3601 sky2_write32(hw, B0_IMSK, 0); 3692 sky2_write32(hw, B0_IMSK, 0);
3693 sky2_power_aux(hw);
3694
3602 pci_save_state(pdev); 3695 pci_save_state(pdev);
3603 sky2_set_power_state(hw, pstate); 3696 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
3697 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3698
3604 return 0; 3699 return 0;
3605} 3700}
3606 3701
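The reworked suspend handler leans entirely on the generic PCI helpers: quiesce each port, program Wake-on-LAN where it was requested, then arm PME# and drop into the state chosen by the PM core. Reduced to a single-port skeleton (foo_priv, foo_stop_hw, foo_program_wol and the wol_enabled flag are placeholders, not sky2 names):

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct foo_priv *priv = pci_get_drvdata(pdev);
	pci_power_t target = pci_choose_state(pdev, state);

	if (netif_running(priv->netdev))
		foo_stop_hw(priv);		/* stop DMA, drain queues, etc. */

	if (priv->wol_enabled)
		foo_program_wol(priv);		/* placeholder: set up magic-packet match */

	pci_save_state(pdev);
	pci_enable_wake(pdev, target, priv->wol_enabled);
	pci_set_power_state(pdev, target);
	return 0;
}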
@@ -3609,21 +3704,22 @@ static int sky2_resume(struct pci_dev *pdev)
3609 struct sky2_hw *hw = pci_get_drvdata(pdev); 3704 struct sky2_hw *hw = pci_get_drvdata(pdev);
3610 int i, err; 3705 int i, err;
3611 3706
3612 pci_restore_state(pdev); 3707 err = pci_set_power_state(pdev, PCI_D0);
3613 pci_enable_wake(pdev, PCI_D0, 0); 3708 if (err)
3614 sky2_set_power_state(hw, PCI_D0); 3709 goto out;
3615 3710
3616 err = sky2_reset(hw); 3711 err = pci_restore_state(pdev);
3617 if (err) 3712 if (err)
3618 goto out; 3713 goto out;
3619 3714
3715 pci_enable_wake(pdev, PCI_D0, 0);
3716 sky2_reset(hw);
3717
3620 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3718 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3621 3719
3622 for (i = 0; i < hw->ports; i++) { 3720 for (i = 0; i < hw->ports; i++) {
3623 struct net_device *dev = hw->dev[i]; 3721 struct net_device *dev = hw->dev[i];
3624 if (netif_running(dev)) { 3722 if (netif_running(dev)) {
3625 netif_device_attach(dev);
3626
3627 err = sky2_up(dev); 3723 err = sky2_up(dev);
3628 if (err) { 3724 if (err) {
3629 printk(KERN_ERR PFX "%s: could not up: %d\n", 3725 printk(KERN_ERR PFX "%s: could not up: %d\n",
@@ -3636,11 +3732,43 @@ static int sky2_resume(struct pci_dev *pdev)
3636 3732
3637 netif_poll_enable(hw->dev[0]); 3733 netif_poll_enable(hw->dev[0]);
3638 sky2_idle_start(hw); 3734 sky2_idle_start(hw);
3735 return 0;
3639out: 3736out:
3737 dev_err(&pdev->dev, "resume failed (%d)\n", err);
3738 pci_disable_device(pdev);
3640 return err; 3739 return err;
3641} 3740}
3642#endif 3741#endif
3643 3742
3743static void sky2_shutdown(struct pci_dev *pdev)
3744{
3745 struct sky2_hw *hw = pci_get_drvdata(pdev);
3746 int i, wol = 0;
3747
3748 del_timer_sync(&hw->idle_timer);
3749 netif_poll_disable(hw->dev[0]);
3750
3751 for (i = 0; i < hw->ports; i++) {
3752 struct net_device *dev = hw->dev[i];
3753 struct sky2_port *sky2 = netdev_priv(dev);
3754
3755 if (sky2->wol) {
3756 wol = 1;
3757 sky2_wol_init(sky2);
3758 }
3759 }
3760
3761 if (wol)
3762 sky2_power_aux(hw);
3763
3764 pci_enable_wake(pdev, PCI_D3hot, wol);
3765 pci_enable_wake(pdev, PCI_D3cold, wol);
3766
3767 pci_disable_device(pdev);
3768 pci_set_power_state(pdev, PCI_D3hot);
3769
3770}
3771
3644static struct pci_driver sky2_driver = { 3772static struct pci_driver sky2_driver = {
3645 .name = DRV_NAME, 3773 .name = DRV_NAME,
3646 .id_table = sky2_id_table, 3774 .id_table = sky2_id_table,
@@ -3650,6 +3778,7 @@ static struct pci_driver sky2_driver = {
3650 .suspend = sky2_suspend, 3778 .suspend = sky2_suspend,
3651 .resume = sky2_resume, 3779 .resume = sky2_resume,
3652#endif 3780#endif
3781 .shutdown = sky2_shutdown,
3653}; 3782};
3654 3783
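The new .shutdown hook slots into struct pci_driver next to the probe/remove and PM callbacks; it runs on reboot and poweroff, which is what lets a WoL-armed adapter wake the machine even when the kernel is not going through the suspend path. A skeletal registration, with every foo_* symbol hypothetical:

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_id_table,
	.probe		= foo_probe,
	.remove		= __devexit_p(foo_remove),
#ifdef CONFIG_PM
	.suspend	= foo_suspend,
	.resume		= foo_resume,
#endif
	.shutdown	= foo_shutdown,	/* arm PME/WoL on halt or reboot */
};

static int __init foo_init_module(void)
{
	return pci_register_driver(&foo_driver);
}

static void __exit foo_cleanup_module(void)
{
	pci_unregister_driver(&foo_driver);
}

module_init(foo_init_module);
module_exit(foo_cleanup_module);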
3655static int __init sky2_init_module(void) 3784static int __init sky2_init_module(void)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 6ed1d47dbbd3..3b0189569d52 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -32,6 +32,7 @@ enum pci_dev_reg_1 {
32 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */ 32 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
33 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ 33 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
34 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */ 34 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
35 PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
35}; 36};
36 37
37enum pci_dev_reg_2 { 38enum pci_dev_reg_2 {
@@ -370,12 +371,9 @@ enum {
370 371
371/* B2_CHIP_ID 8 bit Chip Identification Number */ 372/* B2_CHIP_ID 8 bit Chip Identification Number */
372enum { 373enum {
373 CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
374 CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
375 CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
376 CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
377 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */ 374 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
378 CHIP_ID_YUKON_EC_U = 0xb4, /* Chip ID for YUKON-2 EC Ultra */ 375 CHIP_ID_YUKON_EC_U = 0xb4, /* Chip ID for YUKON-2 EC Ultra */
376 CHIP_ID_YUKON_EX = 0xb5, /* Chip ID for YUKON-2 Extreme */
379 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ 377 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
380 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ 378 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
381 379
@@ -767,6 +765,24 @@ enum {
767 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */ 765 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */
768}; 766};
769 767
768enum {
769 SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */
770 SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */
771};
772
773enum {
774 CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */
775 CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */
776 CPU_TIM = 0x0e50,/* 32 bit Timer Compare Register */
777 CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */
778 CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */
779 CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */
780 HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */
781 CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */
782 HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */
783 HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */
784};
785
770/* ASF Subsystem Registers (Yukon-2 only) */ 786/* ASF Subsystem Registers (Yukon-2 only) */
771enum { 787enum {
772 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */ 788 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */
@@ -837,33 +853,27 @@ enum {
837 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ 853 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
838 854
839/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ 855/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
840
841 WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */
842
843 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ 856 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
844 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ 857 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
845 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ 858 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
846 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ 859 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
847 WOL_PATT_PME = 0x0f2a,/* 8 bit WOL PME Match Enable (Yukon-2) */
848 WOL_PATT_ASFM = 0x0f2b,/* 8 bit WOL ASF Match Enable (Yukon-2) */
849 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ 860 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
850 861
851/* WOL Pattern Length Registers (YUKON only) */ 862/* WOL Pattern Length Registers (YUKON only) */
852
853 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ 863 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
854 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ 864 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
855 865
856/* WOL Pattern Counter Registers (YUKON only) */ 866/* WOL Pattern Counter Registers (YUKON only) */
857
858
859 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ 867 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
860 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ 868 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
861}; 869};
870#define WOL_REGS(port, x) (x + (port)*0x80)
862 871
863enum { 872enum {
864 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ 873 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
865 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ 874 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
866}; 875};
876#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
867 877
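WOL_REGS() and WOL_PATT_RAM_BASE() replace duplicated per-port definitions: port 1's WoL register block sits 0x80 above port 0's, and its pattern RAM 0x400 above. A small hypothetical helper just to show the addressing (example_wol_addressing is not driver code):

static void example_wol_addressing(struct sky2_hw *hw, unsigned port)
{
	/* WOL_CTRL_STAT: 0x0f20 for port 0, 0x0fa0 for port 1 */
	u16 ctrl = sky2_read16(hw, WOL_REGS(port, WOL_CTRL_STAT));

	/* Pattern RAM: 0x1000 for port 0, 0x1400 for port 1 */
	sky2_write32(hw, WOL_PATT_RAM_BASE(port), 0);

	(void)ctrl;	/* a real caller would inspect the status bits */
}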
868enum { 878enum {
869 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ 879 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
@@ -1654,6 +1664,39 @@ enum {
1654 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */ 1664 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */
1655 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */ 1665 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */
1656}; 1666};
1667/* HCU_CCSR CPU Control and Status Register */
1668enum {
1669 HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */
1670 HCU_CCSR_CPU_SLEEP = 1<<26, /* CPU sleep status */
1671 /* Clock Stretching Timeout */
1672 HCU_CCSR_CS_TO = 1<<25,
1673 HCU_CCSR_WDOG = 1<<24, /* Watchdog Reset */
1674
1675 HCU_CCSR_CLR_IRQ_HOST = 1<<17, /* Clear IRQ_HOST */
1676 HCU_CCSR_SET_IRQ_HCU = 1<<16, /* Set IRQ_HCU */
1677
1678 HCU_CCSR_AHB_RST = 1<<9, /* Reset AHB bridge */
1679 HCU_CCSR_CPU_RST_MODE = 1<<8, /* CPU Reset Mode */
1680
1681 HCU_CCSR_SET_SYNC_CPU = 1<<5,
1682 HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */
1683 HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3,
1684 HCU_CCSR_OS_PRSNT = 1<<2, /* ASF OS Present */
1685/* Microcontroller State */
1686 HCU_CCSR_UC_STATE_MSK = 3,
1687 HCU_CCSR_UC_STATE_BASE = 1<<0,
1688 HCU_CCSR_ASF_RESET = 0,
1689 HCU_CCSR_ASF_HALTED = 1<<1,
1690 HCU_CCSR_ASF_RUNNING = 1<<0,
1691};
1692
1693/* HCU_HCSR Host Control and Status Register */
1694enum {
1695 HCU_HCSR_SET_IRQ_CPU = 1<<16, /* Set IRQ_CPU */
1696
1697 HCU_HCSR_CLR_IRQ_HCU = 1<<1, /* Clear IRQ_HCU */
1698 HCU_HCSR_SET_IRQ_HOST = 1<<0, /* Set IRQ_HOST */
1699};
1657 1700
1658/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */ 1701/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
1659enum { 1702enum {
@@ -1715,14 +1758,17 @@ enum {
1715 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ 1758 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
1716 1759
1717#define GMAC_DEF_MSK GM_IS_TX_FF_UR 1760#define GMAC_DEF_MSK GM_IS_TX_FF_UR
1761};
1718 1762
1719/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ 1763/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1720 /* Bits 15.. 2: reserved */ 1764enum { /* Bits 15.. 2: reserved */
1721 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ 1765 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
1722 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ 1766 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
1767};
1723 1768
1724 1769
1725/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ 1770/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
1771enum {
1726 WOL_CTL_LINK_CHG_OCC = 1<<15, 1772 WOL_CTL_LINK_CHG_OCC = 1<<15,
1727 WOL_CTL_MAGIC_PKT_OCC = 1<<14, 1773 WOL_CTL_MAGIC_PKT_OCC = 1<<14,
1728 WOL_CTL_PATTERN_OCC = 1<<13, 1774 WOL_CTL_PATTERN_OCC = 1<<13,
@@ -1741,17 +1787,6 @@ enum {
1741 WOL_CTL_DIS_PATTERN_UNIT = 1<<0, 1787 WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
1742}; 1788};
1743 1789
1744#define WOL_CTL_DEFAULT \
1745 (WOL_CTL_DIS_PME_ON_LINK_CHG | \
1746 WOL_CTL_DIS_PME_ON_PATTERN | \
1747 WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
1748 WOL_CTL_DIS_LINK_CHG_UNIT | \
1749 WOL_CTL_DIS_PATTERN_UNIT | \
1750 WOL_CTL_DIS_MAGIC_PKT_UNIT)
1751
1752/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
1753#define WOL_CTL_PATT_ENA(x) (1 << (x))
1754
1755 1790
1756/* Control flags */ 1791/* Control flags */
1757enum { 1792enum {
@@ -1875,6 +1910,7 @@ struct sky2_port {
1875 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ 1910 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
1876 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ 1911 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
1877 u8 rx_csum; 1912 u8 rx_csum;
1913 u8 wol;
1878 enum flow_control flow_mode; 1914 enum flow_control flow_mode;
1879 enum flow_control flow_status; 1915 enum flow_control flow_status;
1880 1916
@@ -1887,7 +1923,6 @@ struct sky2_hw {
1887 struct pci_dev *pdev; 1923 struct pci_dev *pdev;
1888 struct net_device *dev[2]; 1924 struct net_device *dev[2];
1889 1925
1890 int pm_cap;
1891 u8 chip_id; 1926 u8 chip_id;
1892 u8 chip_rev; 1927 u8 chip_rev;
1893 u8 pmd_type; 1928 u8 pmd_type;
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 8ea2fc1b96cb..bf6ff39e02bb 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -280,72 +280,67 @@ spider_net_free_chain(struct spider_net_card *card,
280{ 280{
281 struct spider_net_descr *descr; 281 struct spider_net_descr *descr;
282 282
283 for (descr = chain->tail; !descr->bus_addr; descr = descr->next) { 283 descr = chain->ring;
284 pci_unmap_single(card->pdev, descr->bus_addr, 284 do {
285 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
286 descr->bus_addr = 0; 285 descr->bus_addr = 0;
287 } 286 descr->next_descr_addr = 0;
287 descr = descr->next;
288 } while (descr != chain->ring);
289
290 dma_free_coherent(&card->pdev->dev, chain->num_desc,
291 chain->ring, chain->dma_addr);
288} 292}
289 293
290/** 294/**
291 * spider_net_init_chain - links descriptor chain 295 * spider_net_init_chain - alloc and link descriptor chain
292 * @card: card structure 296 * @card: card structure
293 * @chain: address of chain 297 * @chain: address of chain
294 * @start_descr: address of descriptor array
295 * @no: number of descriptors
296 * 298 *
297 * we manage a circular list that mirrors the hardware structure, 299 * We manage a circular list that mirrors the hardware structure,
298 * except that the hardware uses bus addresses. 300 * except that the hardware uses bus addresses.
299 * 301 *
300 * returns 0 on success, <0 on failure 302 * Returns 0 on success, <0 on failure
301 */ 303 */
302static int 304static int
303spider_net_init_chain(struct spider_net_card *card, 305spider_net_init_chain(struct spider_net_card *card,
304 struct spider_net_descr_chain *chain, 306 struct spider_net_descr_chain *chain)
305 struct spider_net_descr *start_descr,
306 int no)
307{ 307{
308 int i; 308 int i;
309 struct spider_net_descr *descr; 309 struct spider_net_descr *descr;
310 dma_addr_t buf; 310 dma_addr_t buf;
311 size_t alloc_size;
311 312
312 descr = start_descr; 313 alloc_size = chain->num_desc * sizeof (struct spider_net_descr);
313 memset(descr, 0, sizeof(*descr) * no);
314 314
315 /* set up the hardware pointers in each descriptor */ 315 chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
316 for (i=0; i<no; i++, descr++) { 316 &chain->dma_addr, GFP_KERNEL);
317 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 317
318 if (!chain->ring)
319 return -ENOMEM;
318 320
319 buf = pci_map_single(card->pdev, descr, 321 descr = chain->ring;
320 SPIDER_NET_DESCR_SIZE, 322 memset(descr, 0, alloc_size);
321 PCI_DMA_BIDIRECTIONAL);
322 323
323 if (pci_dma_mapping_error(buf)) 324 /* Set up the hardware pointers in each descriptor */
324 goto iommu_error; 325 buf = chain->dma_addr;
326 for (i=0; i < chain->num_desc; i++, descr++) {
327 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
325 328
326 descr->bus_addr = buf; 329 descr->bus_addr = buf;
330 descr->next_descr_addr = 0;
327 descr->next = descr + 1; 331 descr->next = descr + 1;
328 descr->prev = descr - 1; 332 descr->prev = descr - 1;
329 333
334 buf += sizeof(struct spider_net_descr);
330 } 335 }
331 /* do actual circular list */ 336 /* do actual circular list */
332 (descr-1)->next = start_descr; 337 (descr-1)->next = chain->ring;
333 start_descr->prev = descr-1; 338 chain->ring->prev = descr-1;
334 339
335 spin_lock_init(&chain->lock); 340 spin_lock_init(&chain->lock);
336 chain->head = start_descr; 341 chain->head = chain->ring;
337 chain->tail = start_descr; 342 chain->tail = chain->ring;
338
339 return 0; 343 return 0;
340
341iommu_error:
342 descr = start_descr;
343 for (i=0; i < no; i++, descr++)
344 if (descr->bus_addr)
345 pci_unmap_single(card->pdev, descr->bus_addr,
346 SPIDER_NET_DESCR_SIZE,
347 PCI_DMA_BIDIRECTIONAL);
348 return -ENOMEM;
349} 344}
350 345
351/** 346/**
@@ -372,21 +367,20 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
372} 367}
373 368
374/** 369/**
375 * spider_net_prepare_rx_descr - reinitializes a rx descriptor 370 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
376 * @card: card structure 371 * @card: card structure
377 * @descr: descriptor to re-init 372 * @descr: descriptor to re-init
378 * 373 *
 379 * return 0 on succes, <0 on failure 374 * Return 0 on success, <0 on failure.
380 * 375 *
381 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. 376 * Allocates a new rx skb, iommu-maps it and attaches it to the
382 * Activate the descriptor state-wise 377 * descriptor. Mark the descriptor as activated, ready-to-use.
383 */ 378 */
384static int 379static int
385spider_net_prepare_rx_descr(struct spider_net_card *card, 380spider_net_prepare_rx_descr(struct spider_net_card *card,
386 struct spider_net_descr *descr) 381 struct spider_net_descr *descr)
387{ 382{
388 dma_addr_t buf; 383 dma_addr_t buf;
389 int error = 0;
390 int offset; 384 int offset;
391 int bufsize; 385 int bufsize;
392 386
@@ -414,7 +408,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
414 (SPIDER_NET_RXBUF_ALIGN - 1); 408 (SPIDER_NET_RXBUF_ALIGN - 1);
415 if (offset) 409 if (offset)
416 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 410 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
417 /* io-mmu-map the skb */ 411 /* iommu-map the skb */
418 buf = pci_map_single(card->pdev, descr->skb->data, 412 buf = pci_map_single(card->pdev, descr->skb->data,
419 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 413 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
420 descr->buf_addr = buf; 414 descr->buf_addr = buf;
@@ -425,11 +419,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
425 card->spider_stats.rx_iommu_map_error++; 419 card->spider_stats.rx_iommu_map_error++;
426 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 420 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
427 } else { 421 } else {
422 descr->next_descr_addr = 0;
423 wmb();
428 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | 424 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
429 SPIDER_NET_DMAC_NOINTR_COMPLETE; 425 SPIDER_NET_DMAC_NOINTR_COMPLETE;
426
427 wmb();
428 descr->prev->next_descr_addr = descr->bus_addr;
430 } 429 }
431 430
432 return error; 431 return 0;
433} 432}
434 433
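The barriers added here enforce the publication order a DMA ring needs: the descriptor's fields must be globally visible before the status word hands ownership to the card, and the ownership change must be visible before the previous descriptor's next pointer makes the new one reachable. The sequence, annotated (field names as in the hunk above):

	descr->buf_addr        = buf;	/* 1. fill in the receive buffer address */
	descr->next_descr_addr = 0;	/*    and terminate the forward link      */
	wmb();				/* fields visible before ownership flips  */

	descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
				 SPIDER_NET_DMAC_NOINTR_COMPLETE;
	wmb();				/* ownership visible before linking in    */

	/* only now make the descriptor reachable from its predecessor */
	descr->prev->next_descr_addr = descr->bus_addr;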
435/** 434/**
@@ -493,10 +492,10 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
493} 492}
494 493
495/** 494/**
496 * spider_net_alloc_rx_skbs - allocates rx skbs in rx descriptor chains 495 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
497 * @card: card structure 496 * @card: card structure
498 * 497 *
499 * returns 0 on success, <0 on failure 498 * Returns 0 on success, <0 on failure.
500 */ 499 */
501static int 500static int
502spider_net_alloc_rx_skbs(struct spider_net_card *card) 501spider_net_alloc_rx_skbs(struct spider_net_card *card)
@@ -507,16 +506,16 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
507 result = -ENOMEM; 506 result = -ENOMEM;
508 507
509 chain = &card->rx_chain; 508 chain = &card->rx_chain;
510 /* put at least one buffer into the chain. if this fails, 509 /* Put at least one buffer into the chain. if this fails,
511 * we've got a problem. if not, spider_net_refill_rx_chain 510 * we've got a problem. If not, spider_net_refill_rx_chain
512 * will do the rest at the end of this function */ 511 * will do the rest at the end of this function. */
513 if (spider_net_prepare_rx_descr(card, chain->head)) 512 if (spider_net_prepare_rx_descr(card, chain->head))
514 goto error; 513 goto error;
515 else 514 else
516 chain->head = chain->head->next; 515 chain->head = chain->head->next;
517 516
518 /* this will allocate the rest of the rx buffers; if not, it's 517 /* This will allocate the rest of the rx buffers;
519 * business as usual later on */ 518 * if not, it's business as usual later on. */
520 spider_net_refill_rx_chain(card); 519 spider_net_refill_rx_chain(card);
521 spider_net_enable_rxdmac(card); 520 spider_net_enable_rxdmac(card);
522 return 0; 521 return 0;
@@ -707,7 +706,7 @@ spider_net_set_low_watermark(struct spider_net_card *card)
707 } 706 }
708 707
709 /* If TX queue is short, don't even bother with interrupts */ 708 /* If TX queue is short, don't even bother with interrupts */
710 if (cnt < card->num_tx_desc/4) 709 if (cnt < card->tx_chain.num_desc/4)
711 return cnt; 710 return cnt;
712 711
713 /* Set low-watermark 3/4th's of the way into the queue. */ 712 /* Set low-watermark 3/4th's of the way into the queue. */
@@ -915,16 +914,13 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
915 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on 914 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
916 * @descr: descriptor to process 915 * @descr: descriptor to process
917 * @card: card structure 916 * @card: card structure
918 * @napi: whether caller is in NAPI context
919 *
920 * returns 1 on success, 0 if no packet was passed to the stack
921 * 917 *
922 * iommu-unmaps the skb, fills out skb structure and passes the data to the 918 * Fills out skb structure and passes the data to the stack.
923 * stack. The descriptor state is not changed. 919 * The descriptor state is not changed.
924 */ 920 */
925static int 921static void
926spider_net_pass_skb_up(struct spider_net_descr *descr, 922spider_net_pass_skb_up(struct spider_net_descr *descr,
927 struct spider_net_card *card, int napi) 923 struct spider_net_card *card)
928{ 924{
929 struct sk_buff *skb; 925 struct sk_buff *skb;
930 struct net_device *netdev; 926 struct net_device *netdev;
@@ -932,23 +928,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
932 928
933 data_status = descr->data_status; 929 data_status = descr->data_status;
934 data_error = descr->data_error; 930 data_error = descr->data_error;
935
936 netdev = card->netdev; 931 netdev = card->netdev;
937 932
938 /* unmap descriptor */
939 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
940 PCI_DMA_FROMDEVICE);
941
942 /* the cases we'll throw away the packet immediately */
943 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
944 if (netif_msg_rx_err(card))
945 pr_err("error in received descriptor found, "
946 "data_status=x%08x, data_error=x%08x\n",
947 data_status, data_error);
948 card->spider_stats.rx_desc_error++;
949 return 0;
950 }
951
952 skb = descr->skb; 933 skb = descr->skb;
953 skb->dev = netdev; 934 skb->dev = netdev;
954 skb_put(skb, descr->valid_size); 935 skb_put(skb, descr->valid_size);
@@ -977,57 +958,72 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
977 } 958 }
978 959
979 /* pass skb up to stack */ 960 /* pass skb up to stack */
980 if (napi) 961 netif_receive_skb(skb);
981 netif_receive_skb(skb);
982 else
983 netif_rx_ni(skb);
984 962
985 /* update netdevice statistics */ 963 /* update netdevice statistics */
986 card->netdev_stats.rx_packets++; 964 card->netdev_stats.rx_packets++;
987 card->netdev_stats.rx_bytes += skb->len; 965 card->netdev_stats.rx_bytes += skb->len;
966}
988 967
989 return 1; 968#ifdef DEBUG
969static void show_rx_chain(struct spider_net_card *card)
970{
971 struct spider_net_descr_chain *chain = &card->rx_chain;
972 struct spider_net_descr *start= chain->tail;
973 struct spider_net_descr *descr= start;
974 int status;
975
976 int cnt = 0;
977 int cstat = spider_net_get_descr_status(descr);
978 printk(KERN_INFO "RX chain tail at descr=%ld\n",
979 (start - card->descr) - card->tx_chain.num_desc);
980 status = cstat;
981 do
982 {
983 status = spider_net_get_descr_status(descr);
984 if (cstat != status) {
985 printk(KERN_INFO "Have %d descrs with stat=x%08x\n", cnt, cstat);
986 cstat = status;
987 cnt = 0;
988 }
989 cnt ++;
990 descr = descr->next;
991 } while (descr != start);
992 printk(KERN_INFO "Last %d descrs with stat=x%08x\n", cnt, cstat);
990} 993}
994#endif
991 995
992/** 996/**
993 * spider_net_decode_one_descr - processes an rx descriptor 997 * spider_net_decode_one_descr - processes an rx descriptor
994 * @card: card structure 998 * @card: card structure
995 * @napi: whether caller is in NAPI context
996 * 999 *
997 * returns 1 if a packet has been sent to the stack, otherwise 0 1000 * Returns 1 if a packet has been sent to the stack, otherwise 0
998 * 1001 *
999 * processes an rx descriptor by iommu-unmapping the data buffer and passing 1002 * Processes an rx descriptor by iommu-unmapping the data buffer and passing
1000 * the packet up to the stack. This function is called in softirq 1003 * the packet up to the stack. This function is called in softirq
1001 * context, e.g. either bottom half from interrupt or NAPI polling context 1004 * context, e.g. either bottom half from interrupt or NAPI polling context
1002 */ 1005 */
1003static int 1006static int
1004spider_net_decode_one_descr(struct spider_net_card *card, int napi) 1007spider_net_decode_one_descr(struct spider_net_card *card)
1005{ 1008{
1006 struct spider_net_descr_chain *chain = &card->rx_chain; 1009 struct spider_net_descr_chain *chain = &card->rx_chain;
1007 struct spider_net_descr *descr = chain->tail; 1010 struct spider_net_descr *descr = chain->tail;
1008 int status; 1011 int status;
1009 int result;
1010 1012
1011 status = spider_net_get_descr_status(descr); 1013 status = spider_net_get_descr_status(descr);
1012 1014
1013 if (status == SPIDER_NET_DESCR_CARDOWNED) { 1015 /* Nothing in the descriptor, or ring must be empty */
1014 /* nothing in the descriptor yet */ 1016 if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
1015 result=0; 1017 (status == SPIDER_NET_DESCR_NOT_IN_USE))
1016 goto out; 1018 return 0;
1017 }
1018
1019 if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
1020 /* not initialized yet, the ring must be empty */
1021 spider_net_refill_rx_chain(card);
1022 spider_net_enable_rxdmac(card);
1023 result=0;
1024 goto out;
1025 }
1026 1019
1027 /* descriptor definitively used -- move on tail */ 1020 /* descriptor definitively used -- move on tail */
1028 chain->tail = descr->next; 1021 chain->tail = descr->next;
1029 1022
1030 result = 0; 1023 /* unmap descriptor */
1024 pci_unmap_single(card->pdev, descr->buf_addr,
1025 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1026
1031 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || 1027 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1032 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || 1028 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1033 (status == SPIDER_NET_DESCR_FORCE_END) ) { 1029 (status == SPIDER_NET_DESCR_FORCE_END) ) {
@@ -1035,31 +1031,55 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1035 pr_err("%s: dropping RX descriptor with state %d\n", 1031 pr_err("%s: dropping RX descriptor with state %d\n",
1036 card->netdev->name, status); 1032 card->netdev->name, status);
1037 card->netdev_stats.rx_dropped++; 1033 card->netdev_stats.rx_dropped++;
1038 pci_unmap_single(card->pdev, descr->buf_addr, 1034 goto bad_desc;
1039 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1040 dev_kfree_skb_irq(descr->skb);
1041 goto refill;
1042 } 1035 }
1043 1036
1044 if ( (status != SPIDER_NET_DESCR_COMPLETE) && 1037 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1045 (status != SPIDER_NET_DESCR_FRAME_END) ) { 1038 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1046 if (netif_msg_rx_err(card)) { 1039 if (netif_msg_rx_err(card))
1047 pr_err("%s: RX descriptor with state %d\n", 1040 pr_err("%s: RX descriptor with unkown state %d\n",
1048 card->netdev->name, status); 1041 card->netdev->name, status);
1049 card->spider_stats.rx_desc_unk_state++; 1042 card->spider_stats.rx_desc_unk_state++;
1050 } 1043 goto bad_desc;
1051 goto refill;
1052 } 1044 }
1053 1045
1054 /* ok, we've got a packet in descr */ 1046 /* The cases we'll throw away the packet immediately */
1055 result = spider_net_pass_skb_up(descr, card, napi); 1047 if (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1056refill: 1048 if (netif_msg_rx_err(card))
1049 pr_err("%s: error in received descriptor found, "
1050 "data_status=x%08x, data_error=x%08x\n",
1051 card->netdev->name,
1052 descr->data_status, descr->data_error);
1053 goto bad_desc;
1054 }
1055
1056 if (descr->dmac_cmd_status & 0xfefe) {
1057 pr_err("%s: bad status, cmd_status=x%08x\n",
1058 card->netdev->name,
1059 descr->dmac_cmd_status);
1060 pr_err("buf_addr=x%08x\n", descr->buf_addr);
1061 pr_err("buf_size=x%08x\n", descr->buf_size);
1062 pr_err("next_descr_addr=x%08x\n", descr->next_descr_addr);
1063 pr_err("result_size=x%08x\n", descr->result_size);
1064 pr_err("valid_size=x%08x\n", descr->valid_size);
1065 pr_err("data_status=x%08x\n", descr->data_status);
1066 pr_err("data_error=x%08x\n", descr->data_error);
1067 pr_err("bus_addr=x%08x\n", descr->bus_addr);
1068 pr_err("which=%ld\n", descr - card->rx_chain.ring);
1069
1070 card->spider_stats.rx_desc_error++;
1071 goto bad_desc;
1072 }
1073
1074 /* Ok, we've got a packet in descr */
1075 spider_net_pass_skb_up(descr, card);
1057 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1076 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1058 /* change the descriptor state: */ 1077 return 1;
1059 if (!napi) 1078
1060 spider_net_refill_rx_chain(card); 1079bad_desc:
1061out: 1080 dev_kfree_skb_irq(descr->skb);
1062 return result; 1081 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1082 return 0;
1063} 1083}
1064 1084
1065/** 1085/**
@@ -1085,7 +1105,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1085 packets_to_do = min(*budget, netdev->quota); 1105 packets_to_do = min(*budget, netdev->quota);
1086 1106
1087 while (packets_to_do) { 1107 while (packets_to_do) {
1088 if (spider_net_decode_one_descr(card, 1)) { 1108 if (spider_net_decode_one_descr(card)) {
1089 packets_done++; 1109 packets_done++;
1090 packets_to_do--; 1110 packets_to_do--;
1091 } else { 1111 } else {
@@ -1098,6 +1118,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1098 netdev->quota -= packets_done; 1118 netdev->quota -= packets_done;
1099 *budget -= packets_done; 1119 *budget -= packets_done;
1100 spider_net_refill_rx_chain(card); 1120 spider_net_refill_rx_chain(card);
1121 spider_net_enable_rxdmac(card);
1101 1122
1102 /* if all packets are in the stack, enable interrupts and return 0 */ 1123 /* if all packets are in the stack, enable interrupts and return 0 */
1103 /* if not, return 1 */ 1124 /* if not, return 1 */
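spider_net_poll follows the ->poll(dev, budget) contract of this kernel generation: consume up to min(*budget, dev->quota) packets, charge both counters, and either stay on the poll list (return 1) or call netif_rx_complete() and re-enable the RX interrupt (return 0). A generic outline, with foo_rx_one and foo_rx_irq_on as hypothetical helpers:

static int foo_poll(struct net_device *netdev, int *budget)
{
	int todo = min(*budget, netdev->quota);
	int done = 0;

	while (done < todo && foo_rx_one(netdev))	/* one packet per call */
		done++;

	netdev->quota -= done;
	*budget -= done;

	if (done < todo) {			/* ring drained */
		netif_rx_complete(netdev);
		foo_rx_irq_on(netdev);		/* let the next packet interrupt again */
		return 0;
	}
	return 1;				/* budget exhausted, poll again */
}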
@@ -1227,24 +1248,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
1227} 1248}
1228 1249
1229/** 1250/**
1230 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1231 * @card: card structure
1232 *
1233 * spider_net_handle_rxram_full empties the RX ring so that spider can put
1234 * more packets in it and empty its RX RAM. This is called in bottom half
1235 * context
1236 */
1237static void
1238spider_net_handle_rxram_full(struct spider_net_card *card)
1239{
1240 while (spider_net_decode_one_descr(card, 0))
1241 ;
1242 spider_net_enable_rxchtails(card);
1243 spider_net_enable_rxdmac(card);
1244 netif_rx_schedule(card->netdev);
1245}
1246
1247/**
1248 * spider_net_handle_error_irq - handles errors raised by an interrupt 1251 * spider_net_handle_error_irq - handles errors raised by an interrupt
1249 * @card: card structure 1252 * @card: card structure
1250 * @status_reg: interrupt status register 0 (GHIINT0STS) 1253 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1366,10 +1369,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1366 case SPIDER_NET_GRFAFLLINT: /* fallthrough */ 1369 case SPIDER_NET_GRFAFLLINT: /* fallthrough */
1367 case SPIDER_NET_GRMFLLINT: 1370 case SPIDER_NET_GRMFLLINT:
1368 if (netif_msg_intr(card) && net_ratelimit()) 1371 if (netif_msg_intr(card) && net_ratelimit())
1369 pr_debug("Spider RX RAM full, incoming packets " 1372 pr_err("Spider RX RAM full, incoming packets "
1370 "might be discarded!\n"); 1373 "might be discarded!\n");
1371 spider_net_rx_irq_off(card); 1374 spider_net_rx_irq_off(card);
1372 tasklet_schedule(&card->rxram_full_tl); 1375 netif_rx_schedule(card->netdev);
1373 show_error = 0; 1376 show_error = 0;
1374 break; 1377 break;
1375 1378
@@ -1384,7 +1387,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1384 case SPIDER_NET_GDCDCEINT: /* fallthrough */ 1387 case SPIDER_NET_GDCDCEINT: /* fallthrough */
1385 case SPIDER_NET_GDBDCEINT: /* fallthrough */ 1388 case SPIDER_NET_GDBDCEINT: /* fallthrough */
1386 case SPIDER_NET_GDADCEINT: 1389 case SPIDER_NET_GDADCEINT:
1387 if (netif_msg_intr(card)) 1390 if (netif_msg_intr(card) && net_ratelimit())
1388 pr_err("got descriptor chain end interrupt, " 1391 pr_err("got descriptor chain end interrupt, "
1389 "restarting DMAC %c.\n", 1392 "restarting DMAC %c.\n",
1390 'D'-(i-SPIDER_NET_GDDDCEINT)/3); 1393 'D'-(i-SPIDER_NET_GDDDCEINT)/3);
@@ -1455,7 +1458,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1455 break; 1458 break;
1456 } 1459 }
1457 1460
1458 if ((show_error) && (netif_msg_intr(card))) 1461 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
1459 pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, " 1462 pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
1460 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n", 1463 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1461 card->netdev->name, 1464 card->netdev->name,
@@ -1651,27 +1654,18 @@ int
1651spider_net_open(struct net_device *netdev) 1654spider_net_open(struct net_device *netdev)
1652{ 1655{
1653 struct spider_net_card *card = netdev_priv(netdev); 1656 struct spider_net_card *card = netdev_priv(netdev);
1654 struct spider_net_descr *descr; 1657 int result;
1655 int i, result;
1656 1658
1657 result = -ENOMEM; 1659 result = spider_net_init_chain(card, &card->tx_chain);
1658 if (spider_net_init_chain(card, &card->tx_chain, card->descr, 1660 if (result)
1659 card->num_tx_desc))
1660 goto alloc_tx_failed; 1661 goto alloc_tx_failed;
1661
1662 card->low_watermark = NULL; 1662 card->low_watermark = NULL;
1663 1663
1664 /* rx_chain is after tx_chain, so offset is descr + tx_count */ 1664 result = spider_net_init_chain(card, &card->rx_chain);
1665 if (spider_net_init_chain(card, &card->rx_chain, 1665 if (result)
1666 card->descr + card->num_tx_desc,
1667 card->num_rx_desc))
1668 goto alloc_rx_failed; 1666 goto alloc_rx_failed;
1669 1667
1670 descr = card->rx_chain.head; 1668 /* Allocate rx skbs */
1671 for (i=0; i < card->num_rx_desc; i++, descr++)
1672 descr->next_descr_addr = descr->next->bus_addr;
1673
1674 /* allocate rx skbs */
1675 if (spider_net_alloc_rx_skbs(card)) 1669 if (spider_net_alloc_rx_skbs(card))
1676 goto alloc_skbs_failed; 1670 goto alloc_skbs_failed;
1677 1671
@@ -1902,7 +1896,6 @@ spider_net_stop(struct net_device *netdev)
1902{ 1896{
1903 struct spider_net_card *card = netdev_priv(netdev); 1897 struct spider_net_card *card = netdev_priv(netdev);
1904 1898
1905 tasklet_kill(&card->rxram_full_tl);
1906 netif_poll_disable(netdev); 1899 netif_poll_disable(netdev);
1907 netif_carrier_off(netdev); 1900 netif_carrier_off(netdev);
1908 netif_stop_queue(netdev); 1901 netif_stop_queue(netdev);
@@ -1924,6 +1917,7 @@ spider_net_stop(struct net_device *netdev)
1924 1917
1925 /* release chains */ 1918 /* release chains */
1926 spider_net_release_tx_chain(card, 1); 1919 spider_net_release_tx_chain(card, 1);
1920 spider_net_free_rx_chain_contents(card);
1927 1921
1928 spider_net_free_rx_chain_contents(card); 1922 spider_net_free_rx_chain_contents(card);
1929 1923
@@ -2046,9 +2040,6 @@ spider_net_setup_netdev(struct spider_net_card *card)
2046 2040
2047 pci_set_drvdata(card->pdev, netdev); 2041 pci_set_drvdata(card->pdev, netdev);
2048 2042
2049 card->rxram_full_tl.data = (unsigned long) card;
2050 card->rxram_full_tl.func =
2051 (void (*)(unsigned long)) spider_net_handle_rxram_full;
2052 init_timer(&card->tx_timer); 2043 init_timer(&card->tx_timer);
2053 card->tx_timer.function = 2044 card->tx_timer.function =
2054 (void (*)(unsigned long)) spider_net_cleanup_tx_ring; 2045 (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
@@ -2057,8 +2048,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
2057 2048
2058 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2049 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2059 2050
2060 card->num_tx_desc = tx_descriptors; 2051 card->tx_chain.num_desc = tx_descriptors;
2061 card->num_rx_desc = rx_descriptors; 2052 card->rx_chain.num_desc = rx_descriptors;
2062 2053
2063 spider_net_setup_netdev_ops(netdev); 2054 spider_net_setup_netdev_ops(netdev);
2064 2055
@@ -2107,12 +2098,8 @@ spider_net_alloc_card(void)
2107{ 2098{
2108 struct net_device *netdev; 2099 struct net_device *netdev;
2109 struct spider_net_card *card; 2100 struct spider_net_card *card;
2110 size_t alloc_size;
2111 2101
2112 alloc_size = sizeof (*card) + 2102 netdev = alloc_etherdev(sizeof(struct spider_net_card));
2113 sizeof (struct spider_net_descr) * rx_descriptors +
2114 sizeof (struct spider_net_descr) * tx_descriptors;
2115 netdev = alloc_etherdev(alloc_size);
2116 if (!netdev) 2103 if (!netdev)
2117 return NULL; 2104 return NULL;
2118 2105
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 3e196df29790..2fec5cf76926 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -24,7 +24,7 @@
24#ifndef _SPIDER_NET_H 24#ifndef _SPIDER_NET_H
25#define _SPIDER_NET_H 25#define _SPIDER_NET_H
26 26
27#define VERSION "1.6 A" 27#define VERSION "1.6 B"
28 28
29#include "sungem_phy.h" 29#include "sungem_phy.h"
30 30
@@ -378,6 +378,9 @@ struct spider_net_descr_chain {
378 spinlock_t lock; 378 spinlock_t lock;
379 struct spider_net_descr *head; 379 struct spider_net_descr *head;
380 struct spider_net_descr *tail; 380 struct spider_net_descr *tail;
381 struct spider_net_descr *ring;
382 int num_desc;
383 dma_addr_t dma_addr;
381}; 384};
382 385
383/* descriptor data_status bits */ 386/* descriptor data_status bits */
@@ -397,8 +400,6 @@ struct spider_net_descr_chain {
397 * 701b8000 would be correct, but every packets gets that flag */ 400 * 701b8000 would be correct, but every packets gets that flag */
398#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000 401#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
399 402
400#define SPIDER_NET_DESCR_SIZE 32
401
402/* this will be bigger some time */ 403/* this will be bigger some time */
403struct spider_net_options { 404struct spider_net_options {
404 int rx_csum; /* for rx: if 0 ip_summed=NONE, 405 int rx_csum; /* for rx: if 0 ip_summed=NONE,
@@ -441,25 +442,16 @@ struct spider_net_card {
441 struct spider_net_descr_chain rx_chain; 442 struct spider_net_descr_chain rx_chain;
442 struct spider_net_descr *low_watermark; 443 struct spider_net_descr *low_watermark;
443 444
444 struct net_device_stats netdev_stats;
445
446 struct spider_net_options options;
447
448 spinlock_t intmask_lock;
449 struct tasklet_struct rxram_full_tl;
450 struct timer_list tx_timer; 445 struct timer_list tx_timer;
451
452 struct work_struct tx_timeout_task; 446 struct work_struct tx_timeout_task;
453 atomic_t tx_timeout_task_counter; 447 atomic_t tx_timeout_task_counter;
454 wait_queue_head_t waitq; 448 wait_queue_head_t waitq;
455 449
456 /* for ethtool */ 450 /* for ethtool */
457 int msg_enable; 451 int msg_enable;
458 int num_rx_desc; 452 struct net_device_stats netdev_stats;
459 int num_tx_desc;
460 struct spider_net_extra_stats spider_stats; 453 struct spider_net_extra_stats spider_stats;
461 454 struct spider_net_options options;
462 struct spider_net_descr descr[0];
463}; 455};
464 456
465#define pr_err(fmt,arg...) \ 457#define pr_err(fmt,arg...) \
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index 91b995102915..6bcf03fc89be 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -158,9 +158,9 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev,
158 struct spider_net_card *card = netdev->priv; 158 struct spider_net_card *card = netdev->priv;
159 159
160 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; 160 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
161 ering->tx_pending = card->num_tx_desc; 161 ering->tx_pending = card->tx_chain.num_desc;
162 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; 162 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
163 ering->rx_pending = card->num_rx_desc; 163 ering->rx_pending = card->rx_chain.num_desc;
164} 164}
165 165
166static int spider_net_get_stats_count(struct net_device *netdev) 166static int spider_net_get_stats_count(struct net_device *netdev)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f4bf62c2a7a5..135c0987deae 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -58,11 +58,7 @@
58#define TG3_VLAN_TAG_USED 0 58#define TG3_VLAN_TAG_USED 0
59#endif 59#endif
60 60
61#ifdef NETIF_F_TSO
62#define TG3_TSO_SUPPORT 1 61#define TG3_TSO_SUPPORT 1
63#else
64#define TG3_TSO_SUPPORT 0
65#endif
66 62
67#include "tg3.h" 63#include "tg3.h"
68 64
@@ -3873,7 +3869,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3873 3869
3874 entry = tp->tx_prod; 3870 entry = tp->tx_prod;
3875 base_flags = 0; 3871 base_flags = 0;
3876#if TG3_TSO_SUPPORT != 0
3877 mss = 0; 3872 mss = 0;
3878 if (skb->len > (tp->dev->mtu + ETH_HLEN) && 3873 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3879 (mss = skb_shinfo(skb)->gso_size) != 0) { 3874 (mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -3906,11 +3901,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3906 } 3901 }
3907 else if (skb->ip_summed == CHECKSUM_PARTIAL) 3902 else if (skb->ip_summed == CHECKSUM_PARTIAL)
3908 base_flags |= TXD_FLAG_TCPUDP_CSUM; 3903 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3909#else
3910 mss = 0;
3911 if (skb->ip_summed == CHECKSUM_PARTIAL)
3912 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3913#endif
3914#if TG3_VLAN_TAG_USED 3904#if TG3_VLAN_TAG_USED
3915 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 3905 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3916 base_flags |= (TXD_FLAG_VLAN | 3906 base_flags |= (TXD_FLAG_VLAN |
@@ -3970,7 +3960,6 @@ out_unlock:
3970 return NETDEV_TX_OK; 3960 return NETDEV_TX_OK;
3971} 3961}
3972 3962
3973#if TG3_TSO_SUPPORT != 0
3974static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); 3963static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3975 3964
3976/* Use GSO to workaround a rare TSO bug that may be triggered when the 3965/* Use GSO to workaround a rare TSO bug that may be triggered when the
@@ -4002,7 +3991,6 @@ tg3_tso_bug_end:
4002 3991
4003 return NETDEV_TX_OK; 3992 return NETDEV_TX_OK;
4004} 3993}
4005#endif
4006 3994
4007/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 3995/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4008 * support TG3_FLG2_HW_TSO_1 or firmware TSO only. 3996 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
@@ -4036,7 +4024,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4036 base_flags = 0; 4024 base_flags = 0;
4037 if (skb->ip_summed == CHECKSUM_PARTIAL) 4025 if (skb->ip_summed == CHECKSUM_PARTIAL)
4038 base_flags |= TXD_FLAG_TCPUDP_CSUM; 4026 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4039#if TG3_TSO_SUPPORT != 0
4040 mss = 0; 4027 mss = 0;
4041 if (skb->len > (tp->dev->mtu + ETH_HLEN) && 4028 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
4042 (mss = skb_shinfo(skb)->gso_size) != 0) { 4029 (mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -4091,9 +4078,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4091 } 4078 }
4092 } 4079 }
4093 } 4080 }
4094#else
4095 mss = 0;
4096#endif
4097#if TG3_VLAN_TAG_USED 4081#if TG3_VLAN_TAG_USED
4098 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 4082 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4099 base_flags |= (TXD_FLAG_VLAN | 4083 base_flags |= (TXD_FLAG_VLAN |
@@ -5329,7 +5313,6 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5329 return 0; 5313 return 0;
5330} 5314}
5331 5315
5332#if TG3_TSO_SUPPORT != 0
5333 5316
5334#define TG3_TSO_FW_RELEASE_MAJOR 0x1 5317#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5335#define TG3_TSO_FW_RELASE_MINOR 0x6 5318#define TG3_TSO_FW_RELASE_MINOR 0x6
@@ -5906,7 +5889,6 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
5906 return 0; 5889 return 0;
5907} 5890}
5908 5891
5909#endif /* TG3_TSO_SUPPORT != 0 */
5910 5892
5911/* tp->lock is held. */ 5893/* tp->lock is held. */
5912static void __tg3_set_mac_addr(struct tg3 *tp) 5894static void __tg3_set_mac_addr(struct tg3 *tp)
@@ -6120,7 +6102,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6120 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 6102 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6121 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 6103 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6122 } 6104 }
6123#if TG3_TSO_SUPPORT != 0
6124 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 6105 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6125 int fw_len; 6106 int fw_len;
6126 6107
@@ -6135,7 +6116,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6135 tw32(BUFMGR_MB_POOL_SIZE, 6116 tw32(BUFMGR_MB_POOL_SIZE,
6136 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); 6117 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6137 } 6118 }
6138#endif
6139 6119
6140 if (tp->dev->mtu <= ETH_DATA_LEN) { 6120 if (tp->dev->mtu <= ETH_DATA_LEN) {
6141 tw32(BUFMGR_MB_RDMA_LOW_WATER, 6121 tw32(BUFMGR_MB_RDMA_LOW_WATER,
@@ -6337,10 +6317,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6337 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 6317 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6338 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 6318 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6339 6319
6340#if TG3_TSO_SUPPORT != 0
6341 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 6320 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6342 rdmac_mode |= (1 << 27); 6321 rdmac_mode |= (1 << 27);
6343#endif
6344 6322
6345 /* Receive/send statistics. */ 6323 /* Receive/send statistics. */
6346 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 6324 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
@@ -6511,10 +6489,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6511 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 6489 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6512 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); 6490 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6513 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 6491 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6514#if TG3_TSO_SUPPORT != 0
6515 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 6492 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6516 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 6493 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6517#endif
6518 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE); 6494 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6519 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 6495 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6520 6496
@@ -6524,13 +6500,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6524 return err; 6500 return err;
6525 } 6501 }
6526 6502
6527#if TG3_TSO_SUPPORT != 0
6528 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 6503 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6529 err = tg3_load_tso_firmware(tp); 6504 err = tg3_load_tso_firmware(tp);
6530 if (err) 6505 if (err)
6531 return err; 6506 return err;
6532 } 6507 }
6533#endif
6534 6508
6535 tp->tx_mode = TX_MODE_ENABLE; 6509 tp->tx_mode = TX_MODE_ENABLE;
6536 tw32_f(MAC_TX_MODE, tp->tx_mode); 6510 tw32_f(MAC_TX_MODE, tp->tx_mode);
@@ -8062,7 +8036,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value)
8062 tp->msg_enable = value; 8036 tp->msg_enable = value;
8063} 8037}
8064 8038
8065#if TG3_TSO_SUPPORT != 0
8066static int tg3_set_tso(struct net_device *dev, u32 value) 8039static int tg3_set_tso(struct net_device *dev, u32 value)
8067{ 8040{
8068 struct tg3 *tp = netdev_priv(dev); 8041 struct tg3 *tp = netdev_priv(dev);
@@ -8081,7 +8054,6 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
8081 } 8054 }
8082 return ethtool_op_set_tso(dev, value); 8055 return ethtool_op_set_tso(dev, value);
8083} 8056}
8084#endif
8085 8057
8086static int tg3_nway_reset(struct net_device *dev) 8058static int tg3_nway_reset(struct net_device *dev)
8087{ 8059{
@@ -9212,10 +9184,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
9212 .set_tx_csum = tg3_set_tx_csum, 9184 .set_tx_csum = tg3_set_tx_csum,
9213 .get_sg = ethtool_op_get_sg, 9185 .get_sg = ethtool_op_get_sg,
9214 .set_sg = ethtool_op_set_sg, 9186 .set_sg = ethtool_op_set_sg,
9215#if TG3_TSO_SUPPORT != 0
9216 .get_tso = ethtool_op_get_tso, 9187 .get_tso = ethtool_op_get_tso,
9217 .set_tso = tg3_set_tso, 9188 .set_tso = tg3_set_tso,
9218#endif
9219 .self_test_count = tg3_get_test_count, 9189 .self_test_count = tg3_get_test_count,
9220 .self_test = tg3_self_test, 9190 .self_test = tg3_self_test,
9221 .get_strings = tg3_get_strings, 9191 .get_strings = tg3_get_strings,
@@ -11856,7 +11826,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11856 11826
11857 tg3_init_bufmgr_config(tp); 11827 tg3_init_bufmgr_config(tp);
11858 11828
11859#if TG3_TSO_SUPPORT != 0
11860 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 11829 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11861 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 11830 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11862 } 11831 }
@@ -11881,7 +11850,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11881 dev->features |= NETIF_F_TSO6; 11850 dev->features |= NETIF_F_TSO6;
11882 } 11851 }
11883 11852
11884#endif
11885 11853
11886 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 11854 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11887 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 11855 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
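
A note on the tg3.c hunks above: NETIF_F_TSO is always defined in this tree, so TG3_TSO_SUPPORT could never be 0 and every "#if TG3_TSO_SUPPORT != 0" block was unconditionally compiled in; the patch simply deletes the dead "#else" paths. A minimal stand-alone sketch of the surviving transmit-flag logic (names and flag values are illustrative placeholders, not the tg3 sources):

#include <stdio.h>

#define TXD_FLAG_TCPUDP_CSUM 0x0001	/* placeholder value */

/* Sketch: with the #if/#else removed there is a single code path;
 * mss is taken from the GSO size when segmentation is requested,
 * otherwise only the checksum flag may be set. */
static unsigned int tx_base_flags(unsigned int gso_size, int csum_partial,
				  unsigned int *mss)
{
	unsigned int base_flags = 0;

	*mss = 0;
	if (gso_size != 0)
		*mss = gso_size;
	else if (csum_partial)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	return base_flags;
}

int main(void)
{
	unsigned int mss;
	unsigned int flags = tx_base_flags(1448, 0, &mss);

	printf("flags=%#x mss=%u\n", flags, mss);
	return 0;
}
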
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 7e4b23c7c1ba..abb8611c5a91 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2865,8 +2865,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2865 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2865 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2866 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2866 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2867 ugeth->tx_bd_ring_offset[j] = 2867 ugeth->tx_bd_ring_offset[j] =
2868 (u32) (kmalloc((u32) (length + align), 2868 kmalloc((u32) (length + align), GFP_KERNEL);
2869 GFP_KERNEL)); 2869
2870 if (ugeth->tx_bd_ring_offset[j] != 0) 2870 if (ugeth->tx_bd_ring_offset[j] != 0)
2871 ugeth->p_tx_bd_ring[j] = 2871 ugeth->p_tx_bd_ring[j] =
2872 (void*)((ugeth->tx_bd_ring_offset[j] + 2872 (void*)((ugeth->tx_bd_ring_offset[j] +
@@ -2901,7 +2901,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2901 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2901 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2902 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2902 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2903 ugeth->rx_bd_ring_offset[j] = 2903 ugeth->rx_bd_ring_offset[j] =
2904 (u32) (kmalloc((u32) (length + align), GFP_KERNEL)); 2904 kmalloc((u32) (length + align), GFP_KERNEL);
2905 if (ugeth->rx_bd_ring_offset[j] != 0) 2905 if (ugeth->rx_bd_ring_offset[j] != 0)
2906 ugeth->p_rx_bd_ring[j] = 2906 ugeth->p_rx_bd_ring[j] =
2907 (void*)((ugeth->rx_bd_ring_offset[j] + 2907 (void*)((ugeth->rx_bd_ring_offset[j] +
@@ -2927,10 +2927,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2927 /* Init Tx bds */ 2927 /* Init Tx bds */
2928 for (j = 0; j < ug_info->numQueuesTx; j++) { 2928 for (j = 0; j < ug_info->numQueuesTx; j++) {
2929 /* Setup the skbuff rings */ 2929 /* Setup the skbuff rings */
2930 ugeth->tx_skbuff[j] = 2930 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2931 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * 2931 ugeth->ug_info->bdRingLenTx[j],
2932 ugeth->ug_info->bdRingLenTx[j], 2932 GFP_KERNEL);
2933 GFP_KERNEL);
2934 2933
2935 if (ugeth->tx_skbuff[j] == NULL) { 2934 if (ugeth->tx_skbuff[j] == NULL) {
2936 ugeth_err("%s: Could not allocate tx_skbuff", 2935 ugeth_err("%s: Could not allocate tx_skbuff",
@@ -2959,10 +2958,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2959 /* Init Rx bds */ 2958 /* Init Rx bds */
2960 for (j = 0; j < ug_info->numQueuesRx; j++) { 2959 for (j = 0; j < ug_info->numQueuesRx; j++) {
2961 /* Setup the skbuff rings */ 2960 /* Setup the skbuff rings */
2962 ugeth->rx_skbuff[j] = 2961 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2963 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * 2962 ugeth->ug_info->bdRingLenRx[j],
2964 ugeth->ug_info->bdRingLenRx[j], 2963 GFP_KERNEL);
2965 GFP_KERNEL);
2966 2964
2967 if (ugeth->rx_skbuff[j] == NULL) { 2965 if (ugeth->rx_skbuff[j] == NULL) {
2968 ugeth_err("%s: Could not allocate rx_skbuff", 2966 ugeth_err("%s: Could not allocate rx_skbuff",
@@ -3453,8 +3451,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3453 * allocated resources can be released when the channel is freed. 3451 * allocated resources can be released when the channel is freed.
3454 */ 3452 */
3455 if (!(ugeth->p_init_enet_param_shadow = 3453 if (!(ugeth->p_init_enet_param_shadow =
3456 (struct ucc_geth_init_pram *) kmalloc(sizeof(struct ucc_geth_init_pram), 3454 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3457 GFP_KERNEL))) {
3458 ugeth_err 3455 ugeth_err
3459 ("%s: Can not allocate memory for" 3456 ("%s: Can not allocate memory for"
3460 " p_UccInitEnetParamShadows.", __FUNCTION__); 3457 " p_UccInitEnetParamShadows.", __FUNCTION__);
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 21f76f51c95e..61708cf4c85d 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -235,6 +235,19 @@ comment "Cyclades-PC300 MLPPP support is disabled."
235comment "Refer to the file README.mlppp, provided by PC300 package." 235comment "Refer to the file README.mlppp, provided by PC300 package."
236 depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP) 236 depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
237 237
238config PC300TOO
239 tristate "Cyclades PC300 RSV/X21 alternative support"
240 depends on HDLC && PCI
241 help
242 Alternative driver for PC300 RSV/X21 PCI cards made by
243 Cyclades, Inc. If you have such a card, say Y here and see
244 <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
245
246 To compile this as a module, choose M here: the module
247 will be called pc300too.
248
249 If unsure, say N here.
250
238config N2 251config N2
239 tristate "SDL RISCom/N2 support" 252 tristate "SDL RISCom/N2 support"
240 depends on HDLC && ISA 253 depends on HDLC && ISA
@@ -344,17 +357,6 @@ config DLCI
344 To compile this driver as a module, choose M here: the 357 To compile this driver as a module, choose M here: the
345 module will be called dlci. 358 module will be called dlci.
346 359
347config DLCI_COUNT
348 int "Max open DLCI"
349 depends on DLCI
350 default "24"
351 help
352 Maximal number of logical point-to-point frame relay connections
353 (the identifiers of which are called DCLIs) that the driver can
354 handle.
355
356 The default is probably fine.
357
358config DLCI_MAX 360config DLCI_MAX
359 int "Max DLCI per device" 361 int "Max DLCI per device"
360 depends on DLCI 362 depends on DLCI
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 83ec2c87ba3f..d61fef36afc9 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_N2) += n2.o
41obj-$(CONFIG_C101) += c101.o 41obj-$(CONFIG_C101) += c101.o
42obj-$(CONFIG_WANXL) += wanxl.o 42obj-$(CONFIG_WANXL) += wanxl.o
43obj-$(CONFIG_PCI200SYN) += pci200syn.o 43obj-$(CONFIG_PCI200SYN) += pci200syn.o
44obj-$(CONFIG_PC300TOO) += pc300too.o
44 45
45clean-files := wanxlfw.inc 46clean-files := wanxlfw.inc
46$(obj)/wanxl.o: $(obj)/wanxlfw.inc 47$(obj)/wanxl.o: $(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index db354e0edbe5..9040d7cf651e 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -222,7 +222,7 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
222 return -EINVAL; 222 return -EINVAL;
223} 223}
224 224
225void hdlc_setup(struct net_device *dev) 225static void hdlc_setup(struct net_device *dev)
226{ 226{
227 hdlc_device *hdlc = dev_to_hdlc(dev); 227 hdlc_device *hdlc = dev_to_hdlc(dev);
228 228
@@ -325,7 +325,6 @@ MODULE_LICENSE("GPL v2");
325EXPORT_SYMBOL(hdlc_open); 325EXPORT_SYMBOL(hdlc_open);
326EXPORT_SYMBOL(hdlc_close); 326EXPORT_SYMBOL(hdlc_close);
327EXPORT_SYMBOL(hdlc_ioctl); 327EXPORT_SYMBOL(hdlc_ioctl);
328EXPORT_SYMBOL(hdlc_setup);
329EXPORT_SYMBOL(alloc_hdlcdev); 328EXPORT_SYMBOL(alloc_hdlcdev);
330EXPORT_SYMBOL(unregister_hdlc_device); 329EXPORT_SYMBOL(unregister_hdlc_device);
331EXPORT_SYMBOL(register_hdlc_protocol); 330EXPORT_SYMBOL(register_hdlc_protocol);
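
With hdlc_setup() made static and its export removed above, a driver is expected to obtain an already-initialised HDLC net_device from alloc_hdlcdev(), which runs hdlc_setup() internally; the new pc300too driver below follows that pattern. A minimal sketch of the expected call sequence (the port structure and error handling are placeholders, not an existing driver):

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/netdevice.h>

struct my_port {
	struct net_device *dev;
};

/* Sketch: allocate the HDLC device instead of calling hdlc_setup()
 * directly; register_hdlc_device()/free_netdev() complete the pair. */
static int my_port_init(struct my_port *port)
{
	port->dev = alloc_hdlcdev(port);
	if (!port->dev)
		return -ENOMEM;

	if (register_hdlc_device(port->dev)) {
		free_netdev(port->dev);
		return -ENOBUFS;
	}
	return 0;
}
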
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
new file mode 100644
index 000000000000..79b2d5454d6b
--- /dev/null
+++ b/drivers/net/wan/pc300too.c
@@ -0,0 +1,565 @@
1/*
2 * Cyclades PC300 synchronous serial card driver for Linux
3 *
4 * Copyright (C) 2000-2007 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
11 *
12 * Sources of information:
13 * Hitachi HD64572 SCA-II User's Manual
14 * Cyclades PC300 Linux driver
15 *
16 * This driver currently supports only PC300/RSV (V.24/V.35) and
17 * PC300/X21 cards.
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23#include <linux/sched.h>
24#include <linux/types.h>
25#include <linux/fcntl.h>
26#include <linux/in.h>
27#include <linux/string.h>
28#include <linux/errno.h>
29#include <linux/init.h>
30#include <linux/ioport.h>
31#include <linux/moduleparam.h>
32#include <linux/netdevice.h>
33#include <linux/hdlc.h>
34#include <linux/pci.h>
35#include <linux/delay.h>
36#include <asm/io.h>
37
38#include "hd64572.h"
39
40static const char* version = "Cyclades PC300 driver version: 1.17";
41static const char* devname = "PC300";
42
43#undef DEBUG_PKT
44#define DEBUG_RINGS
45
46#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */
47#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */
48#define ALL_PAGES_ALWAYS_MAPPED
49#define NEED_DETECT_RAM
50#define NEED_SCA_MSCI_INTR
51#define MAX_TX_BUFFERS 10
52
53static int pci_clock_freq = 33000000;
54static int use_crystal_clock = 0;
55static unsigned int CLOCK_BASE;
56
57/* Masks to access the init_ctrl PLX register */
58#define PC300_CLKSEL_MASK (0x00000004UL)
59#define PC300_CHMEDIA_MASK(port) (0x00000020UL << ((port) * 3))
60#define PC300_CTYPE_MASK (0x00000800UL)
61
62
63enum { PC300_RSV = 1, PC300_X21, PC300_TE }; /* card types */
64
65/*
66 * PLX PCI9050-1 local configuration and shared runtime registers.
67 * This structure can be used to access 9050 registers (memory mapped).
68 */
69typedef struct {
70 u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
71 u32 loc_rom_range; /* 10h : Local ROM Range */
72 u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
73 u32 loc_rom_base; /* 24h : Local ROM Base */
74 u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
75 u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
76 u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
77 u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
78 u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
79}plx9050;
80
81
82
83typedef struct port_s {
84 struct net_device *dev;
85 struct card_s *card;
86 spinlock_t lock; /* TX lock */
87 sync_serial_settings settings;
88 int rxpart; /* partial frame received, next frame invalid*/
89 unsigned short encoding;
90 unsigned short parity;
91 unsigned int iface;
92 u16 rxin; /* rx ring buffer 'in' pointer */
93 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
94 u16 txlast;
95 u8 rxs, txs, tmc; /* SCA registers */
96 u8 phy_node; /* physical port # - 0 or 1 */
97}port_t;
98
99
100
101typedef struct card_s {
102 int type; /* RSV, X21, etc. */
103 int n_ports; /* 1 or 2 ports */
104 u8* __iomem rambase; /* buffer memory base (virtual) */
105 u8* __iomem scabase; /* SCA memory base (virtual) */
106 plx9050 __iomem *plxbase; /* PLX registers memory base (virtual) */
107 u32 init_ctrl_value; /* Saved value - 9050 bug workaround */
108 u16 rx_ring_buffers; /* number of buffers in a ring */
109 u16 tx_ring_buffers;
110 u16 buff_offset; /* offset of first buffer of first channel */
111 u8 irq; /* interrupt request level */
112
113 port_t ports[2];
114}card_t;
115
116
117#define sca_in(reg, card) readb(card->scabase + (reg))
118#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
119#define sca_inw(reg, card) readw(card->scabase + (reg))
120#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
121#define sca_inl(reg, card) readl(card->scabase + (reg))
122#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
123
124#define port_to_card(port) (port->card)
125#define log_node(port) (port->phy_node)
126#define phy_node(port) (port->phy_node)
127#define winbase(card) (card->rambase)
128#define get_port(card, port) ((port) < (card)->n_ports ? \
129 (&(card)->ports[port]) : (NULL))
130
131#include "hd6457x.c"
132
133
134static void pc300_set_iface(port_t *port)
135{
136 card_t *card = port->card;
137 u32* init_ctrl = &card->plxbase->init_ctrl;
138 u16 msci = get_msci(port);
139 u8 rxs = port->rxs & CLK_BRG_MASK;
140 u8 txs = port->txs & CLK_BRG_MASK;
141
142 sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
143 port_to_card(port));
144 switch(port->settings.clock_type) {
145 case CLOCK_INT:
146 rxs |= CLK_BRG; /* BRG output */
147 txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
148 break;
149
150 case CLOCK_TXINT:
151 rxs |= CLK_LINE; /* RXC input */
152 txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
153 break;
154
155 case CLOCK_TXFROMRX:
156 rxs |= CLK_LINE; /* RXC input */
157 txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
158 break;
159
160 default: /* EXTernal clock */
161 rxs |= CLK_LINE; /* RXC input */
162 txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
163 break;
164 }
165
166 port->rxs = rxs;
167 port->txs = txs;
168 sca_out(rxs, msci + RXS, card);
169 sca_out(txs, msci + TXS, card);
170 sca_set_port(port);
171
172 if (port->card->type == PC300_RSV) {
173 if (port->iface == IF_IFACE_V35)
174 writel(card->init_ctrl_value |
175 PC300_CHMEDIA_MASK(port->phy_node), init_ctrl);
176 else
177 writel(card->init_ctrl_value &
178 ~PC300_CHMEDIA_MASK(port->phy_node), init_ctrl);
179 }
180}
181
182
183
184static int pc300_open(struct net_device *dev)
185{
186 port_t *port = dev_to_port(dev);
187
188 int result = hdlc_open(dev);
189 if (result)
190 return result;
191
192 sca_open(dev);
193 pc300_set_iface(port);
194 return 0;
195}
196
197
198
199static int pc300_close(struct net_device *dev)
200{
201 sca_close(dev);
202 hdlc_close(dev);
203 return 0;
204}
205
206
207
208static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
209{
210 const size_t size = sizeof(sync_serial_settings);
211 sync_serial_settings new_line;
212 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
213 int new_type;
214 port_t *port = dev_to_port(dev);
215
216#ifdef DEBUG_RINGS
217 if (cmd == SIOCDEVPRIVATE) {
218 sca_dump_rings(dev);
219 return 0;
220 }
221#endif
222 if (cmd != SIOCWANDEV)
223 return hdlc_ioctl(dev, ifr, cmd);
224
225 if (ifr->ifr_settings.type == IF_GET_IFACE) {
226 ifr->ifr_settings.type = port->iface;
227 if (ifr->ifr_settings.size < size) {
228 ifr->ifr_settings.size = size; /* data size wanted */
229 return -ENOBUFS;
230 }
231 if (copy_to_user(line, &port->settings, size))
232 return -EFAULT;
233 return 0;
234
235 }
236
237 if (port->card->type == PC300_X21 &&
238 (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
239 ifr->ifr_settings.type == IF_IFACE_X21))
240 new_type = IF_IFACE_X21;
241
242 else if (port->card->type == PC300_RSV &&
243 (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
244 ifr->ifr_settings.type == IF_IFACE_V35))
245 new_type = IF_IFACE_V35;
246
247 else if (port->card->type == PC300_RSV &&
248 ifr->ifr_settings.type == IF_IFACE_V24)
249 new_type = IF_IFACE_V24;
250
251 else
252 return hdlc_ioctl(dev, ifr, cmd);
253
254 if (!capable(CAP_NET_ADMIN))
255 return -EPERM;
256
257 if (copy_from_user(&new_line, line, size))
258 return -EFAULT;
259
260 if (new_line.clock_type != CLOCK_EXT &&
261 new_line.clock_type != CLOCK_TXFROMRX &&
262 new_line.clock_type != CLOCK_INT &&
263 new_line.clock_type != CLOCK_TXINT)
264 return -EINVAL; /* No such clock setting */
265
266 if (new_line.loopback != 0 && new_line.loopback != 1)
267 return -EINVAL;
268
269 memcpy(&port->settings, &new_line, size); /* Update settings */
270 port->iface = new_type;
271 pc300_set_iface(port);
272 return 0;
273}
274
275
276
277static void pc300_pci_remove_one(struct pci_dev *pdev)
278{
279 int i;
280 card_t *card = pci_get_drvdata(pdev);
281
282 for (i = 0; i < 2; i++)
283 if (card->ports[i].card) {
284 struct net_device *dev = port_to_dev(&card->ports[i]);
285 unregister_hdlc_device(dev);
286 }
287
288 if (card->irq)
289 free_irq(card->irq, card);
290
291 if (card->rambase)
292 iounmap(card->rambase);
293 if (card->scabase)
294 iounmap(card->scabase);
295 if (card->plxbase)
296 iounmap(card->plxbase);
297
298 pci_release_regions(pdev);
299 pci_disable_device(pdev);
300 pci_set_drvdata(pdev, NULL);
301 if (card->ports[0].dev)
302 free_netdev(card->ports[0].dev);
303 if (card->ports[1].dev)
304 free_netdev(card->ports[1].dev);
305 kfree(card);
306}
307
308
309
310static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
311 const struct pci_device_id *ent)
312{
313 card_t *card;
314 u8 rev_id;
315 u32 __iomem *p;
316 int i;
317 u32 ramsize;
318 u32 ramphys; /* buffer memory base */
319 u32 scaphys; /* SCA memory base */
320 u32 plxphys; /* PLX registers memory base */
321
322#ifndef MODULE
323 static int printed_version;
324 if (!printed_version++)
325 printk(KERN_INFO "%s\n", version);
326#endif
327
328 i = pci_enable_device(pdev);
329 if (i)
330 return i;
331
332 i = pci_request_regions(pdev, "PC300");
333 if (i) {
334 pci_disable_device(pdev);
335 return i;
336 }
337
338 card = kmalloc(sizeof(card_t), GFP_KERNEL);
339 if (card == NULL) {
340 printk(KERN_ERR "pc300: unable to allocate memory\n");
341 pci_release_regions(pdev);
342 pci_disable_device(pdev);
343 return -ENOBUFS;
344 }
345 memset(card, 0, sizeof(card_t));
346 pci_set_drvdata(pdev, card);
347
348 if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
349 pdev->device == PCI_DEVICE_ID_PC300_TE_2)
350 card->type = PC300_TE; /* not fully supported */
351 else if (card->init_ctrl_value & PC300_CTYPE_MASK)
352 card->type = PC300_X21;
353 else
354 card->type = PC300_RSV;
355
356 if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
357 pdev->device == PCI_DEVICE_ID_PC300_TE_1)
358 card->n_ports = 1;
359 else
360 card->n_ports = 2;
361
362 for (i = 0; i < card->n_ports; i++)
363 if (!(card->ports[i].dev = alloc_hdlcdev(&card->ports[i]))) {
364 printk(KERN_ERR "pc300: unable to allocate memory\n");
365 pc300_pci_remove_one(pdev);
366 return -ENOMEM;
367 }
368
369 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
370 if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
371 pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
372 pci_resource_len(pdev, 3) < 16384) {
373 printk(KERN_ERR "pc300: invalid card EEPROM parameters\n");
374 pc300_pci_remove_one(pdev);
375 return -EFAULT;
376 }
377
378 plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
379 card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);
380
381 scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
382 card->scabase = ioremap(scaphys, PC300_SCA_SIZE);
383
384 ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
385 card->rambase = ioremap(ramphys, pci_resource_len(pdev,3));
386
387 if (card->plxbase == NULL ||
388 card->scabase == NULL ||
389 card->rambase == NULL) {
390 printk(KERN_ERR "pc300: ioremap() failed\n");
391 pc300_pci_remove_one(pdev);
392 }
393
394 /* PLX PCI 9050 workaround for local configuration register read bug */
395 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys);
396 card->init_ctrl_value = readl(&((plx9050*)card->scabase)->init_ctrl);
397 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);
398
399 /* Reset PLX */
400 p = &card->plxbase->init_ctrl;
401 writel(card->init_ctrl_value | 0x40000000, p);
402 readl(p); /* Flush the write - do not use sca_flush */
403 udelay(1);
404
405 writel(card->init_ctrl_value, p);
406 readl(p); /* Flush the write - do not use sca_flush */
407 udelay(1);
408
409 /* Reload Config. Registers from EEPROM */
410 writel(card->init_ctrl_value | 0x20000000, p);
411 readl(p); /* Flush the write - do not use sca_flush */
412 udelay(1);
413
414 writel(card->init_ctrl_value, p);
415 readl(p); /* Flush the write - do not use sca_flush */
416 udelay(1);
417
418 ramsize = sca_detect_ram(card, card->rambase,
419 pci_resource_len(pdev, 3));
420
421 if (use_crystal_clock)
422 card->init_ctrl_value &= ~PC300_CLKSEL_MASK;
423 else
424 card->init_ctrl_value |= PC300_CLKSEL_MASK;
425
426 writel(card->init_ctrl_value, &card->plxbase->init_ctrl);
427 /* number of TX + RX buffers for one port */
428 i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU));
429 card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
430 card->rx_ring_buffers = i - card->tx_ring_buffers;
431
432 card->buff_offset = card->n_ports * sizeof(pkt_desc) *
433 (card->tx_ring_buffers + card->rx_ring_buffers);
434
435 printk(KERN_INFO "pc300: PC300/%s, %u KB RAM at 0x%x, IRQ%u, "
436 "using %u TX + %u RX packets rings\n",
437 card->type == PC300_X21 ? "X21" :
438 card->type == PC300_TE ? "TE" : "RSV",
439 ramsize / 1024, ramphys, pdev->irq,
440 card->tx_ring_buffers, card->rx_ring_buffers);
441
442 if (card->tx_ring_buffers < 1) {
443 printk(KERN_ERR "pc300: RAM test failed\n");
444 pc300_pci_remove_one(pdev);
445 return -EFAULT;
446 }
447
448 /* Enable interrupts on the PCI bridge, LINTi1 active low */
449 writew(0x0041, &card->plxbase->intr_ctrl_stat);
450
451 /* Allocate IRQ */
452 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) {
453 printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n",
454 pdev->irq);
455 pc300_pci_remove_one(pdev);
456 return -EBUSY;
457 }
458 card->irq = pdev->irq;
459
460 sca_init(card, 0);
461
462 // COTE not set - allows better TX DMA settings
463 // sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card);
464
465 sca_out(0x10, BTCR, card);
466
467 for (i = 0; i < card->n_ports; i++) {
468 port_t *port = &card->ports[i];
469 struct net_device *dev = port_to_dev(port);
470 hdlc_device *hdlc = dev_to_hdlc(dev);
471 port->phy_node = i;
472
473 spin_lock_init(&port->lock);
474 SET_MODULE_OWNER(dev);
475 dev->irq = card->irq;
476 dev->mem_start = ramphys;
477 dev->mem_end = ramphys + ramsize - 1;
478 dev->tx_queue_len = 50;
479 dev->do_ioctl = pc300_ioctl;
480 dev->open = pc300_open;
481 dev->stop = pc300_close;
482 hdlc->attach = sca_attach;
483 hdlc->xmit = sca_xmit;
484 port->settings.clock_type = CLOCK_EXT;
485 port->card = card;
486 if (card->type == PC300_X21)
487 port->iface = IF_IFACE_X21;
488 else
489 port->iface = IF_IFACE_V35;
490
491 if (register_hdlc_device(dev)) {
492 printk(KERN_ERR "pc300: unable to register hdlc "
493 "device\n");
494 port->card = NULL;
495 pc300_pci_remove_one(pdev);
496 return -ENOBUFS;
497 }
498 sca_init_sync_port(port); /* Set up SCA memory */
499
500 printk(KERN_INFO "%s: PC300 node %d\n",
501 dev->name, port->phy_node);
502 }
503 return 0;
504}
505
506
507
508static struct pci_device_id pc300_pci_tbl[] __devinitdata = {
509 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
510 PCI_ANY_ID, 0, 0, 0 },
511 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
512 PCI_ANY_ID, 0, 0, 0 },
513 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_1, PCI_ANY_ID,
514 PCI_ANY_ID, 0, 0, 0 },
515 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_2, PCI_ANY_ID,
516 PCI_ANY_ID, 0, 0, 0 },
517 { 0, }
518};
519
520
521static struct pci_driver pc300_pci_driver = {
522 name: "PC300",
523 id_table: pc300_pci_tbl,
524 probe: pc300_pci_init_one,
525 remove: pc300_pci_remove_one,
526};
527
528
529static int __init pc300_init_module(void)
530{
531#ifdef MODULE
532 printk(KERN_INFO "%s\n", version);
533#endif
534 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
535 printk(KERN_ERR "pc300: Invalid PCI clock frequency\n");
536 return -EINVAL;
537 }
538 if (use_crystal_clock != 0 && use_crystal_clock != 1) {
539 printk(KERN_ERR "pc300: Invalid 'use_crystal_clock' value\n");
540 return -EINVAL;
541 }
542
543 CLOCK_BASE = use_crystal_clock ? 24576000 : pci_clock_freq;
544
545 return pci_module_init(&pc300_pci_driver);
546}
547
548
549
550static void __exit pc300_cleanup_module(void)
551{
552 pci_unregister_driver(&pc300_pci_driver);
553}
554
555MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
556MODULE_DESCRIPTION("Cyclades PC300 serial port driver");
557MODULE_LICENSE("GPL v2");
558MODULE_DEVICE_TABLE(pci, pc300_pci_tbl);
559module_param(pci_clock_freq, int, 0444);
560MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
561module_param(use_crystal_clock, int, 0444);
562MODULE_PARM_DESC(use_crystal_clock,
563 "Use 24.576 MHz clock instead of PCI clock");
564module_init(pc300_init_module);
565module_exit(pc300_cleanup_module);
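
The probe routine above sizes the per-port descriptor rings from whatever RAM sca_detect_ram() finds: the total is divided by the number of ports and by the footprint of one buffer (descriptor plus maximum frame), the TX share is capped at MAX_TX_BUFFERS, and the remainder becomes RX buffers. A stand-alone sketch of that arithmetic (the footprint constants are illustrative, not the real sizeof(pkt_desc) or HDLC_MAX_MRU):

#include <stdio.h>

#define MAX_TX_BUFFERS	10	/* same cap as the driver */
#define DESC_SIZE	16	/* placeholder for sizeof(pkt_desc) */
#define MAX_FRAME	1600	/* placeholder for HDLC_MAX_MRU */

int main(void)
{
	unsigned int ramsize = 32 * 1024;	/* detected on-card RAM */
	unsigned int n_ports = 2;
	unsigned int per_port, tx, rx;

	/* number of TX + RX buffers for one port */
	per_port = ramsize / (n_ports * (DESC_SIZE + MAX_FRAME));
	tx = per_port / 2;
	if (tx > MAX_TX_BUFFERS)
		tx = MAX_TX_BUFFERS;
	rx = per_port - tx;

	printf("%u TX + %u RX buffers per port\n", tx, rx);
	return 0;
}
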
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 59ddd21c3958..8dbcf83bb5f3 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -331,8 +331,7 @@ static void z8530_rtsdtr(struct z8530_channel *c, int set)
331static void z8530_rx(struct z8530_channel *c) 331static void z8530_rx(struct z8530_channel *c)
332{ 332{
333 u8 ch,stat; 333 u8 ch,stat;
334 spin_lock(c->lock); 334
335
336 while(1) 335 while(1)
337 { 336 {
338 /* FIFO empty ? */ 337 /* FIFO empty ? */
@@ -390,7 +389,6 @@ static void z8530_rx(struct z8530_channel *c)
390 */ 389 */
391 write_zsctrl(c, ERR_RES); 390 write_zsctrl(c, ERR_RES);
392 write_zsctrl(c, RES_H_IUS); 391 write_zsctrl(c, RES_H_IUS);
393 spin_unlock(c->lock);
394} 392}
395 393
396 394
@@ -406,7 +404,6 @@ static void z8530_rx(struct z8530_channel *c)
406 404
407static void z8530_tx(struct z8530_channel *c) 405static void z8530_tx(struct z8530_channel *c)
408{ 406{
409 spin_lock(c->lock);
410 while(c->txcount) { 407 while(c->txcount) {
411 /* FIFO full ? */ 408 /* FIFO full ? */
412 if(!(read_zsreg(c, R0)&4)) 409 if(!(read_zsreg(c, R0)&4))
@@ -434,7 +431,6 @@ static void z8530_tx(struct z8530_channel *c)
434 431
435 z8530_tx_done(c); 432 z8530_tx_done(c);
436 write_zsctrl(c, RES_H_IUS); 433 write_zsctrl(c, RES_H_IUS);
437 spin_unlock(c->lock);
438} 434}
439 435
440/** 436/**
@@ -452,7 +448,6 @@ static void z8530_status(struct z8530_channel *chan)
452{ 448{
453 u8 status, altered; 449 u8 status, altered;
454 450
455 spin_lock(chan->lock);
456 status=read_zsreg(chan, R0); 451 status=read_zsreg(chan, R0);
457 altered=chan->status^status; 452 altered=chan->status^status;
458 453
@@ -487,7 +482,6 @@ static void z8530_status(struct z8530_channel *chan)
487 } 482 }
488 write_zsctrl(chan, RES_EXT_INT); 483 write_zsctrl(chan, RES_EXT_INT);
489 write_zsctrl(chan, RES_H_IUS); 484 write_zsctrl(chan, RES_H_IUS);
490 spin_unlock(chan->lock);
491} 485}
492 486
493struct z8530_irqhandler z8530_sync= 487struct z8530_irqhandler z8530_sync=
@@ -511,7 +505,6 @@ EXPORT_SYMBOL(z8530_sync);
511 505
512static void z8530_dma_rx(struct z8530_channel *chan) 506static void z8530_dma_rx(struct z8530_channel *chan)
513{ 507{
514 spin_lock(chan->lock);
515 if(chan->rxdma_on) 508 if(chan->rxdma_on)
516 { 509 {
517 /* Special condition check only */ 510 /* Special condition check only */
@@ -534,7 +527,6 @@ static void z8530_dma_rx(struct z8530_channel *chan)
534 /* DMA is off right now, drain the slow way */ 527 /* DMA is off right now, drain the slow way */
535 z8530_rx(chan); 528 z8530_rx(chan);
536 } 529 }
537 spin_unlock(chan->lock);
538} 530}
539 531
540/** 532/**
@@ -547,7 +539,6 @@ static void z8530_dma_rx(struct z8530_channel *chan)
547 539
548static void z8530_dma_tx(struct z8530_channel *chan) 540static void z8530_dma_tx(struct z8530_channel *chan)
549{ 541{
550 spin_lock(chan->lock);
551 if(!chan->dma_tx) 542 if(!chan->dma_tx)
552 { 543 {
553 printk(KERN_WARNING "Hey who turned the DMA off?\n"); 544 printk(KERN_WARNING "Hey who turned the DMA off?\n");
@@ -557,7 +548,6 @@ static void z8530_dma_tx(struct z8530_channel *chan)
557 /* This shouldnt occur in DMA mode */ 548 /* This shouldnt occur in DMA mode */
558 printk(KERN_ERR "DMA tx - bogus event!\n"); 549 printk(KERN_ERR "DMA tx - bogus event!\n");
559 z8530_tx(chan); 550 z8530_tx(chan);
560 spin_unlock(chan->lock);
561} 551}
562 552
563/** 553/**
@@ -596,7 +586,6 @@ static void z8530_dma_status(struct z8530_channel *chan)
596 } 586 }
597 } 587 }
598 588
599 spin_lock(chan->lock);
600 if(altered&chan->dcdcheck) 589 if(altered&chan->dcdcheck)
601 { 590 {
602 if(status&chan->dcdcheck) 591 if(status&chan->dcdcheck)
@@ -618,7 +607,6 @@ static void z8530_dma_status(struct z8530_channel *chan)
618 607
619 write_zsctrl(chan, RES_EXT_INT); 608 write_zsctrl(chan, RES_EXT_INT);
620 write_zsctrl(chan, RES_H_IUS); 609 write_zsctrl(chan, RES_H_IUS);
621 spin_unlock(chan->lock);
622} 610}
623 611
624struct z8530_irqhandler z8530_dma_sync= 612struct z8530_irqhandler z8530_dma_sync=
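
The z85230.c hunks above delete the spin_lock(c->lock)/spin_unlock() calls inside the individual RX/TX/status handlers: those helpers are only reached from code that already holds the channel lock, and re-taking a non-recursive spinlock there would deadlock, so locking is left entirely to the caller. A minimal sketch of that convention with placeholder names:

#include <linux/spinlock.h>

struct chan {
	spinlock_t lock;
	int txcount;
};

/* Helper: documented to run with chan->lock already held. */
static void chan_tx(struct chan *c)
{
	while (c->txcount)
		c->txcount--;	/* stand-in for draining the FIFO */
}

/* Entry point: the only place the lock is taken. */
static void chan_irq(struct chan *c)
{
	spin_lock(&c->lock);
	chan_tx(c);	/* would deadlock if chan_tx() re-locked */
	spin_unlock(&c->lock);
}
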
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 8286678513b9..3a064def162e 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -352,6 +352,10 @@
352#define BCM43xx_UCODEFLAG_UNKPACTRL 0x0040 352#define BCM43xx_UCODEFLAG_UNKPACTRL 0x0040
353#define BCM43xx_UCODEFLAG_JAPAN 0x0080 353#define BCM43xx_UCODEFLAG_JAPAN 0x0080
354 354
355/* Hardware Radio Enable masks */
356#define BCM43xx_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16)
357#define BCM43xx_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4)
358
355/* Generic-Interrupt reasons. */ 359/* Generic-Interrupt reasons. */
356#define BCM43xx_IRQ_READY (1 << 0) 360#define BCM43xx_IRQ_READY (1 << 0)
357#define BCM43xx_IRQ_BEACON (1 << 1) 361#define BCM43xx_IRQ_BEACON (1 << 1)
@@ -758,7 +762,8 @@ struct bcm43xx_private {
758 bad_frames_preempt:1, /* Use "Bad Frames Preemption" (default off) */ 762 bad_frames_preempt:1, /* Use "Bad Frames Preemption" (default off) */
759 reg124_set_0x4:1, /* Some variable to keep track of IRQ stuff. */ 763 reg124_set_0x4:1, /* Some variable to keep track of IRQ stuff. */
760 short_preamble:1, /* TRUE, if short preamble is enabled. */ 764 short_preamble:1, /* TRUE, if short preamble is enabled. */
761 firmware_norelease:1; /* Do not release the firmware. Used on suspend. */ 765 firmware_norelease:1, /* Do not release the firmware. Used on suspend. */
766 radio_hw_enable:1; /* TRUE if radio is hardware enabled */
762 767
763 struct bcm43xx_stats stats; 768 struct bcm43xx_stats stats;
764 769
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
index 7d383a27b927..8f198befba39 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
@@ -26,6 +26,7 @@
26*/ 26*/
27 27
28#include "bcm43xx_leds.h" 28#include "bcm43xx_leds.h"
29#include "bcm43xx_radio.h"
29#include "bcm43xx.h" 30#include "bcm43xx.h"
30 31
31#include <asm/bitops.h> 32#include <asm/bitops.h>
@@ -108,6 +109,7 @@ static void bcm43xx_led_init_hardcoded(struct bcm43xx_private *bcm,
108 switch (led_index) { 109 switch (led_index) {
109 case 0: 110 case 0:
110 led->behaviour = BCM43xx_LED_ACTIVITY; 111 led->behaviour = BCM43xx_LED_ACTIVITY;
112 led->activelow = 1;
111 if (bcm->board_vendor == PCI_VENDOR_ID_COMPAQ) 113 if (bcm->board_vendor == PCI_VENDOR_ID_COMPAQ)
112 led->behaviour = BCM43xx_LED_RADIO_ALL; 114 led->behaviour = BCM43xx_LED_RADIO_ALL;
113 break; 115 break;
@@ -199,20 +201,21 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
199 turn_on = activity; 201 turn_on = activity;
200 break; 202 break;
201 case BCM43xx_LED_RADIO_ALL: 203 case BCM43xx_LED_RADIO_ALL:
202 turn_on = radio->enabled; 204 turn_on = radio->enabled && bcm43xx_is_hw_radio_enabled(bcm);
203 break; 205 break;
204 case BCM43xx_LED_RADIO_A: 206 case BCM43xx_LED_RADIO_A:
205 case BCM43xx_LED_BCM4303_2: 207 case BCM43xx_LED_BCM4303_2:
206 turn_on = (radio->enabled && phy->type == BCM43xx_PHYTYPE_A); 208 turn_on = (radio->enabled && bcm43xx_is_hw_radio_enabled(bcm) &&
209 phy->type == BCM43xx_PHYTYPE_A);
207 break; 210 break;
208 case BCM43xx_LED_RADIO_B: 211 case BCM43xx_LED_RADIO_B:
209 case BCM43xx_LED_BCM4303_1: 212 case BCM43xx_LED_BCM4303_1:
210 turn_on = (radio->enabled && 213 turn_on = (radio->enabled && bcm43xx_is_hw_radio_enabled(bcm) &&
211 (phy->type == BCM43xx_PHYTYPE_B || 214 (phy->type == BCM43xx_PHYTYPE_B ||
212 phy->type == BCM43xx_PHYTYPE_G)); 215 phy->type == BCM43xx_PHYTYPE_G));
213 break; 216 break;
214 case BCM43xx_LED_MODE_BG: 217 case BCM43xx_LED_MODE_BG:
215 if (phy->type == BCM43xx_PHYTYPE_G && 218 if (phy->type == BCM43xx_PHYTYPE_G && bcm43xx_is_hw_radio_enabled(bcm) &&
216 1/*FIXME: using G rates.*/) 219 1/*FIXME: using G rates.*/)
217 turn_on = 1; 220 turn_on = 1;
218 break; 221 break;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 91b752e3d07e..23aaf1ed8541 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -2441,6 +2441,9 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
2441 if (err) 2441 if (err)
2442 goto err_gpio_cleanup; 2442 goto err_gpio_cleanup;
2443 bcm43xx_radio_turn_on(bcm); 2443 bcm43xx_radio_turn_on(bcm);
2444 bcm->radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm);
2445 dprintk(KERN_INFO PFX "Radio %s by hardware\n",
2446 (bcm->radio_hw_enable == 0) ? "disabled" : "enabled");
2444 2447
2445 bcm43xx_write16(bcm, 0x03E6, 0x0000); 2448 bcm43xx_write16(bcm, 0x03E6, 0x0000);
2446 err = bcm43xx_phy_init(bcm); 2449 err = bcm43xx_phy_init(bcm);
@@ -3175,9 +3178,24 @@ static void bcm43xx_periodic_every30sec(struct bcm43xx_private *bcm)
3175 3178
3176static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm) 3179static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
3177{ 3180{
3181 bcm43xx_phy_xmitpower(bcm); //FIXME: unless scanning?
3182 //TODO for APHY (temperature?)
3183}
3184
3185static void bcm43xx_periodic_every1sec(struct bcm43xx_private *bcm)
3186{
3178 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 3187 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
3179 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm); 3188 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
3189 int radio_hw_enable;
3180 3190
3191 /* check if radio hardware enabled status changed */
3192 radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm);
3193 if (unlikely(bcm->radio_hw_enable != radio_hw_enable)) {
3194 bcm->radio_hw_enable = radio_hw_enable;
3195 dprintk(KERN_INFO PFX "Radio hardware status changed to %s\n",
3196 (radio_hw_enable == 0) ? "disabled" : "enabled");
3197 bcm43xx_leds_update(bcm, 0);
3198 }
3181 if (phy->type == BCM43xx_PHYTYPE_G) { 3199 if (phy->type == BCM43xx_PHYTYPE_G) {
3182 //TODO: update_aci_moving_average 3200 //TODO: update_aci_moving_average
3183 if (radio->aci_enable && radio->aci_wlan_automatic) { 3201 if (radio->aci_enable && radio->aci_wlan_automatic) {
@@ -3201,21 +3219,21 @@ static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
3201 //TODO: implement rev1 workaround 3219 //TODO: implement rev1 workaround
3202 } 3220 }
3203 } 3221 }
3204 bcm43xx_phy_xmitpower(bcm); //FIXME: unless scanning?
3205 //TODO for APHY (temperature?)
3206} 3222}
3207 3223
3208static void do_periodic_work(struct bcm43xx_private *bcm) 3224static void do_periodic_work(struct bcm43xx_private *bcm)
3209{ 3225{
3210 if (bcm->periodic_state % 8 == 0) 3226 if (bcm->periodic_state % 120 == 0)
3211 bcm43xx_periodic_every120sec(bcm); 3227 bcm43xx_periodic_every120sec(bcm);
3212 if (bcm->periodic_state % 4 == 0) 3228 if (bcm->periodic_state % 60 == 0)
3213 bcm43xx_periodic_every60sec(bcm); 3229 bcm43xx_periodic_every60sec(bcm);
3214 if (bcm->periodic_state % 2 == 0) 3230 if (bcm->periodic_state % 30 == 0)
3215 bcm43xx_periodic_every30sec(bcm); 3231 bcm43xx_periodic_every30sec(bcm);
3216 bcm43xx_periodic_every15sec(bcm); 3232 if (bcm->periodic_state % 15 == 0)
3233 bcm43xx_periodic_every15sec(bcm);
3234 bcm43xx_periodic_every1sec(bcm);
3217 3235
3218 schedule_delayed_work(&bcm->periodic_work, HZ * 15); 3236 schedule_delayed_work(&bcm->periodic_work, HZ);
3219} 3237}
3220 3238
3221static void bcm43xx_periodic_work_handler(struct work_struct *work) 3239static void bcm43xx_periodic_work_handler(struct work_struct *work)
@@ -3228,7 +3246,7 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work)
3228 unsigned long orig_trans_start = 0; 3246 unsigned long orig_trans_start = 0;
3229 3247
3230 mutex_lock(&bcm->mutex); 3248 mutex_lock(&bcm->mutex);
3231 if (unlikely(bcm->periodic_state % 4 == 0)) { 3249 if (unlikely(bcm->periodic_state % 60 == 0)) {
3232 /* Periodic work will take a long time, so we want it to 3250 /* Periodic work will take a long time, so we want it to
3233 * be preemtible. 3251 * be preemtible.
3234 */ 3252 */
@@ -3260,7 +3278,7 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work)
3260 3278
3261 do_periodic_work(bcm); 3279 do_periodic_work(bcm);
3262 3280
3263 if (unlikely(bcm->periodic_state % 4 == 0)) { 3281 if (unlikely(bcm->periodic_state % 60 == 0)) {
3264 spin_lock_irqsave(&bcm->irq_lock, flags); 3282 spin_lock_irqsave(&bcm->irq_lock, flags);
3265 tasklet_enable(&bcm->isr_tasklet); 3283 tasklet_enable(&bcm->isr_tasklet);
3266 bcm43xx_interrupt_enable(bcm, savedirqs); 3284 bcm43xx_interrupt_enable(bcm, savedirqs);
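
In the bcm43xx_main.c hunks above the periodic work is now rescheduled every HZ (one second) instead of every 15 seconds, so periodic_state counts seconds and the dispatch moduli change from tick counts (8/4/2) to plain seconds (120/60/30/15), with a new once-per-second handler for the radio rfkill poll. A sketch of the new dispatch arithmetic with placeholder handlers:

/* Placeholder handlers standing in for the per-interval work. */
static void every120sec(void) { }
static void every60sec(void) { }
static void every30sec(void) { }
static void every15sec(void) { }
static void every1sec(void) { }

static unsigned int periodic_state;	/* advanced once per second */

static void do_periodic_work(void)
{
	if (periodic_state % 120 == 0)
		every120sec();
	if (periodic_state % 60 == 0)
		every60sec();
	if (periodic_state % 30 == 0)
		every30sec();
	if (periodic_state % 15 == 0)
		every15sec();
	every1sec();
	periodic_state++;
}
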
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
index bb9c484d7e19..af19a07032a3 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
@@ -1981,6 +1981,7 @@ void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm)
1981 } 1981 }
1982 radio->enabled = 1; 1982 radio->enabled = 1;
1983 dprintk(KERN_INFO PFX "Radio turned on\n"); 1983 dprintk(KERN_INFO PFX "Radio turned on\n");
1984 bcm43xx_leds_update(bcm, 0);
1984} 1985}
1985 1986
1986void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm) 1987void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm)
@@ -2001,6 +2002,7 @@ void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm)
2001 bcm43xx_phy_write(bcm, 0x0015, 0xAA00); 2002 bcm43xx_phy_write(bcm, 0x0015, 0xAA00);
2002 radio->enabled = 0; 2003 radio->enabled = 0;
2003 dprintk(KERN_INFO PFX "Radio turned off\n"); 2004 dprintk(KERN_INFO PFX "Radio turned off\n");
2005 bcm43xx_leds_update(bcm, 0);
2004} 2006}
2005 2007
2006void bcm43xx_radio_clear_tssi(struct bcm43xx_private *bcm) 2008void bcm43xx_radio_clear_tssi(struct bcm43xx_private *bcm)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.h b/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
index 9ed18039fa3e..77a98a53a2e2 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
@@ -65,6 +65,22 @@ void bcm43xx_radio_init2060(struct bcm43xx_private *bcm);
65void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm); 65void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm);
66void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm); 66void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm);
67 67
68static inline
69int bcm43xx_is_hw_radio_enabled(struct bcm43xx_private *bcm)
70{
71 /* function to return state of hardware enable of radio
72 * returns 0 if radio disabled, 1 if radio enabled
73 */
74 if (bcm->current_core->rev >= 3)
75 return ((bcm43xx_read32(bcm, BCM43xx_MMIO_RADIO_HWENABLED_HI)
76 & BCM43xx_MMIO_RADIO_HWENABLED_HI_MASK)
77 == 0) ? 1 : 0;
78 else
79 return ((bcm43xx_read16(bcm, BCM43xx_MMIO_RADIO_HWENABLED_LO)
80 & BCM43xx_MMIO_RADIO_HWENABLED_LO_MASK)
81 == 0) ? 0 : 1;
82}
83
68int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm, u8 channel, 84int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm, u8 channel,
69 int synthetic_pu_workaround); 85 int synthetic_pu_workaround);
70 86
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 22cb3fb7502e..c878a2f3239c 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -9166,7 +9166,7 @@ static int ipw_wx_set_rts(struct net_device *dev,
9166{ 9166{
9167 struct ipw_priv *priv = ieee80211_priv(dev); 9167 struct ipw_priv *priv = ieee80211_priv(dev);
9168 mutex_lock(&priv->mutex); 9168 mutex_lock(&priv->mutex);
9169 if (wrqu->rts.disabled) 9169 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9170 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 9170 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9171 else { 9171 else {
9172 if (wrqu->rts.value < MIN_RTS_THRESHOLD || 9172 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
@@ -9255,7 +9255,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
9255{ 9255{
9256 struct ipw_priv *priv = ieee80211_priv(dev); 9256 struct ipw_priv *priv = ieee80211_priv(dev);
9257 mutex_lock(&priv->mutex); 9257 mutex_lock(&priv->mutex);
9258 if (wrqu->frag.disabled) 9258 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9259 priv->ieee->fts = DEFAULT_FTS; 9259 priv->ieee->fts = DEFAULT_FTS;
9260 else { 9260 else {
9261 if (wrqu->frag.value < MIN_FRAG_THRESHOLD || 9261 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 936c888e03e1..2a65bb93de26 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2059,7 +2059,7 @@ static int determine_firmware(struct net_device *dev)
2059 int err; 2059 int err;
2060 struct comp_id nic_id, sta_id; 2060 struct comp_id nic_id, sta_id;
2061 unsigned int firmver; 2061 unsigned int firmver;
2062 char tmp[SYMBOL_MAX_VER_LEN+1]; 2062 char tmp[SYMBOL_MAX_VER_LEN+1] __attribute__((aligned(2)));
2063 2063
2064 /* Get the hardware version */ 2064 /* Get the hardware version */
2065 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id); 2065 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index f057fd9fcd79..a037b11dac9d 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22 22
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/ethtool.h>
24#include <linux/pci.h> 25#include <linux/pci.h>
25#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
26#include <linux/delay.h> 27#include <linux/delay.h>
@@ -787,6 +788,17 @@ islpci_set_multicast_list(struct net_device *dev)
787} 788}
788#endif 789#endif
789 790
791static void islpci_ethtool_get_drvinfo(struct net_device *dev,
792 struct ethtool_drvinfo *info)
793{
794 strcpy(info->driver, DRV_NAME);
795 strcpy(info->version, DRV_VERSION);
796}
797
798static struct ethtool_ops islpci_ethtool_ops = {
799 .get_drvinfo = islpci_ethtool_get_drvinfo,
800};
801
790struct net_device * 802struct net_device *
791islpci_setup(struct pci_dev *pdev) 803islpci_setup(struct pci_dev *pdev)
792{ 804{
@@ -813,6 +825,7 @@ islpci_setup(struct pci_dev *pdev)
813 ndev->do_ioctl = &prism54_ioctl; 825 ndev->do_ioctl = &prism54_ioctl;
814 ndev->wireless_handlers = 826 ndev->wireless_handlers =
815 (struct iw_handler_def *) &prism54_handler_def; 827 (struct iw_handler_def *) &prism54_handler_def;
828 ndev->ethtool_ops = &islpci_ethtool_ops;
816 829
817 ndev->hard_start_xmit = &islpci_eth_transmit; 830 ndev->hard_start_xmit = &islpci_eth_transmit;
818 /* ndev->set_multicast_list = &islpci_set_multicast_list; */ 831 /* ndev->set_multicast_list = &islpci_set_multicast_list; */
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index a9aa1662eaa4..736666da6c24 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -211,4 +211,8 @@ islpci_trigger(islpci_private *priv)
211 211
212int islpci_free_memory(islpci_private *); 212int islpci_free_memory(islpci_private *);
213struct net_device *islpci_setup(struct pci_dev *); 213struct net_device *islpci_setup(struct pci_dev *);
214
215#define DRV_NAME "prism54"
216#define DRV_VERSION "1.2"
217
214#endif /* _ISLPCI_DEV_H */ 218#endif /* _ISLPCI_DEV_H */
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 58257b40c043..3dcb13bb7d57 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -28,9 +28,6 @@
28#include "islpci_mgt.h" /* for pc_debug */ 28#include "islpci_mgt.h" /* for pc_debug */
29#include "isl_oid.h" 29#include "isl_oid.h"
30 30
31#define DRV_NAME "prism54"
32#define DRV_VERSION "1.2"
33
34MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>"); 31MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>");
35MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter"); 32MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter");
36MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 78ea72fb8f0c..12dfc0b6efe6 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -84,6 +84,18 @@ static void print_id(struct zd_chip *chip)
84 dev_info(zd_chip_dev(chip), "%s\n", buffer); 84 dev_info(zd_chip_dev(chip), "%s\n", buffer);
85} 85}
86 86
87static zd_addr_t inc_addr(zd_addr_t addr)
88{
89 u16 a = (u16)addr;
90 /* Control registers use byte addressing, but everything else uses word
91 * addressing. */
92 if ((a & 0xf000) == CR_START)
93 a += 2;
94 else
95 a += 1;
96 return (zd_addr_t)a;
97}
98
87/* Read a variable number of 32-bit values. Parameter count is not allowed to 99/* Read a variable number of 32-bit values. Parameter count is not allowed to
88 * exceed USB_MAX_IOREAD32_COUNT. 100 * exceed USB_MAX_IOREAD32_COUNT.
89 */ 101 */
@@ -114,7 +126,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
114 for (i = 0; i < count; i++) { 126 for (i = 0; i < count; i++) {
115 int j = 2*i; 127 int j = 2*i;
116 /* We read the high word always first. */ 128 /* We read the high word always first. */
117 a16[j] = zd_inc_word(addr[i]); 129 a16[j] = inc_addr(addr[i]);
118 a16[j+1] = addr[i]; 130 a16[j+1] = addr[i];
119 } 131 }
120 132
@@ -163,7 +175,7 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
163 j = 2*i; 175 j = 2*i;
164 /* We write the high word always first. */ 176 /* We write the high word always first. */
165 ioreqs16[j].value = ioreqs[i].value >> 16; 177 ioreqs16[j].value = ioreqs[i].value >> 16;
166 ioreqs16[j].addr = zd_inc_word(ioreqs[i].addr); 178 ioreqs16[j].addr = inc_addr(ioreqs[i].addr);
167 ioreqs16[j+1].value = ioreqs[i].value; 179 ioreqs16[j+1].value = ioreqs[i].value;
168 ioreqs16[j+1].addr = ioreqs[i].addr; 180 ioreqs16[j+1].addr = ioreqs[i].addr;
169 } 181 }
@@ -466,7 +478,8 @@ static int read_values(struct zd_chip *chip, u8 *values, size_t count,
466 478
467 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 479 ZD_ASSERT(mutex_is_locked(&chip->mutex));
468 for (i = 0;;) { 480 for (i = 0;;) {
469 r = zd_ioread32_locked(chip, &v, e2p_addr+i/2); 481 r = zd_ioread32_locked(chip, &v,
482 (zd_addr_t)((u16)e2p_addr+i/2));
470 if (r) 483 if (r)
471 return r; 484 return r;
472 v -= guard; 485 v -= guard;
@@ -798,47 +811,18 @@ static int hw_reset_phy(struct zd_chip *chip)
798static int zd1211_hw_init_hmac(struct zd_chip *chip) 811static int zd1211_hw_init_hmac(struct zd_chip *chip)
799{ 812{
800 static const struct zd_ioreq32 ioreqs[] = { 813 static const struct zd_ioreq32 ioreqs[] = {
801 { CR_ACK_TIMEOUT_EXT, 0x20 },
802 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
803 { CR_ZD1211_RETRY_MAX, 0x2 }, 814 { CR_ZD1211_RETRY_MAX, 0x2 },
804 { CR_SNIFFER_ON, 0 },
805 { CR_RX_FILTER, STA_RX_FILTER },
806 { CR_GROUP_HASH_P1, 0x00 },
807 { CR_GROUP_HASH_P2, 0x80000000 },
808 { CR_REG1, 0xa4 },
809 { CR_ADDA_PWR_DWN, 0x7f },
810 { CR_BCN_PLCP_CFG, 0x00f00401 },
811 { CR_PHY_DELAY, 0x00 },
812 { CR_ACK_TIMEOUT_EXT, 0x80 },
813 { CR_ADDA_PWR_DWN, 0x00 },
814 { CR_ACK_TIME_80211, 0x100 },
815 { CR_RX_PE_DELAY, 0x70 },
816 { CR_PS_CTRL, 0x10000000 },
817 { CR_RTS_CTS_RATE, 0x02030203 },
818 { CR_RX_THRESHOLD, 0x000c0640 }, 815 { CR_RX_THRESHOLD, 0x000c0640 },
819 { CR_AFTER_PNP, 0x1 },
820 { CR_WEP_PROTECT, 0x114 },
821 }; 816 };
822 817
823 int r;
824
825 dev_dbg_f(zd_chip_dev(chip), "\n"); 818 dev_dbg_f(zd_chip_dev(chip), "\n");
826 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 819 ZD_ASSERT(mutex_is_locked(&chip->mutex));
827 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 820 return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
828#ifdef DEBUG
829 if (r) {
830 dev_err(zd_chip_dev(chip),
831 "error in zd_iowrite32a_locked. Error number %d\n", r);
832 }
833#endif /* DEBUG */
834 return r;
835} 821}
836 822
837static int zd1211b_hw_init_hmac(struct zd_chip *chip) 823static int zd1211b_hw_init_hmac(struct zd_chip *chip)
838{ 824{
839 static const struct zd_ioreq32 ioreqs[] = { 825 static const struct zd_ioreq32 ioreqs[] = {
840 { CR_ACK_TIMEOUT_EXT, 0x20 },
841 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
842 { CR_ZD1211B_RETRY_MAX, 0x02020202 }, 826 { CR_ZD1211B_RETRY_MAX, 0x02020202 },
843 { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f }, 827 { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f },
844 { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f }, 828 { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f },
@@ -847,6 +831,20 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
847 { CR_ZD1211B_AIFS_CTL1, 0x00280028 }, 831 { CR_ZD1211B_AIFS_CTL1, 0x00280028 },
848 { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, 832 { CR_ZD1211B_AIFS_CTL2, 0x008C003C },
849 { CR_ZD1211B_TXOP, 0x01800824 }, 833 { CR_ZD1211B_TXOP, 0x01800824 },
834 { CR_RX_THRESHOLD, 0x000c0eff, },
835 };
836
837 dev_dbg_f(zd_chip_dev(chip), "\n");
838 ZD_ASSERT(mutex_is_locked(&chip->mutex));
839 return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
840}
841
842static int hw_init_hmac(struct zd_chip *chip)
843{
844 int r;
845 static const struct zd_ioreq32 ioreqs[] = {
846 { CR_ACK_TIMEOUT_EXT, 0x20 },
847 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
850 { CR_SNIFFER_ON, 0 }, 848 { CR_SNIFFER_ON, 0 },
851 { CR_RX_FILTER, STA_RX_FILTER }, 849 { CR_RX_FILTER, STA_RX_FILTER },
852 { CR_GROUP_HASH_P1, 0x00 }, 850 { CR_GROUP_HASH_P1, 0x00 },
@@ -861,25 +859,16 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
861 { CR_RX_PE_DELAY, 0x70 }, 859 { CR_RX_PE_DELAY, 0x70 },
862 { CR_PS_CTRL, 0x10000000 }, 860 { CR_PS_CTRL, 0x10000000 },
863 { CR_RTS_CTS_RATE, 0x02030203 }, 861 { CR_RTS_CTS_RATE, 0x02030203 },
864 { CR_RX_THRESHOLD, 0x000c0eff, },
865 { CR_AFTER_PNP, 0x1 }, 862 { CR_AFTER_PNP, 0x1 },
866 { CR_WEP_PROTECT, 0x114 }, 863 { CR_WEP_PROTECT, 0x114 },
864 { CR_IFS_VALUE, IFS_VALUE_DEFAULT },
867 }; 865 };
868 866
869 int r;
870
871 dev_dbg_f(zd_chip_dev(chip), "\n");
872 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 867 ZD_ASSERT(mutex_is_locked(&chip->mutex));
873 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 868 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
874 if (r) { 869 if (r)
875 dev_dbg_f(zd_chip_dev(chip), 870 return r;
876 "error in zd_iowrite32a_locked. Error number %d\n", r);
877 }
878 return r;
879}
880 871
881static int hw_init_hmac(struct zd_chip *chip)
882{
883 return chip->is_zd1211b ? 872 return chip->is_zd1211b ?
884 zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip); 873 zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip);
885} 874}
@@ -974,16 +963,14 @@ static int hw_init(struct zd_chip *chip)
974 if (r) 963 if (r)
975 return r; 964 return r;
976 965
977 /* Although the vendor driver defaults to a different value during
978 * init, it overwrites the IFS value with the following every time
979 * the channel changes. We should aim to be more intelligent... */
980 r = zd_iowrite32_locked(chip, IFS_VALUE_DEFAULT, CR_IFS_VALUE);
981 if (r)
982 return r;
983
984 return set_beacon_interval(chip, 100); 966 return set_beacon_interval(chip, 100);
985} 967}
986 968
969static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
970{
971 return (zd_addr_t)((u16)chip->fw_regs_base + offset);
972}
973
987#ifdef DEBUG 974#ifdef DEBUG
988static int dump_cr(struct zd_chip *chip, const zd_addr_t addr, 975static int dump_cr(struct zd_chip *chip, const zd_addr_t addr,
989 const char *addr_string) 976 const char *addr_string)
@@ -1018,9 +1005,11 @@ static int test_init(struct zd_chip *chip)
1018 1005
1019static void dump_fw_registers(struct zd_chip *chip) 1006static void dump_fw_registers(struct zd_chip *chip)
1020{ 1007{
1021 static const zd_addr_t addr[4] = { 1008 const zd_addr_t addr[4] = {
1022 FW_FIRMWARE_VER, FW_USB_SPEED, FW_FIX_TX_RATE, 1009 fw_reg_addr(chip, FW_REG_FIRMWARE_VER),
1023 FW_LINK_STATUS 1010 fw_reg_addr(chip, FW_REG_USB_SPEED),
1011 fw_reg_addr(chip, FW_REG_FIX_TX_RATE),
1012 fw_reg_addr(chip, FW_REG_LED_LINK_STATUS),
1024 }; 1013 };
1025 1014
1026 int r; 1015 int r;
@@ -1046,7 +1035,8 @@ static int print_fw_version(struct zd_chip *chip)
1046 int r; 1035 int r;
1047 u16 version; 1036 u16 version;
1048 1037
1049 r = zd_ioread16_locked(chip, &version, FW_FIRMWARE_VER); 1038 r = zd_ioread16_locked(chip, &version,
1039 fw_reg_addr(chip, FW_REG_FIRMWARE_VER));
1050 if (r) 1040 if (r)
1051 return r; 1041 return r;
1052 1042
@@ -1126,6 +1116,22 @@ int zd_chip_disable_hwint(struct zd_chip *chip)
1126 return r; 1116 return r;
1127} 1117}
1128 1118
1119static int read_fw_regs_offset(struct zd_chip *chip)
1120{
1121 int r;
1122
1123 ZD_ASSERT(mutex_is_locked(&chip->mutex));
1124 r = zd_ioread16_locked(chip, (u16*)&chip->fw_regs_base,
1125 FWRAW_REGS_ADDR);
1126 if (r)
1127 return r;
1128 dev_dbg_f(zd_chip_dev(chip), "fw_regs_base: %#06hx\n",
1129 (u16)chip->fw_regs_base);
1130
1131 return 0;
1132}
1133
1134
1129int zd_chip_init_hw(struct zd_chip *chip, u8 device_type) 1135int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
1130{ 1136{
1131 int r; 1137 int r;
@@ -1145,7 +1151,7 @@ int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
1145 if (r) 1151 if (r)
1146 goto out; 1152 goto out;
1147 1153
1148 r = zd_usb_init_hw(&chip->usb); 1154 r = read_fw_regs_offset(chip);
1149 if (r) 1155 if (r)
1150 goto out; 1156 goto out;
1151 1157
@@ -1325,15 +1331,15 @@ u8 zd_chip_get_channel(struct zd_chip *chip)
1325 1331
1326int zd_chip_control_leds(struct zd_chip *chip, enum led_status status) 1332int zd_chip_control_leds(struct zd_chip *chip, enum led_status status)
1327{ 1333{
1328 static const zd_addr_t a[] = { 1334 const zd_addr_t a[] = {
1329 FW_LINK_STATUS, 1335 fw_reg_addr(chip, FW_REG_LED_LINK_STATUS),
1330 CR_LED, 1336 CR_LED,
1331 }; 1337 };
1332 1338
1333 int r; 1339 int r;
1334 u16 v[ARRAY_SIZE(a)]; 1340 u16 v[ARRAY_SIZE(a)];
1335 struct zd_ioreq16 ioreqs[ARRAY_SIZE(a)] = { 1341 struct zd_ioreq16 ioreqs[ARRAY_SIZE(a)] = {
1336 [0] = { FW_LINK_STATUS }, 1342 [0] = { fw_reg_addr(chip, FW_REG_LED_LINK_STATUS) },
1337 [1] = { CR_LED }, 1343 [1] = { CR_LED },
1338 }; 1344 };
1339 u16 other_led; 1345 u16 other_led;
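Note on the zd_chip.c hunks above: the firmware register base is now read once at init (read_fw_regs_offset() stores the value from FWRAW_REGS_ADDR in chip->fw_regs_base), and every firmware register access goes through fw_reg_addr(), which simply adds a word offset to that base. Below is a minimal userspace sketch of that lookup; the struct, the stub types and the base value 0xee52 are purely illustrative stand-ins for the driver's zd_addr_t and locked I/O helpers.

#include <stdint.h>
#include <stdio.h>

enum {
	FW_REG_FIRMWARE_VER    = 0,
	FW_REG_USB_SPEED       = 1,
	FW_REG_FIX_TX_RATE     = 2,
	FW_REG_LED_LINK_STATUS = 3,
};

struct chip_fw {
	uint16_t fw_regs_base;	/* read once from FWRAW_REGS_ADDR at init */
};

/* Mirrors fw_reg_addr(): firmware registers are word offsets from a
 * base address that the firmware publishes at runtime. */
static uint16_t fw_reg_addr(const struct chip_fw *chip, uint16_t offset)
{
	return chip->fw_regs_base + offset;
}

int main(void)
{
	struct chip_fw chip = { .fw_regs_base = 0xee52 };	/* example value only */

	printf("FW_REG_FIRMWARE_VER    -> %#06x\n",
	       (unsigned)fw_reg_addr(&chip, FW_REG_FIRMWARE_VER));
	printf("FW_REG_LED_LINK_STATUS -> %#06x\n",
	       (unsigned)fw_reg_addr(&chip, FW_REG_LED_LINK_STATUS));
	return 0;
}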
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index a4e3cee9b59d..b07569e391ee 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -18,7 +18,6 @@
18#ifndef _ZD_CHIP_H 18#ifndef _ZD_CHIP_H
19#define _ZD_CHIP_H 19#define _ZD_CHIP_H
20 20
21#include "zd_types.h"
22#include "zd_rf.h" 21#include "zd_rf.h"
23#include "zd_usb.h" 22#include "zd_usb.h"
24 23
@@ -27,6 +26,37 @@
27 * adds a processor for handling the USB protocol. 26 * adds a processor for handling the USB protocol.
28 */ 27 */
29 28
29/* Address space */
30enum {
31 /* CONTROL REGISTERS */
32 CR_START = 0x9000,
33
34
35 /* FIRMWARE */
36 FW_START = 0xee00,
37
38
39 /* EEPROM */
40 E2P_START = 0xf800,
41 E2P_LEN = 0x800,
42
43 /* EEPROM layout */
44 E2P_LOAD_CODE_LEN = 0xe, /* base 0xf800 */
45 E2P_LOAD_VECT_LEN = 0x9, /* base 0xf80e */
46 /* E2P_DATA indexes into this */
47 E2P_DATA_LEN = 0x7e, /* base 0xf817 */
48 E2P_BOOT_CODE_LEN = 0x760, /* base 0xf895 */
49 E2P_INTR_VECT_LEN = 0xb, /* base 0xfff5 */
50
51 /* Some precomputed offsets into the EEPROM */
52 E2P_DATA_OFFSET = E2P_LOAD_CODE_LEN + E2P_LOAD_VECT_LEN,
53 E2P_BOOT_CODE_OFFSET = E2P_DATA_OFFSET + E2P_DATA_LEN,
54};
55
56#define CTL_REG(offset) ((zd_addr_t)(CR_START + (offset)))
57#define E2P_DATA(offset) ((zd_addr_t)(E2P_START + E2P_DATA_OFFSET + (offset)))
58#define FWRAW_DATA(offset) ((zd_addr_t)(FW_START + (offset)))
59
30/* 8-bit hardware registers */ 60/* 8-bit hardware registers */
31#define CR0 CTL_REG(0x0000) 61#define CR0 CTL_REG(0x0000)
32#define CR1 CTL_REG(0x0004) 62#define CR1 CTL_REG(0x0004)
@@ -302,7 +332,7 @@
302 332
303#define CR_MAX_PHY_REG 255 333#define CR_MAX_PHY_REG 255
304 334
305/* Taken from the ZYDAS driver, not all of them are relevant for the ZSD1211 335/* Taken from the ZYDAS driver, not all of them are relevant for the ZD1211
306 * driver. 336 * driver.
307 */ 337 */
308 338
@@ -594,81 +624,71 @@
594/* 624/*
595 * Upper 16 bit contains the regulatory domain. 625 * Upper 16 bit contains the regulatory domain.
596 */ 626 */
597#define E2P_SUBID E2P_REG(0x00) 627#define E2P_SUBID E2P_DATA(0x00)
598#define E2P_POD E2P_REG(0x02) 628#define E2P_POD E2P_DATA(0x02)
599#define E2P_MAC_ADDR_P1 E2P_REG(0x04) 629#define E2P_MAC_ADDR_P1 E2P_DATA(0x04)
600#define E2P_MAC_ADDR_P2 E2P_REG(0x06) 630#define E2P_MAC_ADDR_P2 E2P_DATA(0x06)
601#define E2P_PWR_CAL_VALUE1 E2P_REG(0x08) 631#define E2P_PWR_CAL_VALUE1 E2P_DATA(0x08)
602#define E2P_PWR_CAL_VALUE2 E2P_REG(0x0a) 632#define E2P_PWR_CAL_VALUE2 E2P_DATA(0x0a)
603#define E2P_PWR_CAL_VALUE3 E2P_REG(0x0c) 633#define E2P_PWR_CAL_VALUE3 E2P_DATA(0x0c)
604#define E2P_PWR_CAL_VALUE4 E2P_REG(0x0e) 634#define E2P_PWR_CAL_VALUE4 E2P_DATA(0x0e)
605#define E2P_PWR_INT_VALUE1 E2P_REG(0x10) 635#define E2P_PWR_INT_VALUE1 E2P_DATA(0x10)
606#define E2P_PWR_INT_VALUE2 E2P_REG(0x12) 636#define E2P_PWR_INT_VALUE2 E2P_DATA(0x12)
607#define E2P_PWR_INT_VALUE3 E2P_REG(0x14) 637#define E2P_PWR_INT_VALUE3 E2P_DATA(0x14)
608#define E2P_PWR_INT_VALUE4 E2P_REG(0x16) 638#define E2P_PWR_INT_VALUE4 E2P_DATA(0x16)
609 639
610/* Contains a bit for each allowed channel. It gives for Europe (ETSI 0x30) 640/* Contains a bit for each allowed channel. It gives for Europe (ETSI 0x30)
611 * also only 11 channels. */ 641 * also only 11 channels. */
612#define E2P_ALLOWED_CHANNEL E2P_REG(0x18) 642#define E2P_ALLOWED_CHANNEL E2P_DATA(0x18)
613 643
614#define E2P_PHY_REG E2P_REG(0x1a) 644#define E2P_PHY_REG E2P_DATA(0x1a)
615#define E2P_DEVICE_VER E2P_REG(0x20) 645#define E2P_DEVICE_VER E2P_DATA(0x20)
616#define E2P_36M_CAL_VALUE1 E2P_REG(0x28) 646#define E2P_36M_CAL_VALUE1 E2P_DATA(0x28)
617#define E2P_36M_CAL_VALUE2 E2P_REG(0x2a) 647#define E2P_36M_CAL_VALUE2 E2P_DATA(0x2a)
618#define E2P_36M_CAL_VALUE3 E2P_REG(0x2c) 648#define E2P_36M_CAL_VALUE3 E2P_DATA(0x2c)
619#define E2P_36M_CAL_VALUE4 E2P_REG(0x2e) 649#define E2P_36M_CAL_VALUE4 E2P_DATA(0x2e)
620#define E2P_11A_INT_VALUE1 E2P_REG(0x30) 650#define E2P_11A_INT_VALUE1 E2P_DATA(0x30)
621#define E2P_11A_INT_VALUE2 E2P_REG(0x32) 651#define E2P_11A_INT_VALUE2 E2P_DATA(0x32)
622#define E2P_11A_INT_VALUE3 E2P_REG(0x34) 652#define E2P_11A_INT_VALUE3 E2P_DATA(0x34)
623#define E2P_11A_INT_VALUE4 E2P_REG(0x36) 653#define E2P_11A_INT_VALUE4 E2P_DATA(0x36)
624#define E2P_48M_CAL_VALUE1 E2P_REG(0x38) 654#define E2P_48M_CAL_VALUE1 E2P_DATA(0x38)
625#define E2P_48M_CAL_VALUE2 E2P_REG(0x3a) 655#define E2P_48M_CAL_VALUE2 E2P_DATA(0x3a)
626#define E2P_48M_CAL_VALUE3 E2P_REG(0x3c) 656#define E2P_48M_CAL_VALUE3 E2P_DATA(0x3c)
627#define E2P_48M_CAL_VALUE4 E2P_REG(0x3e) 657#define E2P_48M_CAL_VALUE4 E2P_DATA(0x3e)
628#define E2P_48M_INT_VALUE1 E2P_REG(0x40) 658#define E2P_48M_INT_VALUE1 E2P_DATA(0x40)
629#define E2P_48M_INT_VALUE2 E2P_REG(0x42) 659#define E2P_48M_INT_VALUE2 E2P_DATA(0x42)
630#define E2P_48M_INT_VALUE3 E2P_REG(0x44) 660#define E2P_48M_INT_VALUE3 E2P_DATA(0x44)
631#define E2P_48M_INT_VALUE4 E2P_REG(0x46) 661#define E2P_48M_INT_VALUE4 E2P_DATA(0x46)
632#define E2P_54M_CAL_VALUE1 E2P_REG(0x48) /* ??? */ 662#define E2P_54M_CAL_VALUE1 E2P_DATA(0x48) /* ??? */
633#define E2P_54M_CAL_VALUE2 E2P_REG(0x4a) 663#define E2P_54M_CAL_VALUE2 E2P_DATA(0x4a)
634#define E2P_54M_CAL_VALUE3 E2P_REG(0x4c) 664#define E2P_54M_CAL_VALUE3 E2P_DATA(0x4c)
635#define E2P_54M_CAL_VALUE4 E2P_REG(0x4e) 665#define E2P_54M_CAL_VALUE4 E2P_DATA(0x4e)
636#define E2P_54M_INT_VALUE1 E2P_REG(0x50) 666#define E2P_54M_INT_VALUE1 E2P_DATA(0x50)
637#define E2P_54M_INT_VALUE2 E2P_REG(0x52) 667#define E2P_54M_INT_VALUE2 E2P_DATA(0x52)
638#define E2P_54M_INT_VALUE3 E2P_REG(0x54) 668#define E2P_54M_INT_VALUE3 E2P_DATA(0x54)
639#define E2P_54M_INT_VALUE4 E2P_REG(0x56) 669#define E2P_54M_INT_VALUE4 E2P_DATA(0x56)
640 670
641/* All 16 bit values */ 671/* This word contains the base address of the FW_REG_ registers below */
642#define FW_FIRMWARE_VER FW_REG(0) 672#define FWRAW_REGS_ADDR FWRAW_DATA(0x1d)
643/* non-zero if USB high speed connection */ 673
644#define FW_USB_SPEED FW_REG(1) 674/* All 16 bit values, offset from the address in FWRAW_REGS_ADDR */
645#define FW_FIX_TX_RATE FW_REG(2) 675enum {
646/* Seems to be able to control LEDs over the firmware */ 676 FW_REG_FIRMWARE_VER = 0,
647#define FW_LINK_STATUS FW_REG(3) 677 /* non-zero if USB high speed connection */
648#define FW_SOFT_RESET FW_REG(4) 678 FW_REG_USB_SPEED = 1,
649#define FW_FLASH_CHK FW_REG(5) 679 FW_REG_FIX_TX_RATE = 2,
680 /* Seems to be able to control LEDs over the firmware */
681 FW_REG_LED_LINK_STATUS = 3,
682 FW_REG_SOFT_RESET = 4,
683 FW_REG_FLASH_CHK = 5,
684};
650 685
686/* Values for FW_LINK_STATUS */
651#define FW_LINK_OFF 0x0 687#define FW_LINK_OFF 0x0
652#define FW_LINK_TX 0x1 688#define FW_LINK_TX 0x1
653/* 0x2 - link led on? */ 689/* 0x2 - link led on? */
654 690
655enum { 691enum {
656 CR_BASE_OFFSET = 0x9000,
657 FW_START_OFFSET = 0xee00,
658 FW_BASE_ADDR_OFFSET = FW_START_OFFSET + 0x1d,
659 EEPROM_START_OFFSET = 0xf800,
660 EEPROM_SIZE = 0x800, /* words */
661 LOAD_CODE_SIZE = 0xe, /* words */
662 LOAD_VECT_SIZE = 0x10000 - 0xfff7, /* words */
663 EEPROM_REGS_OFFSET = LOAD_CODE_SIZE + LOAD_VECT_SIZE,
664 EEPROM_REGS_SIZE = 0x7e, /* words */
665 E2P_BASE_OFFSET = EEPROM_START_OFFSET +
666 EEPROM_REGS_OFFSET,
667};
668
669#define FW_REG_TABLE_ADDR USB_ADDR(FW_START_OFFSET + 0x1d)
670
671enum {
672 /* indices for ofdm_cal_values */ 692 /* indices for ofdm_cal_values */
673 OFDM_36M_INDEX = 0, 693 OFDM_36M_INDEX = 0,
674 OFDM_48M_INDEX = 1, 694 OFDM_48M_INDEX = 1,
@@ -679,6 +699,8 @@ struct zd_chip {
679 struct zd_usb usb; 699 struct zd_usb usb;
680 struct zd_rf rf; 700 struct zd_rf rf;
681 struct mutex mutex; 701 struct mutex mutex;
702 /* Base address of FW_REG_ registers */
703 zd_addr_t fw_regs_base;
682 u8 e2p_mac[ETH_ALEN]; 704 u8 e2p_mac[ETH_ALEN];
683 /* EepSetPoint in the vendor driver */ 705 /* EepSetPoint in the vendor driver */
684 u8 pwr_cal_values[E2P_CHANNEL_COUNT]; 706 u8 pwr_cal_values[E2P_CHANNEL_COUNT];
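The address-space enum and the CTL_REG/E2P_DATA/FWRAW_DATA macros added to zd_chip.h above replace the old tagged 32-bit addresses with flat 16-bit device offsets. A small standalone sketch of the arithmetic follows, reusing the constants from the hunk; the printed values are only worked examples (e.g. E2P_DATA(0x04), i.e. E2P_MAC_ADDR_P1, resolving to 0xf81b).

#include <stdint.h>
#include <stdio.h>

enum {
	CR_START          = 0x9000,
	FW_START          = 0xee00,
	E2P_START         = 0xf800,
	E2P_LOAD_CODE_LEN = 0xe,
	E2P_LOAD_VECT_LEN = 0x9,
	E2P_DATA_OFFSET   = E2P_LOAD_CODE_LEN + E2P_LOAD_VECT_LEN, /* 0x17 */
};

#define CTL_REG(offset)    ((uint16_t)(CR_START + (offset)))
#define E2P_DATA(offset)   ((uint16_t)(E2P_START + E2P_DATA_OFFSET + (offset)))
#define FWRAW_DATA(offset) ((uint16_t)(FW_START + (offset)))

int main(void)
{
	/* CR0 sits at the start of the control-register block. */
	printf("CTL_REG(0x0000)  = %#06x\n", (unsigned)CTL_REG(0x0000));
	/* E2P_MAC_ADDR_P1 is E2P_DATA(0x04): 0xf800 + 0x17 + 0x04 = 0xf81b. */
	printf("E2P_DATA(0x04)   = %#06x\n", (unsigned)E2P_DATA(0x04));
	/* FWRAW_REGS_ADDR is FWRAW_DATA(0x1d) = 0xee1d. */
	printf("FWRAW_DATA(0x1d) = %#06x\n", (unsigned)FWRAW_DATA(0x1d));
	return 0;
}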
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index fb22f62cf1f3..deb99d1eaa77 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -23,6 +23,8 @@
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25 25
26typedef u16 __nocast zd_addr_t;
27
26#define dev_printk_f(level, dev, fmt, args...) \ 28#define dev_printk_f(level, dev, fmt, args...) \
27 dev_printk(level, dev, "%s() " fmt, __func__, ##args) 29 dev_printk(level, dev, "%s() " fmt, __func__, ##args)
28 30
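zd_addr_t is now a plain 16-bit value; the __nocast marker only matters to the sparse checker (in the kernel it expands to __attribute__((nocast)) under __CHECKER__ and to nothing otherwise). A tiny illustrative sketch, not the driver's header:

#include <stdint.h>
#include <stdio.h>

#ifdef __CHECKER__
#define __nocast __attribute__((nocast))	/* sparse-only annotation */
#else
#define __nocast
#endif

typedef uint16_t __nocast zd_addr_t;

int main(void)
{
	zd_addr_t cr0 = (zd_addr_t)0x9000;	/* CTL_REG(0x0000) */

	/* The value already is the flat device address; no further
	 * translation step is needed before it goes on the wire. */
	printf("CR0 = %#06x\n", (unsigned)cr0);
	return 0;
}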
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
index 26b8298dff8c..c4f36d39642b 100644
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
@@ -2,7 +2,6 @@
2#define _ZD_IEEE80211_H 2#define _ZD_IEEE80211_H
3 3
4#include <net/ieee80211.h> 4#include <net/ieee80211.h>
5#include "zd_types.h"
6 5
7/* Additional definitions from the standards. 6/* Additional definitions from the standards.
8 */ 7 */
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index 676b3734f1ed..a57732eb69e1 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -18,8 +18,6 @@
18#ifndef _ZD_RF_H 18#ifndef _ZD_RF_H
19#define _ZD_RF_H 19#define _ZD_RF_H
20 20
21#include "zd_types.h"
22
23#define UW2451_RF 0x2 21#define UW2451_RF 0x2
24#define UCHIP_RF 0x3 22#define UCHIP_RF 0x3
25#define AL2230_RF 0x4 23#define AL2230_RF 0x4
diff --git a/drivers/net/wireless/zd1211rw/zd_types.h b/drivers/net/wireless/zd1211rw/zd_types.h
deleted file mode 100644
index 0155a1584ed3..000000000000
--- a/drivers/net/wireless/zd1211rw/zd_types.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/* zd_types.h
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_TYPES_H
19#define _ZD_TYPES_H
20
21#include <linux/types.h>
22
23/* We have three register spaces mapped into the overall USB address space of
24 * 64K words (16-bit values). There is the control register space of
25 * double-word registers, the eeprom register space and the firmware register
26 * space. The control register space is byte mapped, the others are word
27 * mapped.
28 *
29 * For that reason, we are using byte offsets for control registers and word
30 * offsets for everything else.
31 */
32
33typedef u32 __nocast zd_addr_t;
34
35enum {
36 ADDR_BASE_MASK = 0xff000000,
37 ADDR_OFFSET_MASK = 0x0000ffff,
38 ADDR_ZERO_MASK = 0x00ff0000,
39 NULL_BASE = 0x00000000,
40 USB_BASE = 0x01000000,
41 CR_BASE = 0x02000000,
42 CR_MAX_OFFSET = 0x0b30,
43 E2P_BASE = 0x03000000,
44 E2P_MAX_OFFSET = 0x007e,
45 FW_BASE = 0x04000000,
46 FW_MAX_OFFSET = 0x0005,
47};
48
49#define ZD_ADDR_BASE(addr) ((u32)(addr) & ADDR_BASE_MASK)
50#define ZD_OFFSET(addr) ((u32)(addr) & ADDR_OFFSET_MASK)
51
52#define ZD_ADDR(base, offset) \
53 ((zd_addr_t)(((base) & ADDR_BASE_MASK) | ((offset) & ADDR_OFFSET_MASK)))
54
55#define ZD_NULL_ADDR ((zd_addr_t)0)
56#define USB_REG(offset) ZD_ADDR(USB_BASE, offset) /* word addressing */
57#define CTL_REG(offset) ZD_ADDR(CR_BASE, offset) /* byte addressing */
58#define E2P_REG(offset) ZD_ADDR(E2P_BASE, offset) /* word addressing */
59#define FW_REG(offset) ZD_ADDR(FW_BASE, offset) /* word addressing */
60
61static inline zd_addr_t zd_inc_word(zd_addr_t addr)
62{
63 u32 base = ZD_ADDR_BASE(addr);
64 u32 offset = ZD_OFFSET(addr);
65
66 offset += base == CR_BASE ? 2 : 1;
67
68 return base | offset;
69}
70
71#endif /* _ZD_TYPES_H */
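For contrast with the deleted zd_types.h: the old scheme tagged every zd_addr_t with an address-space byte and only translated it to a flat device address inside usb_addr() at I/O time. The rough sketch below uses the masks copied from the removed header; the translation bases are reconstructed from the old enum (CR_BASE_OFFSET 0x9000, E2P_BASE_OFFSET 0xf800 + 0x17) and are meant as illustration, not as the driver's exact code.

#include <stdint.h>
#include <stdio.h>

#define ADDR_BASE_MASK   0xff000000u
#define ADDR_OFFSET_MASK 0x0000ffffu
#define CR_BASE          0x02000000u
#define E2P_BASE         0x03000000u

#define ZD_ADDR(base, offset) \
	(((base) & ADDR_BASE_MASK) | ((offset) & ADDR_OFFSET_MASK))

/* Old-style decode: pick a device base from the tag, add the offset. */
static uint16_t old_usb_addr(uint32_t addr)
{
	uint16_t offset = addr & ADDR_OFFSET_MASK;

	switch (addr & ADDR_BASE_MASK) {
	case CR_BASE:
		return 0x9000 + offset;	/* old CR_BASE_OFFSET */
	case E2P_BASE:
		return 0xf817 + offset;	/* old E2P_BASE_OFFSET, reconstructed */
	default:
		return offset;
	}
}

int main(void)
{
	uint32_t tagged = ZD_ADDR(CR_BASE, 0x0004);	/* the old CR1 */

	/* After this patch the same register is simply 0x9004 everywhere. */
	printf("old tagged %#010x -> device address %#06x\n",
	       (unsigned)tagged, (unsigned)old_usb_addr(tagged));
	return 0;
}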
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 605e96e74057..75ef55624d7f 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -58,6 +58,10 @@ static struct usb_device_id usb_ids[] = {
58 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, 58 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
59 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, 59 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
60 { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, 60 { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
61 { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
62 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
63 { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B },
61 /* "Driverless" devices that need ejecting */ 65 /* "Driverless" devices that need ejecting */
62 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 66 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
63 {} 67 {}
@@ -73,96 +77,6 @@ MODULE_DEVICE_TABLE(usb, usb_ids);
73#define FW_ZD1211_PREFIX "zd1211/zd1211_" 77#define FW_ZD1211_PREFIX "zd1211/zd1211_"
74#define FW_ZD1211B_PREFIX "zd1211/zd1211b_" 78#define FW_ZD1211B_PREFIX "zd1211/zd1211b_"
75 79
76/* register address handling */
77
78#ifdef DEBUG
79static int check_addr(struct zd_usb *usb, zd_addr_t addr)
80{
81 u32 base = ZD_ADDR_BASE(addr);
82 u32 offset = ZD_OFFSET(addr);
83
84 if ((u32)addr & ADDR_ZERO_MASK)
85 goto invalid_address;
86 switch (base) {
87 case USB_BASE:
88 break;
89 case CR_BASE:
90 if (offset > CR_MAX_OFFSET) {
91 dev_dbg(zd_usb_dev(usb),
92 "CR offset %#010x larger than"
93 " CR_MAX_OFFSET %#10x\n",
94 offset, CR_MAX_OFFSET);
95 goto invalid_address;
96 }
97 if (offset & 1) {
98 dev_dbg(zd_usb_dev(usb),
99 "CR offset %#010x is not a multiple of 2\n",
100 offset);
101 goto invalid_address;
102 }
103 break;
104 case E2P_BASE:
105 if (offset > E2P_MAX_OFFSET) {
106 dev_dbg(zd_usb_dev(usb),
107 "E2P offset %#010x larger than"
108 " E2P_MAX_OFFSET %#010x\n",
109 offset, E2P_MAX_OFFSET);
110 goto invalid_address;
111 }
112 break;
113 case FW_BASE:
114 if (!usb->fw_base_offset) {
115 dev_dbg(zd_usb_dev(usb),
116 "ERROR: fw base offset has not been set\n");
117 return -EAGAIN;
118 }
119 if (offset > FW_MAX_OFFSET) {
120 dev_dbg(zd_usb_dev(usb),
121 "FW offset %#10x is larger than"
122 " FW_MAX_OFFSET %#010x\n",
123 offset, FW_MAX_OFFSET);
124 goto invalid_address;
125 }
126 break;
127 default:
128 dev_dbg(zd_usb_dev(usb),
129 "address has unsupported base %#010x\n", addr);
130 goto invalid_address;
131 }
132
133 return 0;
134invalid_address:
135 dev_dbg(zd_usb_dev(usb),
136 "ERROR: invalid address: %#010x\n", addr);
137 return -EINVAL;
138}
139#endif /* DEBUG */
140
141static u16 usb_addr(struct zd_usb *usb, zd_addr_t addr)
142{
143 u32 base;
144 u16 offset;
145
146 base = ZD_ADDR_BASE(addr);
147 offset = ZD_OFFSET(addr);
148
149 ZD_ASSERT(check_addr(usb, addr) == 0);
150
151 switch (base) {
152 case CR_BASE:
153 offset += CR_BASE_OFFSET;
154 break;
155 case E2P_BASE:
156 offset += E2P_BASE_OFFSET;
157 break;
158 case FW_BASE:
159 offset += usb->fw_base_offset;
160 break;
161 }
162
163 return offset;
164}
165
166/* USB device initialization */ 80/* USB device initialization */
167 81
168static int request_fw_file( 82static int request_fw_file(
@@ -295,14 +209,13 @@ static int handle_version_mismatch(struct usb_device *udev, u8 device_type,
295 if (r) 209 if (r)
296 goto error; 210 goto error;
297 211
298 r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START_OFFSET, 212 r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START, REBOOT);
299 REBOOT);
300 if (r) 213 if (r)
301 goto error; 214 goto error;
302 215
303 offset = ((EEPROM_REGS_OFFSET + EEPROM_REGS_SIZE) * sizeof(u16)); 216 offset = (E2P_BOOT_CODE_OFFSET * sizeof(u16));
304 r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset, 217 r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset,
305 E2P_BASE_OFFSET + EEPROM_REGS_SIZE, REBOOT); 218 E2P_START + E2P_BOOT_CODE_OFFSET, REBOOT);
306 219
307 /* At this point, the vendor driver downloads the whole firmware 220 /* At this point, the vendor driver downloads the whole firmware
308 * image, hacks around with version IDs, and uploads it again, 221 * image, hacks around with version IDs, and uploads it again,
@@ -331,7 +244,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
331 if (r) 244 if (r)
332 goto error; 245 goto error;
333 246
334 fw_bcdDevice = get_word(ub_fw->data, EEPROM_REGS_OFFSET); 247 fw_bcdDevice = get_word(ub_fw->data, E2P_DATA_OFFSET);
335 248
336 if (fw_bcdDevice != bcdDevice) { 249 if (fw_bcdDevice != bcdDevice) {
337 dev_info(&udev->dev, 250 dev_info(&udev->dev,
@@ -357,8 +270,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
357 if (r) 270 if (r)
358 goto error; 271 goto error;
359 272
360 r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START_OFFSET, 273 r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START, REBOOT);
361 REBOOT);
362 if (r) { 274 if (r) {
363 dev_err(&udev->dev, 275 dev_err(&udev->dev,
364 "Could not upload firmware code uph. Error number %d\n", 276 "Could not upload firmware code uph. Error number %d\n",
@@ -858,7 +770,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb)
858 spin_lock_init(&intr->lock); 770 spin_lock_init(&intr->lock);
859 intr->interval = int_urb_interval(zd_usb_to_usbdev(usb)); 771 intr->interval = int_urb_interval(zd_usb_to_usbdev(usb));
860 init_completion(&intr->read_regs.completion); 772 init_completion(&intr->read_regs.completion);
861 intr->read_regs.cr_int_addr = cpu_to_le16(usb_addr(usb, CR_INTERRUPT)); 773 intr->read_regs.cr_int_addr = cpu_to_le16((u16)CR_INTERRUPT);
862} 774}
863 775
864static inline void init_usb_rx(struct zd_usb *usb) 776static inline void init_usb_rx(struct zd_usb *usb)
@@ -890,22 +802,6 @@ void zd_usb_init(struct zd_usb *usb, struct net_device *netdev,
890 init_usb_rx(usb); 802 init_usb_rx(usb);
891} 803}
892 804
893int zd_usb_init_hw(struct zd_usb *usb)
894{
895 int r;
896 struct zd_chip *chip = zd_usb_to_chip(usb);
897
898 ZD_ASSERT(mutex_is_locked(&chip->mutex));
899 r = zd_ioread16_locked(chip, &usb->fw_base_offset,
900 USB_REG((u16)FW_BASE_ADDR_OFFSET));
901 if (r)
902 return r;
903 dev_dbg_f(zd_usb_dev(usb), "fw_base_offset: %#06hx\n",
904 usb->fw_base_offset);
905
906 return 0;
907}
908
909void zd_usb_clear(struct zd_usb *usb) 805void zd_usb_clear(struct zd_usb *usb)
910{ 806{
911 usb_set_intfdata(usb->intf, NULL); 807 usb_set_intfdata(usb->intf, NULL);
@@ -1253,7 +1149,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1253 return -ENOMEM; 1149 return -ENOMEM;
1254 req->id = cpu_to_le16(USB_REQ_READ_REGS); 1150 req->id = cpu_to_le16(USB_REQ_READ_REGS);
1255 for (i = 0; i < count; i++) 1151 for (i = 0; i < count; i++)
1256 req->addr[i] = cpu_to_le16(usb_addr(usb, addresses[i])); 1152 req->addr[i] = cpu_to_le16((u16)addresses[i]);
1257 1153
1258 udev = zd_usb_to_usbdev(usb); 1154 udev = zd_usb_to_usbdev(usb);
1259 prepare_read_regs_int(usb); 1155 prepare_read_regs_int(usb);
@@ -1318,7 +1214,7 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1318 req->id = cpu_to_le16(USB_REQ_WRITE_REGS); 1214 req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
1319 for (i = 0; i < count; i++) { 1215 for (i = 0; i < count; i++) {
1320 struct reg_data *rw = &req->reg_writes[i]; 1216 struct reg_data *rw = &req->reg_writes[i];
1321 rw->addr = cpu_to_le16(usb_addr(usb, ioreqs[i].addr)); 1217 rw->addr = cpu_to_le16((u16)ioreqs[i].addr);
1322 rw->value = cpu_to_le16(ioreqs[i].value); 1218 rw->value = cpu_to_le16(ioreqs[i].value);
1323 } 1219 }
1324 1220
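With check_addr()/usb_addr() removed above, zd_usb_ioread16v() and zd_usb_iowrite16v() put the flat 16-bit addresses on the wire directly, only casting and converting to little endian. A simplified userspace sketch of that packing follows; htole16() stands in for the kernel's cpu_to_le16(), and the request id and layout are made up for illustration.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_READ_REGS 4

struct read_regs_req {
	uint16_t id;			/* request id, little endian */
	uint16_t addr[MAX_READ_REGS];	/* register addresses, little endian */
};

static void fill_read_req(struct read_regs_req *req,
			  const uint16_t *addresses, unsigned int count)
{
	unsigned int i;

	req->id = htole16(0x21);	/* illustrative id, not the driver's */
	for (i = 0; i < count; i++)
		req->addr[i] = htole16(addresses[i]);	/* cpu_to_le16() in-kernel */
}

int main(void)
{
	const uint16_t regs[] = { 0x9000 /* CR0 */, 0xf81b /* MAC word 1 */ };
	struct read_regs_req req;

	fill_read_req(&req, regs, 2);
	printf("first wire address: %#06x\n", (unsigned)le16toh(req.addr[0]));
	return 0;
}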
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 317d37c36679..506ea6a74393 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -25,7 +25,6 @@
25#include <linux/usb.h> 25#include <linux/usb.h>
26 26
27#include "zd_def.h" 27#include "zd_def.h"
28#include "zd_types.h"
29 28
30enum devicetype { 29enum devicetype {
31 DEVICE_ZD1211 = 0, 30 DEVICE_ZD1211 = 0,
@@ -181,15 +180,14 @@ struct zd_usb_tx {
181 spinlock_t lock; 180 spinlock_t lock;
182}; 181};
183 182
184/* Contains the usb parts. The structure doesn't require a lock, because intf 183/* Contains the usb parts. The structure doesn't require a lock because intf
185 * and fw_base_offset, will not be changed after initialization. 184 * will not be changed after initialization.
186 */ 185 */
187struct zd_usb { 186struct zd_usb {
188 struct zd_usb_interrupt intr; 187 struct zd_usb_interrupt intr;
189 struct zd_usb_rx rx; 188 struct zd_usb_rx rx;
190 struct zd_usb_tx tx; 189 struct zd_usb_tx tx;
191 struct usb_interface *intf; 190 struct usb_interface *intf;
192 u16 fw_base_offset;
193}; 191};
194 192
195#define zd_usb_dev(usb) (&usb->intf->dev) 193#define zd_usb_dev(usb) (&usb->intf->dev)
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c
index 896449f0cf85..4206df2d61b7 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/usb/net/asix.c
@@ -1449,6 +1449,10 @@ static const struct usb_device_id products [] = {
1449 // Linksys USB1000 1449 // Linksys USB1000
1450 USB_DEVICE (0x1737, 0x0039), 1450 USB_DEVICE (0x1737, 0x0039),
1451 .driver_info = (unsigned long) &ax88178_info, 1451 .driver_info = (unsigned long) &ax88178_info,
1452}, {
1453 // IO-DATA ETG-US2
1454 USB_DEVICE (0x04bb, 0x0930),
1455 .driver_info = (unsigned long) &ax88178_info,
1452}, 1456},
1453 { }, // END 1457 { }, // END
1454}; 1458};
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h
index 98f6898cae1f..c7467823cd1c 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/usb/net/pegasus.h
@@ -214,9 +214,9 @@ PEGASUS_DEV( "Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988,
214 DEFAULT_GPIO_RESET ) 214 DEFAULT_GPIO_RESET )
215PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511, 215PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511,
216 DEFAULT_GPIO_RESET | PEGASUS_II ) 216 DEFAULT_GPIO_RESET | PEGASUS_II )
217PEGASUS_DEV( "Corega FEter USB-TX", VENDOR_COREGA, 0x0004, 217PEGASUS_DEV( "Corega FEther USB-TX", VENDOR_COREGA, 0x0004,
218 DEFAULT_GPIO_RESET ) 218 DEFAULT_GPIO_RESET )
219PEGASUS_DEV( "Corega FEter USB-TXS", VENDOR_COREGA, 0x000d, 219PEGASUS_DEV( "Corega FEther USB-TXS", VENDOR_COREGA, 0x000d,
220 DEFAULT_GPIO_RESET | PEGASUS_II ) 220 DEFAULT_GPIO_RESET | PEGASUS_II )
221PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001, 221PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001,
222 DEFAULT_GPIO_RESET ) 222 DEFAULT_GPIO_RESET )
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index ccd706f876ec..5b4f1e3caf5d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2070,6 +2070,8 @@
2070#define PCI_VENDOR_ID_TDI 0x192E 2070#define PCI_VENDOR_ID_TDI 0x192E
2071#define PCI_DEVICE_ID_TDI_EHCI 0x0101 2071#define PCI_DEVICE_ID_TDI_EHCI 0x0101
2072 2072
2073#define PCI_VENDOR_ID_PASEMI 0x1959
2074
2073#define PCI_VENDOR_ID_JMICRON 0x197B 2075#define PCI_VENDOR_ID_JMICRON 0x197B
2074#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 2076#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
2075#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 2077#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index fa2f7da606a9..fb58e03b3fbd 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -265,6 +265,12 @@ ieee80211softmac_wx_get_rate(struct net_device *net_dev,
265 int err = -EINVAL; 265 int err = -EINVAL;
266 266
267 spin_lock_irqsave(&mac->lock, flags); 267 spin_lock_irqsave(&mac->lock, flags);
268
269 if (unlikely(!mac->running)) {
270 err = -ENODEV;
271 goto out_unlock;
272 }
273
268 switch (mac->txrates.default_rate) { 274 switch (mac->txrates.default_rate) {
269 case IEEE80211_CCK_RATE_1MB: 275 case IEEE80211_CCK_RATE_1MB:
270 data->bitrate.value = 1000000; 276 data->bitrate.value = 1000000;