Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/3c509.c | 4
-rw-r--r-- drivers/net/Kconfig | 13
-rw-r--r-- drivers/net/Makefile | 3
-rw-r--r-- drivers/net/acenic.c | 1
-rw-r--r-- drivers/net/appletalk/ipddp.c | 42
-rw-r--r-- drivers/net/arm/ep93xx_eth.c | 4
-rw-r--r-- drivers/net/arm/ixp4xx_eth.c | 4
-rw-r--r-- drivers/net/atl1c/atl1c_ethtool.c | 2
-rw-r--r-- drivers/net/atl1c/atl1c_main.c | 49
-rw-r--r-- drivers/net/atl1e/atl1e_main.c | 3
-rw-r--r-- drivers/net/atlx/atl1.c | 7
-rw-r--r-- drivers/net/atlx/atlx.h | 6
-rw-r--r-- drivers/net/b44.c | 4
-rw-r--r-- drivers/net/b44.h | 1
-rw-r--r-- drivers/net/benet/be_main.c | 131
-rw-r--r-- drivers/net/bfin_mac.c | 264
-rw-r--r-- drivers/net/bnx2.c | 26
-rw-r--r-- drivers/net/bnx2x_main.c | 1
-rw-r--r-- drivers/net/bonding/bond_main.c | 3
-rw-r--r-- drivers/net/bonding/bond_sysfs.c | 1
-rw-r--r-- drivers/net/bonding/bonding.h | 3
-rw-r--r-- drivers/net/can/Kconfig | 9
-rw-r--r-- drivers/net/can/dev.c | 2
-rw-r--r-- drivers/net/can/sja1000/Makefile | 1
-rw-r--r-- drivers/net/can/sja1000/ems_pci.c | 39
-rw-r--r-- drivers/net/can/sja1000/kvaser_pci.c | 21
-rw-r--r-- drivers/net/can/sja1000/sja1000.c | 110
-rw-r--r-- drivers/net/can/sja1000/sja1000.h | 9
-rw-r--r-- drivers/net/can/sja1000/sja1000_of_platform.c | 235
-rw-r--r-- drivers/net/can/sja1000/sja1000_platform.c | 19
-rw-r--r-- drivers/net/chelsio/sge.c | 1
-rw-r--r-- drivers/net/cpmac.c | 8
-rw-r--r-- drivers/net/cxgb3/Makefile | 2
-rw-r--r-- drivers/net/cxgb3/adapter.h | 6
-rw-r--r-- drivers/net/cxgb3/ael1002.c | 823
-rw-r--r-- drivers/net/cxgb3/aq100x.c | 355
-rw-r--r-- drivers/net/cxgb3/common.h | 4
-rw-r--r-- drivers/net/cxgb3/cxgb3_main.c | 89
-rw-r--r-- drivers/net/cxgb3/cxgb3_offload.c | 27
-rw-r--r-- drivers/net/cxgb3/cxgb3_offload.h | 3
-rw-r--r-- drivers/net/cxgb3/sge.c | 29
-rw-r--r-- drivers/net/cxgb3/t3_hw.c | 12
-rw-r--r-- drivers/net/cxgb3/version.h | 4
-rw-r--r-- drivers/net/davinci_emac.c | 10
-rw-r--r-- drivers/net/declance.c | 5
-rw-r--r-- drivers/net/dl2k.c | 8
-rw-r--r-- drivers/net/e100.c | 199
-rw-r--r-- drivers/net/e1000/e1000_main.c | 34
-rw-r--r-- drivers/net/e1000e/82571.c | 99
-rw-r--r-- drivers/net/e1000e/defines.h | 25
-rw-r--r-- drivers/net/e1000e/e1000.h | 61
-rw-r--r-- drivers/net/e1000e/es2lan.c | 3
-rw-r--r-- drivers/net/e1000e/ethtool.c | 46
-rw-r--r-- drivers/net/e1000e/hw.h | 18
-rw-r--r-- drivers/net/e1000e/ich8lan.c | 448
-rw-r--r-- drivers/net/e1000e/lib.c | 38
-rw-r--r-- drivers/net/e1000e/netdev.c | 261
-rw-r--r-- drivers/net/e1000e/param.c | 2
-rw-r--r-- drivers/net/e1000e/phy.c | 699
-rw-r--r-- drivers/net/enic/enic_main.c | 2
-rw-r--r-- drivers/net/forcedeth.c | 232
-rw-r--r-- drivers/net/fsl_pq_mdio.c | 6
-rw-r--r-- drivers/net/gianfar.h | 2
-rw-r--r-- drivers/net/hamachi.c | 3
-rw-r--r-- drivers/net/igb/igb_main.c | 6
-rw-r--r-- drivers/net/igbvf/netdev.c | 6
-rw-r--r-- drivers/net/irda/irda-usb.c | 40
-rw-r--r-- drivers/net/ixgb/ixgb_main.c | 5
-rw-r--r-- drivers/net/ixgbe/ixgbe.h | 122
-rw-r--r-- drivers/net/ixgbe/ixgbe_82598.c | 115
-rw-r--r-- drivers/net/ixgbe/ixgbe_82599.c | 1132
-rw-r--r-- drivers/net/ixgbe/ixgbe_common.c | 272
-rw-r--r-- drivers/net/ixgbe/ixgbe_common.h | 8
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb_82599.c | 2
-rw-r--r-- drivers/net/ixgbe/ixgbe_ethtool.c | 907
-rw-r--r-- drivers/net/ixgbe/ixgbe_fcoe.c | 6
-rw-r--r-- drivers/net/ixgbe/ixgbe_fcoe.h | 1
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 686
-rw-r--r-- drivers/net/ixgbe/ixgbe_phy.c | 1
-rw-r--r-- drivers/net/ixgbe/ixgbe_type.h | 168
-rw-r--r-- drivers/net/jme.c | 1
-rw-r--r-- drivers/net/korina.c | 14
-rw-r--r-- drivers/net/ks8842.c | 732
-rw-r--r-- drivers/net/mac8390.c | 31
-rw-r--r-- drivers/net/macvlan.c | 12
-rw-r--r-- drivers/net/mdio.c | 17
-rw-r--r-- drivers/net/mlx4/Makefile | 2
-rw-r--r-- drivers/net/mlx4/en_ethtool.c (renamed from drivers/net/mlx4/en_params.c) | 67
-rw-r--r-- drivers/net/mlx4/en_main.c | 68
-rw-r--r-- drivers/net/mlx4/en_netdev.c | 175
-rw-r--r-- drivers/net/mlx4/en_rx.c | 78
-rw-r--r-- drivers/net/mlx4/en_tx.c | 120
-rw-r--r-- drivers/net/mlx4/eq.c | 4
-rw-r--r-- drivers/net/mlx4/mlx4_en.h | 49
-rw-r--r-- drivers/net/mlx4/mr.c | 7
-rw-r--r-- drivers/net/mv643xx_eth.c | 11
-rw-r--r-- drivers/net/myri10ge/myri10ge.c | 1
-rw-r--r-- drivers/net/netxen/netxen_nic_init.c | 9
-rw-r--r-- drivers/net/netxen/netxen_nic_main.c | 1
-rw-r--r-- drivers/net/niu.c | 9
-rw-r--r-- drivers/net/ns83820.c | 6
-rw-r--r-- drivers/net/phy/marvell.c | 1
-rw-r--r-- drivers/net/pppol2tp.c | 11
-rw-r--r-- drivers/net/qla3xxx.c | 1
-rw-r--r-- drivers/net/qlge/qlge.h | 31
-rw-r--r-- drivers/net/qlge/qlge_ethtool.c | 6
-rw-r--r-- drivers/net/qlge/qlge_main.c | 134
-rw-r--r-- drivers/net/qlge/qlge_mpi.c | 58
-rw-r--r-- drivers/net/r6040.c | 6
-rw-r--r-- drivers/net/r8169.c | 106
-rw-r--r-- drivers/net/s2io.c | 23
-rw-r--r-- drivers/net/s2io.h | 9
-rw-r--r-- drivers/net/sfc/selftest.c | 1
-rw-r--r-- drivers/net/sfc/tenxpress.c | 11
-rw-r--r-- drivers/net/sfc/tx.c | 7
-rw-r--r-- drivers/net/sis190.c | 59
-rw-r--r-- drivers/net/skge.c | 2
-rw-r--r-- drivers/net/sky2.c | 1
-rw-r--r-- drivers/net/smsc911x.c | 2
-rw-r--r-- drivers/net/sundance.c | 53
-rw-r--r-- drivers/net/tehuti.c | 14
-rw-r--r-- drivers/net/tg3.c | 18
-rw-r--r-- drivers/net/tulip/Kconfig | 12
-rw-r--r-- drivers/net/tulip/de2104x.c | 13
-rw-r--r-- drivers/net/tun.c | 19
-rw-r--r-- drivers/net/ucc_geth.c | 79
-rw-r--r-- drivers/net/ucc_geth.h | 28
-rw-r--r-- drivers/net/usb/hso.c | 47
-rw-r--r-- drivers/net/usb/rtl8150.c | 9
-rw-r--r-- drivers/net/veth.c | 2
-rw-r--r-- drivers/net/via-rhine.c | 58
-rw-r--r-- drivers/net/via-velocity.c | 22
-rw-r--r-- drivers/net/via-velocity.h | 1
-rw-r--r-- drivers/net/virtio_net.c | 9
-rw-r--r-- drivers/net/vxge/vxge-config.c | 12
-rw-r--r-- drivers/net/vxge/vxge-main.c | 6
-rw-r--r-- drivers/net/wan/ixp4xx_hss.c | 4
-rw-r--r-- drivers/net/wimax/i2400m/control.c | 100
-rw-r--r-- drivers/net/wimax/i2400m/driver.c | 5
-rw-r--r-- drivers/net/wimax/i2400m/i2400m.h | 5
-rw-r--r-- drivers/net/wimax/i2400m/netdev.c | 4
-rw-r--r-- drivers/net/wimax/i2400m/rx.c | 6
-rw-r--r-- drivers/net/wimax/i2400m/sdio.c | 18
-rw-r--r-- drivers/net/wimax/i2400m/usb.c | 35
-rw-r--r-- drivers/net/wireless/Kconfig | 5
-rw-r--r-- drivers/net/wireless/at76c50x-usb.c | 12
-rw-r--r-- drivers/net/wireless/ath/ar9170/ar9170.h | 44
-rw-r--r-- drivers/net/wireless/ath/ar9170/hw.h | 8
-rw-r--r-- drivers/net/wireless/ath/ar9170/led.c | 17
-rw-r--r-- drivers/net/wireless/ath/ar9170/mac.c | 55
-rw-r--r-- drivers/net/wireless/ath/ar9170/main.c | 774
-rw-r--r-- drivers/net/wireless/ath/ar9170/phy.c | 6
-rw-r--r-- drivers/net/wireless/ath/ar9170/usb.c | 191
-rw-r--r-- drivers/net/wireless/ath/ar9170/usb.h | 9
-rw-r--r-- drivers/net/wireless/ath/ath5k/Makefile | 1
-rw-r--r-- drivers/net/wireless/ath/ath5k/ath5k.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath5k/base.c | 31
-rw-r--r-- drivers/net/wireless/ath/ath5k/base.h | 12
-rw-r--r-- drivers/net/wireless/ath/ath5k/reset.c | 17
-rw-r--r-- drivers/net/wireless/ath/ath5k/rfkill.c | 121
-rw-r--r-- drivers/net/wireless/ath/ath9k/ath9k.h | 7
-rw-r--r-- drivers/net/wireless/ath/ath9k/beacon.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug.c | 155
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug.h | 10
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 115
-rw-r--r-- drivers/net/wireless/ath/ath9k/pci.c | 15
-rw-r--r-- drivers/net/wireless/ath/ath9k/xmit.c | 1
-rw-r--r-- drivers/net/wireless/ath/regd.c | 29
-rw-r--r-- drivers/net/wireless/b43/Kconfig | 7
-rw-r--r-- drivers/net/wireless/b43/Makefile | 2
-rw-r--r-- drivers/net/wireless/b43/b43.h | 17
-rw-r--r-- drivers/net/wireless/b43/dma.c | 2
-rw-r--r-- drivers/net/wireless/b43/leds.c | 9
-rw-r--r-- drivers/net/wireless/b43/main.c | 87
-rw-r--r-- drivers/net/wireless/b43/main.h | 1
-rw-r--r-- drivers/net/wireless/b43/phy_a.c | 4
-rw-r--r-- drivers/net/wireless/b43/phy_common.c | 17
-rw-r--r-- drivers/net/wireless/b43/phy_common.h | 6
-rw-r--r-- drivers/net/wireless/b43/phy_g.c | 4
-rw-r--r-- drivers/net/wireless/b43/phy_lp.c | 2
-rw-r--r-- drivers/net/wireless/b43/phy_n.c | 2
-rw-r--r-- drivers/net/wireless/b43/pio.c | 2
-rw-r--r-- drivers/net/wireless/b43/rfkill.c | 170
-rw-r--r-- drivers/net/wireless/b43/rfkill.h | 47
-rw-r--r-- drivers/net/wireless/b43/xmit.c | 5
-rw-r--r-- drivers/net/wireless/b43legacy/Kconfig | 8
-rw-r--r-- drivers/net/wireless/b43legacy/Makefile | 2
-rw-r--r-- drivers/net/wireless/b43legacy/b43legacy.h | 3
-rw-r--r-- drivers/net/wireless/b43legacy/leds.c | 10
-rw-r--r-- drivers/net/wireless/b43legacy/main.c | 17
-rw-r--r-- drivers/net/wireless/b43legacy/rfkill.c | 172
-rw-r--r-- drivers/net/wireless/b43legacy/rfkill.h | 54
-rw-r--r-- drivers/net/wireless/iwlwifi/Kconfig | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/Makefile | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-3945-led.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 9
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-3945.c | 73
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-3945.h | 11
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-4965.c | 8
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-5000.c | 16
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-6000.c | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 22
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn.c | 59
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-commands.h | 14
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-core.c | 152
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-core.h | 28
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-debugfs.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-dev.h | 14
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-eeprom.c | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-led.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-rfkill.c | 144
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-rfkill.h | 48
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-sta.c | 56
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-sta.h | 7
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl3945-base.c | 269
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/Kconfig | 3
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/Makefile | 2
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/cfg80211.c | 2
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/fw.c | 2
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/iwm.h | 4
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/netdev.c | 10
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/rfkill.c | 88
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/sdio.c | 2
-rw-r--r-- drivers/net/wireless/libertas/11d.c | 26
-rw-r--r-- drivers/net/wireless/libertas/11d.h | 29
-rw-r--r-- drivers/net/wireless/libertas/assoc.c | 758
-rw-r--r-- drivers/net/wireless/libertas/assoc.h | 13
-rw-r--r-- drivers/net/wireless/libertas/cmd.c | 16
-rw-r--r-- drivers/net/wireless/libertas/cmdresp.c | 17
-rw-r--r-- drivers/net/wireless/libertas/debugfs.c | 8
-rw-r--r-- drivers/net/wireless/libertas/dev.h | 10
-rw-r--r-- drivers/net/wireless/libertas/hostcmd.h | 41
-rw-r--r-- drivers/net/wireless/libertas/if_sdio.c | 76
-rw-r--r-- drivers/net/wireless/libertas/if_spi.c | 126
-rw-r--r-- drivers/net/wireless/libertas/main.c | 20
-rw-r--r-- drivers/net/wireless/libertas/scan.c | 63
-rw-r--r-- drivers/net/wireless/libertas/types.h | 150
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 27
-rw-r--r-- drivers/net/wireless/p54/p54common.c | 46
-rw-r--r-- drivers/net/wireless/p54/p54usb.c | 4
-rw-r--r-- drivers/net/wireless/rndis_wlan.c | 292
-rw-r--r-- drivers/net/wireless/rt2x00/rt2400pci.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt2500pci.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt2500usb.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800usb.c | 12
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00.h | 5
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00config.c | 3
-rw-r--r-- drivers/net/wireless/rt2x00/rt61pci.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt73usb.c | 2
-rw-r--r-- drivers/net/wireless/rtl818x/rtl8187_dev.c | 9
-rw-r--r-- drivers/net/wireless/wavelan.c | 4
-rw-r--r-- drivers/net/wireless/wavelan_cs.c | 5
-rw-r--r-- drivers/net/yellowfin.c | 3
253 files changed, 10840 insertions(+), 4823 deletions(-)
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 8d9aa49de145..d2137efbd455 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -480,9 +480,13 @@ static int pnp_registered;
480 480
481#ifdef CONFIG_EISA 481#ifdef CONFIG_EISA
482static struct eisa_device_id el3_eisa_ids[] = { 482static struct eisa_device_id el3_eisa_ids[] = {
483 { "TCM5090" },
484 { "TCM5091" },
483 { "TCM5092" }, 485 { "TCM5092" },
484 { "TCM5093" }, 486 { "TCM5093" },
487 { "TCM5094" },
485 { "TCM5095" }, 488 { "TCM5095" },
489 { "TCM5098" },
486 { "" } 490 { "" }
487}; 491};
488MODULE_DEVICE_TABLE(eisa, el3_eisa_ids); 492MODULE_DEVICE_TABLE(eisa, el3_eisa_ids);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 43a5254df98d..3f739cfd92fa 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1001,7 +1001,7 @@ config SMC911X
1001 1001
1002config SMSC911X 1002config SMSC911X
1003 tristate "SMSC LAN911x/LAN921x families embedded ethernet support" 1003 tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
1004 depends on ARM || SUPERH 1004 depends on ARM || SUPERH || BLACKFIN
1005 select CRC32 1005 select CRC32
1006 select MII 1006 select MII
1007 select PHYLIB 1007 select PHYLIB
@@ -1723,6 +1723,11 @@ config TLAN
1723 1723
1724 Please email feedback to <torben.mathiasen@compaq.com>. 1724 Please email feedback to <torben.mathiasen@compaq.com>.
1725 1725
1726config KS8842
1727 tristate "Micrel KSZ8842"
1728 help
1729 This platform driver is for Micrel KSZ8842 chip.
1730
1726config VIA_RHINE 1731config VIA_RHINE
1727 tristate "VIA Rhine support" 1732 tristate "VIA Rhine support"
1728 depends on NET_PCI && PCI 1733 depends on NET_PCI && PCI
@@ -1859,8 +1864,8 @@ config 68360_ENET
1859 the Motorola 68360 processor. 1864 the Motorola 68360 processor.
1860 1865
1861config FEC 1866config FEC
1862 bool "FEC ethernet controller (of ColdFire CPUs)" 1867 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
1863 depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 1868 depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35
1864 help 1869 help
1865 Say Y here if you want to use the built-in 10/100 Fast ethernet 1870 Say Y here if you want to use the built-in 10/100 Fast ethernet
1866 controller on some Motorola ColdFire and Freescale i.MX processors. 1871 controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -2720,6 +2725,8 @@ source "drivers/net/wan/Kconfig"
2720 2725
2721source "drivers/atm/Kconfig" 2726source "drivers/atm/Kconfig"
2722 2727
2728source "drivers/ieee802154/Kconfig"
2729
2723source "drivers/s390/net/Kconfig" 2730source "drivers/s390/net/Kconfig"
2724 2731
2725config XEN_NETDEV_FRONTEND 2732config XEN_NETDEV_FRONTEND
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f07a1e956417..1c378dd5933e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_TC35815) += tc35815.o
86obj-$(CONFIG_SKGE) += skge.o 86obj-$(CONFIG_SKGE) += skge.o
87obj-$(CONFIG_SKY2) += sky2.o 87obj-$(CONFIG_SKY2) += sky2.o
88obj-$(CONFIG_SKFP) += skfp/ 88obj-$(CONFIG_SKFP) += skfp/
89obj-$(CONFIG_KS8842) += ks8842.o
89obj-$(CONFIG_VIA_RHINE) += via-rhine.o 90obj-$(CONFIG_VIA_RHINE) += via-rhine.o
90obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o 91obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
91obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 92obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
@@ -105,7 +106,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
105obj-$(CONFIG_NET) += Space.o loopback.o 106obj-$(CONFIG_NET) += Space.o loopback.o
106obj-$(CONFIG_SEEQ8005) += seeq8005.o 107obj-$(CONFIG_SEEQ8005) += seeq8005.o
107obj-$(CONFIG_NET_SB1000) += sb1000.o 108obj-$(CONFIG_NET_SB1000) += sb1000.o
108obj-$(CONFIG_MAC8390) += mac8390.o 8390.o 109obj-$(CONFIG_MAC8390) += mac8390.o
109obj-$(CONFIG_APNE) += apne.o 8390.o 110obj-$(CONFIG_APNE) += apne.o 8390.o
110obj-$(CONFIG_PCMCIA_PCNET) += 8390.o 111obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
111obj-$(CONFIG_HP100) += hp100.o 112obj-$(CONFIG_HP100) += hp100.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 57bc71527850..08419ee10290 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2573,7 +2573,6 @@ restart:
2573 netif_wake_queue(dev); 2573 netif_wake_queue(dev);
2574 } 2574 }
2575 2575
2576 dev->trans_start = jiffies;
2577 return NETDEV_TX_OK; 2576 return NETDEV_TX_OK;
2578 2577
2579overflow: 2578overflow:
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index f939e92fcf8a..78cea5e80b1d 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -39,6 +39,7 @@
39static const char version[] = KERN_INFO "ipddp.c:v0.01 8/28/97 Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n"; 39static const char version[] = KERN_INFO "ipddp.c:v0.01 8/28/97 Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n";
40 40
41static struct ipddp_route *ipddp_route_list; 41static struct ipddp_route *ipddp_route_list;
42static DEFINE_SPINLOCK(ipddp_route_lock);
42 43
43#ifdef CONFIG_IPDDP_ENCAP 44#ifdef CONFIG_IPDDP_ENCAP
44static int ipddp_mode = IPDDP_ENCAP; 45static int ipddp_mode = IPDDP_ENCAP;
@@ -50,7 +51,7 @@ static int ipddp_mode = IPDDP_DECAP;
50static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev); 51static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev);
51static int ipddp_create(struct ipddp_route *new_rt); 52static int ipddp_create(struct ipddp_route *new_rt);
52static int ipddp_delete(struct ipddp_route *rt); 53static int ipddp_delete(struct ipddp_route *rt);
53static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt); 54static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt);
54static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 55static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
55 56
56static const struct net_device_ops ipddp_netdev_ops = { 57static const struct net_device_ops ipddp_netdev_ops = {
@@ -114,11 +115,13 @@ static struct net_device * __init ipddp_init(void)
114 */ 115 */
115static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev) 116static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
116{ 117{
117 __be32 paddr = ((struct rtable*)skb->dst)->rt_gateway; 118 __be32 paddr = skb_rtable(skb)->rt_gateway;
118 struct ddpehdr *ddp; 119 struct ddpehdr *ddp;
119 struct ipddp_route *rt; 120 struct ipddp_route *rt;
120 struct atalk_addr *our_addr; 121 struct atalk_addr *our_addr;
121 122
123 spin_lock(&ipddp_route_lock);
124
122 /* 125 /*
123 * Find appropriate route to use, based only on IP number. 126 * Find appropriate route to use, based only on IP number.
124 */ 127 */
@@ -127,8 +130,10 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
127 if(rt->ip == paddr) 130 if(rt->ip == paddr)
128 break; 131 break;
129 } 132 }
130 if(rt == NULL) 133 if(rt == NULL) {
134 spin_unlock(&ipddp_route_lock);
131 return 0; 135 return 0;
136 }
132 137
133 our_addr = atalk_find_dev_addr(rt->dev); 138 our_addr = atalk_find_dev_addr(rt->dev);
134 139
@@ -174,6 +179,8 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
174 if(aarp_send_ddp(rt->dev, skb, &rt->at, NULL) < 0) 179 if(aarp_send_ddp(rt->dev, skb, &rt->at, NULL) < 0)
175 dev_kfree_skb(skb); 180 dev_kfree_skb(skb);
176 181
182 spin_unlock(&ipddp_route_lock);
183
177 return 0; 184 return 0;
178} 185}
179 186
@@ -196,7 +203,9 @@ static int ipddp_create(struct ipddp_route *new_rt)
196 return -ENETUNREACH; 203 return -ENETUNREACH;
197 } 204 }
198 205
199 if (ipddp_find_route(rt)) { 206 spin_lock_bh(&ipddp_route_lock);
207 if (__ipddp_find_route(rt)) {
208 spin_unlock_bh(&ipddp_route_lock);
200 kfree(rt); 209 kfree(rt);
201 return -EEXIST; 210 return -EEXIST;
202 } 211 }
@@ -204,6 +213,8 @@ static int ipddp_create(struct ipddp_route *new_rt)
204 rt->next = ipddp_route_list; 213 rt->next = ipddp_route_list;
205 ipddp_route_list = rt; 214 ipddp_route_list = rt;
206 215
216 spin_unlock_bh(&ipddp_route_lock);
217
207 return 0; 218 return 0;
208} 219}
209 220
@@ -216,6 +227,7 @@ static int ipddp_delete(struct ipddp_route *rt)
216 struct ipddp_route **r = &ipddp_route_list; 227 struct ipddp_route **r = &ipddp_route_list;
217 struct ipddp_route *tmp; 228 struct ipddp_route *tmp;
218 229
230 spin_lock_bh(&ipddp_route_lock);
219 while((tmp = *r) != NULL) 231 while((tmp = *r) != NULL)
220 { 232 {
221 if(tmp->ip == rt->ip 233 if(tmp->ip == rt->ip
@@ -223,19 +235,21 @@ static int ipddp_delete(struct ipddp_route *rt)
223 && tmp->at.s_node == rt->at.s_node) 235 && tmp->at.s_node == rt->at.s_node)
224 { 236 {
225 *r = tmp->next; 237 *r = tmp->next;
238 spin_unlock_bh(&ipddp_route_lock);
226 kfree(tmp); 239 kfree(tmp);
227 return 0; 240 return 0;
228 } 241 }
229 r = &tmp->next; 242 r = &tmp->next;
230 } 243 }
231 244
245 spin_unlock_bh(&ipddp_route_lock);
232 return (-ENOENT); 246 return (-ENOENT);
233} 247}
234 248
235/* 249/*
236 * Find a routing entry, we only return a FULL match 250 * Find a routing entry, we only return a FULL match
237 */ 251 */
238static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt) 252static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
239{ 253{
240 struct ipddp_route *f; 254 struct ipddp_route *f;
241 255
@@ -253,7 +267,7 @@ static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt)
253static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 267static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
254{ 268{
255 struct ipddp_route __user *rt = ifr->ifr_data; 269 struct ipddp_route __user *rt = ifr->ifr_data;
256 struct ipddp_route rcp; 270 struct ipddp_route rcp, rcp2, *rp;
257 271
258 if(!capable(CAP_NET_ADMIN)) 272 if(!capable(CAP_NET_ADMIN))
259 return -EPERM; 273 return -EPERM;
@@ -267,9 +281,19 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
267 return (ipddp_create(&rcp)); 281 return (ipddp_create(&rcp));
268 282
269 case SIOCFINDIPDDPRT: 283 case SIOCFINDIPDDPRT:
270 if(copy_to_user(rt, ipddp_find_route(&rcp), sizeof(struct ipddp_route))) 284 spin_lock_bh(&ipddp_route_lock);
271 return -EFAULT; 285 rp = __ipddp_find_route(&rcp);
272 return 0; 286 if (rp)
287 memcpy(&rcp2, rp, sizeof(rcp2));
288 spin_unlock_bh(&ipddp_route_lock);
289
290 if (rp) {
291 if (copy_to_user(rt, &rcp2,
292 sizeof(struct ipddp_route)))
293 return -EFAULT;
294 return 0;
295 } else
296 return -ENOENT;
273 297
274 case SIOCDELIPDDPRT: 298 case SIOCDELIPDDPRT:
275 return (ipddp_delete(&rcp)); 299 return (ipddp_delete(&rcp));
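The ipddp.c hunks above do more than add ipddp_route_lock: in the SIOCFINDIPDDPRT path the lookup now runs as __ipddp_find_route() under the lock, the match is copied into the on-stack rcp2, and only after the unlock does copy_to_user() touch userspace memory, since a faulting copy must not happen while a spinlock is held. A minimal userspace sketch of that "look up under the lock, hand out a copy" discipline follows; the struct, the field names and the pthread mutex are stand-ins invented for illustration, not the kernel code.

/* Sketch of the pattern, not the kernel implementation. Build with -pthread. */
#include <pthread.h>
#include <string.h>

struct demo_route {
	unsigned int ip;		/* lookup key */
	unsigned int gateway;		/* payload */
	struct demo_route *next;
};

static struct demo_route *route_list;
static pthread_mutex_t route_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold route_lock, mirroring __ipddp_find_route(). */
static struct demo_route *find_route_locked(unsigned int ip)
{
	struct demo_route *r;

	for (r = route_list; r; r = r->next)
		if (r->ip == ip)
			return r;
	return NULL;
}

/* Copy the entry out while locked; never expose a pointer into the list. */
static int find_route_copy(unsigned int ip, struct demo_route *out)
{
	struct demo_route *r;
	int found = 0;

	pthread_mutex_lock(&route_lock);
	r = find_route_locked(ip);
	if (r) {
		memcpy(out, r, sizeof(*out));
		out->next = NULL;	/* the copy is not part of the list */
		found = 1;
	}
	pthread_mutex_unlock(&route_lock);

	return found ? 0 : -1;		/* the driver returns -ENOENT here */
}

int main(void)
{
	static struct demo_route r1 = { .ip = 0x0a000001, .gateway = 0x0a0000fe };
	struct demo_route copy;

	pthread_mutex_lock(&route_lock);
	r1.next = route_list;
	route_list = &r1;
	pthread_mutex_unlock(&route_lock);

	return find_route_copy(0x0a000001, &copy);
}

Returning a snapshot rather than a pointer is what lets the ioctl drop the lock before the potentially sleeping copy_to_user().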
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b72b3d639f6e..fbf4645417d4 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -253,7 +253,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
253 skb = dev_alloc_skb(length + 2); 253 skb = dev_alloc_skb(length + 2);
254 if (likely(skb != NULL)) { 254 if (likely(skb != NULL)) {
255 skb_reserve(skb, 2); 255 skb_reserve(skb, 2);
256 dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr, 256 dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
257 length, DMA_FROM_DEVICE); 257 length, DMA_FROM_DEVICE);
258 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); 258 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
259 skb_put(skb, length); 259 skb_put(skb, length);
@@ -331,7 +331,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
331 ep->descs->tdesc[entry].tdesc1 = 331 ep->descs->tdesc[entry].tdesc1 =
332 TDESC1_EOF | (entry << 16) | (skb->len & 0xfff); 332 TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
333 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]); 333 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
334 dma_sync_single(NULL, ep->descs->tdesc[entry].buf_addr, 334 dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
335 skb->len, DMA_TO_DEVICE); 335 skb->len, DMA_TO_DEVICE);
336 dev_kfree_skb(skb); 336 dev_kfree_skb(skb);
337 337
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 322c49b908dc..1fcf8388b1c8 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -561,8 +561,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
561 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, 561 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
562 RX_BUFF_SIZE, DMA_FROM_DEVICE); 562 RX_BUFF_SIZE, DMA_FROM_DEVICE);
563#else 563#else
564 dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN, 564 dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
565 RX_BUFF_SIZE, DMA_FROM_DEVICE); 565 RX_BUFF_SIZE, DMA_FROM_DEVICE);
566 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], 566 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
567 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); 567 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
568#endif 568#endif
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 45c5b7332cd3..e4afbd628c23 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -271,7 +271,7 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
271 struct atl1c_adapter *adapter = netdev_priv(netdev); 271 struct atl1c_adapter *adapter = netdev_priv(netdev);
272 272
273 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | 273 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
274 WAKE_MCAST | WAKE_BCAST | WAKE_MCAST)) 274 WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
275 return -EOPNOTSUPP; 275 return -EOPNOTSUPP;
276 /* these settings will always override what we currently have */ 276 /* these settings will always override what we currently have */
277 adapter->wol = 0; 277 adapter->wol = 0;
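The atl1c_set_wol() fix above replaces a duplicated WAKE_MCAST in the rejection mask with WAKE_UCAST, so unsupported unicast wake-on-LAN requests are finally refused instead of slipping through. A self-contained before/after check of the mask (the flag values mirror the kernel's ethtool definitions and are repeated here only so the snippet stands alone):

#include <stdio.h>

/* Values as in the kernel's ethtool wake-on-LAN flag definitions. */
#define WAKE_UCAST		(1 << 1)
#define WAKE_MCAST		(1 << 2)
#define WAKE_BCAST		(1 << 3)
#define WAKE_ARP		(1 << 4)
#define WAKE_MAGICSECURE	(1 << 6)

int main(void)
{
	/* Old mask: WAKE_MCAST listed twice, WAKE_UCAST missing entirely. */
	unsigned int before = WAKE_ARP | WAKE_MAGICSECURE |
			      WAKE_MCAST | WAKE_BCAST | WAKE_MCAST;
	/* New mask: the duplicate replaced by WAKE_UCAST. */
	unsigned int after = WAKE_ARP | WAKE_MAGICSECURE |
			     WAKE_UCAST | WAKE_BCAST | WAKE_MCAST;

	printf("unicast wake rejected before the fix: %s\n",
	       (before & WAKE_UCAST) ? "yes" : "no");	/* prints "no" */
	printf("unicast wake rejected after the fix:  %s\n",
	       (after & WAKE_UCAST) ? "yes" : "no");	/* prints "yes" */
	return 0;
}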
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index fc1092b835d2..cd547a205fb9 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -164,6 +164,24 @@ static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
164} 164}
165 165
166/* 166/*
167 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
168 * of the idle status register until the device is actually idle
169 */
170static u32 atl1c_wait_until_idle(struct atl1c_hw *hw)
171{
172 int timeout;
173 u32 data;
174
175 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
176 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
177 if ((data & IDLE_STATUS_MASK) == 0)
178 return 0;
179 msleep(1);
180 }
181 return data;
182}
183
184/*
167 * atl1c_phy_config - Timer Call-back 185 * atl1c_phy_config - Timer Call-back
168 * @data: pointer to netdev cast into an unsigned long 186 * @data: pointer to netdev cast into an unsigned long
169 */ 187 */
@@ -1106,7 +1124,6 @@ static void atl1c_configure_dma(struct atl1c_adapter *adapter)
1106static int atl1c_stop_mac(struct atl1c_hw *hw) 1124static int atl1c_stop_mac(struct atl1c_hw *hw)
1107{ 1125{
1108 u32 data; 1126 u32 data;
1109 int timeout;
1110 1127
1111 AT_READ_REG(hw, REG_RXQ_CTRL, &data); 1128 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1112 data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN | 1129 data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN |
@@ -1117,25 +1134,13 @@ static int atl1c_stop_mac(struct atl1c_hw *hw)
1117 data &= ~TXQ_CTRL_EN; 1134 data &= ~TXQ_CTRL_EN;
1118 AT_WRITE_REG(hw, REG_TWSI_CTRL, data); 1135 AT_WRITE_REG(hw, REG_TWSI_CTRL, data);
1119 1136
1120 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { 1137 atl1c_wait_until_idle(hw);
1121 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
1122 if ((data & (IDLE_STATUS_RXQ_NO_IDLE |
1123 IDLE_STATUS_TXQ_NO_IDLE)) == 0)
1124 break;
1125 msleep(1);
1126 }
1127 1138
1128 AT_READ_REG(hw, REG_MAC_CTRL, &data); 1139 AT_READ_REG(hw, REG_MAC_CTRL, &data);
1129 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN); 1140 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
1130 AT_WRITE_REG(hw, REG_MAC_CTRL, data); 1141 AT_WRITE_REG(hw, REG_MAC_CTRL, data);
1131 1142
1132 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { 1143 return (int)atl1c_wait_until_idle(hw);
1133 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
1134 if ((data & IDLE_STATUS_MASK) == 0)
1135 return 0;
1136 msleep(1);
1137 }
1138 return data;
1139} 1144}
1140 1145
1141static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw) 1146static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
@@ -1178,8 +1183,6 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
1178{ 1183{
1179 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 1184 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
1180 struct pci_dev *pdev = adapter->pdev; 1185 struct pci_dev *pdev = adapter->pdev;
1181 u32 idle_status_data = 0;
1182 int timeout = 0;
1183 int ret; 1186 int ret;
1184 1187
1185 AT_WRITE_REG(hw, REG_IMR, 0); 1188 AT_WRITE_REG(hw, REG_IMR, 0);
@@ -1198,15 +1201,10 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
1198 AT_WRITE_FLUSH(hw); 1201 AT_WRITE_FLUSH(hw);
1199 msleep(10); 1202 msleep(10);
1200 /* Wait at least 10ms for All module to be Idle */ 1203 /* Wait at least 10ms for All module to be Idle */
1201 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { 1204
1202 AT_READ_REG(hw, REG_IDLE_STATUS, &idle_status_data); 1205 if (atl1c_wait_until_idle(hw)) {
1203 if ((idle_status_data & IDLE_STATUS_MASK) == 0)
1204 break;
1205 msleep(1);
1206 }
1207 if (timeout >= AT_HW_MAX_IDLE_DELAY) {
1208 dev_err(&pdev->dev, 1206 dev_err(&pdev->dev,
1209 "MAC state machine cann't be idle since" 1207 "MAC state machine can't be idle since"
1210 " disabled for 10ms second\n"); 1208 " disabled for 10ms second\n");
1211 return -1; 1209 return -1;
1212 } 1210 }
@@ -2113,7 +2111,6 @@ static int atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2113 atl1c_tx_map(adapter, skb, tpd, type); 2111 atl1c_tx_map(adapter, skb, tpd, type);
2114 atl1c_tx_queue(adapter, skb, tpd, type); 2112 atl1c_tx_queue(adapter, skb, tpd, type);
2115 2113
2116 netdev->trans_start = jiffies;
2117 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2114 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2118 return NETDEV_TX_OK; 2115 return NETDEV_TX_OK;
2119} 2116}
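The atl1c_main.c hunks above fold three copies of the same polling loop into atl1c_wait_until_idle(), which reads the idle-status register up to AT_HW_MAX_IDLE_DELAY times with a 1 ms sleep between reads and returns 0 once the busy bits clear, or the last status word on timeout. A standalone sketch of that contract is below; the stubbed register read and the mask value are illustrative, not the driver's API.

/* Sketch of a poll-until-idle helper in the style of atl1c_wait_until_idle().
 * read_idle_status() stands in for AT_READ_REG(hw, REG_IDLE_STATUS, &data). */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_IDLE_POLLS		10	/* analogue of AT_HW_MAX_IDLE_DELAY */
#define IDLE_STATUS_MASK	0xffu	/* illustrative mask, not the real one */

/* Stub: pretend the hardware stays busy for the first three polls. */
static unsigned int busy_polls = 3;
static uint32_t read_idle_status(void)
{
	return busy_polls ? (busy_polls--, 0x1u) : 0x0u;
}

/* Return 0 once idle, or the last status value on timeout, so the caller can
 * both test for failure and report which blocks were still busy. */
static uint32_t wait_until_idle(void)
{
	uint32_t status = 0;
	int i;

	for (i = 0; i < MAX_IDLE_POLLS; i++) {
		status = read_idle_status();
		if ((status & IDLE_STATUS_MASK) == 0)
			return 0;
		usleep(1000);		/* the driver uses msleep(1) */
	}
	return status;
}

int main(void)
{
	printf("wait_until_idle() -> %u\n", (unsigned int)wait_until_idle());
	return 0;
}

Returning the raw status on timeout rather than a plain error code is what lets atl1c_stop_mac() keep returning the leftover busy bits, as it did before the refactor.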
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index c271b7537fab..e1ae10cf30c1 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -37,6 +37,7 @@ char atl1e_driver_version[] = DRV_VERSION;
37 */ 37 */
38static struct pci_device_id atl1e_pci_tbl[] = { 38static struct pci_device_id atl1e_pci_tbl[] = {
39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, 39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
40 /* required last entry */ 41 /* required last entry */
41 { 0 } 42 { 0 }
42}; 43};
@@ -1893,7 +1894,7 @@ static int atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1893 atl1e_tx_map(adapter, skb, tpd); 1894 atl1e_tx_map(adapter, skb, tpd);
1894 atl1e_tx_queue(adapter, tpd_req, tpd); 1895 atl1e_tx_queue(adapter, tpd_req, tpd);
1895 1896
1896 netdev->trans_start = jiffies; 1897 netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1897 spin_unlock_irqrestore(&adapter->tx_lock, flags); 1898 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1898 return NETDEV_TX_OK; 1899 return NETDEV_TX_OK;
1899} 1900}
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 13f0bdc32449..560f3873d347 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -82,6 +82,12 @@
82 82
83#include "atl1.h" 83#include "atl1.h"
84 84
85#define ATLX_DRIVER_VERSION "2.1.3"
86MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
87 Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
88MODULE_LICENSE("GPL");
89MODULE_VERSION(ATLX_DRIVER_VERSION);
90
85/* Temporary hack for merging atl1 and atl2 */ 91/* Temporary hack for merging atl1 and atl2 */
86#include "atlx.c" 92#include "atlx.c"
87 93
@@ -2431,7 +2437,6 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2431 atl1_tx_queue(adapter, count, ptpd); 2437 atl1_tx_queue(adapter, count, ptpd);
2432 atl1_update_mailbox(adapter); 2438 atl1_update_mailbox(adapter);
2433 mmiowb(); 2439 mmiowb();
2434 netdev->trans_start = jiffies;
2435 return NETDEV_TX_OK; 2440 return NETDEV_TX_OK;
2436} 2441}
2437 2442
diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h
index 297a03da6b7f..14054b75aa62 100644
--- a/drivers/net/atlx/atlx.h
+++ b/drivers/net/atlx/atlx.h
@@ -29,12 +29,6 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/types.h> 30#include <linux/types.h>
31 31
32#define ATLX_DRIVER_VERSION "2.1.3"
33MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
34 Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
35MODULE_LICENSE("GPL");
36MODULE_VERSION(ATLX_DRIVER_VERSION);
37
38#define ATLX_ERR_PHY 2 32#define ATLX_ERR_PHY 2
39#define ATLX_ERR_PHY_SPEED 7 33#define ATLX_ERR_PHY_SPEED 7
40#define ATLX_ERR_PHY_RES 8 34#define ATLX_ERR_PHY_RES 8
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index b70b81ec34c3..36d4d377ec2f 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -782,7 +782,7 @@ static int b44_rx(struct b44 *bp, int budget)
782 drop_it: 782 drop_it:
783 b44_recycle_rx(bp, cons, bp->rx_prod); 783 b44_recycle_rx(bp, cons, bp->rx_prod);
784 drop_it_no_recycle: 784 drop_it_no_recycle:
785 bp->stats.rx_dropped++; 785 bp->dev->stats.rx_dropped++;
786 goto next_pkt; 786 goto next_pkt;
787 } 787 }
788 788
@@ -1647,7 +1647,7 @@ static int b44_close(struct net_device *dev)
1647static struct net_device_stats *b44_get_stats(struct net_device *dev) 1647static struct net_device_stats *b44_get_stats(struct net_device *dev)
1648{ 1648{
1649 struct b44 *bp = netdev_priv(dev); 1649 struct b44 *bp = netdev_priv(dev);
1650 struct net_device_stats *nstat = &bp->stats; 1650 struct net_device_stats *nstat = &dev->stats;
1651 struct b44_hw_stats *hwstat = &bp->hw_stats; 1651 struct b44_hw_stats *hwstat = &bp->hw_stats;
1652 1652
1653 /* Convert HW stats into netdevice stats. */ 1653 /* Convert HW stats into netdevice stats. */
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index e678498de6db..0443f6801f60 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -384,7 +384,6 @@ struct b44 {
384 384
385 struct timer_list timer; 385 struct timer_list timer;
386 386
387 struct net_device_stats stats;
388 struct b44_hw_stats hw_stats; 387 struct b44_hw_stats hw_stats;
389 388
390 struct ssb_device *sdev; 389 struct ssb_device *sdev;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index ae2f6b58ba25..66bb56874d9b 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -168,6 +168,7 @@ static void netdev_stats_update(struct be_adapter *adapter)
168 struct be_port_rxf_stats *port_stats = 168 struct be_port_rxf_stats *port_stats =
169 &rxf_stats->port[adapter->port_num]; 169 &rxf_stats->port[adapter->port_num];
170 struct net_device_stats *dev_stats = &adapter->stats.net_stats; 170 struct net_device_stats *dev_stats = &adapter->stats.net_stats;
171 struct be_erx_stats *erx_stats = &hw_stats->erx;
171 172
172 dev_stats->rx_packets = port_stats->rx_total_frames; 173 dev_stats->rx_packets = port_stats->rx_total_frames;
173 dev_stats->tx_packets = port_stats->tx_unicastframes + 174 dev_stats->tx_packets = port_stats->tx_unicastframes +
@@ -181,29 +182,33 @@ static void netdev_stats_update(struct be_adapter *adapter)
181 dev_stats->rx_errors = port_stats->rx_crc_errors + 182 dev_stats->rx_errors = port_stats->rx_crc_errors +
182 port_stats->rx_alignment_symbol_errors + 183 port_stats->rx_alignment_symbol_errors +
183 port_stats->rx_in_range_errors + 184 port_stats->rx_in_range_errors +
184 port_stats->rx_out_range_errors + port_stats->rx_frame_too_long; 185 port_stats->rx_out_range_errors +
185 186 port_stats->rx_frame_too_long +
186 /* packet transmit problems */ 187 port_stats->rx_dropped_too_small +
187 dev_stats->tx_errors = 0; 188 port_stats->rx_dropped_too_short +
188 189 port_stats->rx_dropped_header_too_small +
189 /* no space in linux buffers */ 190 port_stats->rx_dropped_tcp_length +
190 dev_stats->rx_dropped = 0; 191 port_stats->rx_dropped_runt +
191 192 port_stats->rx_tcp_checksum_errs +
192 /* no space available in linux */ 193 port_stats->rx_ip_checksum_errs +
193 dev_stats->tx_dropped = 0; 194 port_stats->rx_udp_checksum_errs;
194 195
195 dev_stats->multicast = port_stats->tx_multicastframes; 196 /* no space in linux buffers: best possible approximation */
196 dev_stats->collisions = 0; 197 dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
197 198
198 /* detailed rx errors */ 199 /* detailed rx errors */
199 dev_stats->rx_length_errors = port_stats->rx_in_range_errors + 200 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
200 port_stats->rx_out_range_errors + port_stats->rx_frame_too_long; 201 port_stats->rx_out_range_errors +
202 port_stats->rx_frame_too_long;
203
201 /* receive ring buffer overflow */ 204 /* receive ring buffer overflow */
202 dev_stats->rx_over_errors = 0; 205 dev_stats->rx_over_errors = 0;
206
203 dev_stats->rx_crc_errors = port_stats->rx_crc_errors; 207 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
204 208
205 /* frame alignment errors */ 209 /* frame alignment errors */
206 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors; 210 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
211
207 /* receiver fifo overrun */ 212 /* receiver fifo overrun */
208 /* drops_no_pbuf is no per i/f, it's per BE card */ 213 /* drops_no_pbuf is no per i/f, it's per BE card */
209 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow + 214 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
@@ -211,6 +216,16 @@ static void netdev_stats_update(struct be_adapter *adapter)
211 rxf_stats->rx_drops_no_pbuf; 216 rxf_stats->rx_drops_no_pbuf;
212 /* receiver missed packetd */ 217 /* receiver missed packetd */
213 dev_stats->rx_missed_errors = 0; 218 dev_stats->rx_missed_errors = 0;
219
220 /* packet transmit problems */
221 dev_stats->tx_errors = 0;
222
223 /* no space available in linux */
224 dev_stats->tx_dropped = 0;
225
226 dev_stats->multicast = port_stats->tx_multicastframes;
227 dev_stats->collisions = 0;
228
214 /* detailed tx_errors */ 229 /* detailed tx_errors */
215 dev_stats->tx_aborted_errors = 0; 230 dev_stats->tx_aborted_errors = 0;
216 dev_stats->tx_carrier_errors = 0; 231 dev_stats->tx_carrier_errors = 0;
@@ -337,13 +352,10 @@ static void be_tx_stats_update(struct be_adapter *adapter,
337/* Determine number of WRB entries needed to xmit data in an skb */ 352/* Determine number of WRB entries needed to xmit data in an skb */
338static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) 353static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
339{ 354{
340 int cnt = 0; 355 int cnt = (skb->len > skb->data_len);
341 while (skb) { 356
342 if (skb->len > skb->data_len) 357 cnt += skb_shinfo(skb)->nr_frags;
343 cnt++; 358
344 cnt += skb_shinfo(skb)->nr_frags;
345 skb = skb_shinfo(skb)->frag_list;
346 }
347 /* to account for hdr wrb */ 359 /* to account for hdr wrb */
348 cnt++; 360 cnt++;
349 if (cnt & 1) { 361 if (cnt & 1) {
@@ -409,31 +421,28 @@ static int make_tx_wrbs(struct be_adapter *adapter,
409 hdr = queue_head_node(txq); 421 hdr = queue_head_node(txq);
410 queue_head_inc(txq); 422 queue_head_inc(txq);
411 423
412 while (skb) { 424 if (skb->len > skb->data_len) {
413 if (skb->len > skb->data_len) { 425 int len = skb->len - skb->data_len;
414 int len = skb->len - skb->data_len; 426 busaddr = pci_map_single(pdev, skb->data, len,
415 busaddr = pci_map_single(pdev, skb->data, len, 427 PCI_DMA_TODEVICE);
416 PCI_DMA_TODEVICE); 428 wrb = queue_head_node(txq);
417 wrb = queue_head_node(txq); 429 wrb_fill(wrb, busaddr, len);
418 wrb_fill(wrb, busaddr, len); 430 be_dws_cpu_to_le(wrb, sizeof(*wrb));
419 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 431 queue_head_inc(txq);
420 queue_head_inc(txq); 432 copied += len;
421 copied += len; 433 }
422 }
423 434
424 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 435 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
425 struct skb_frag_struct *frag = 436 struct skb_frag_struct *frag =
426 &skb_shinfo(skb)->frags[i]; 437 &skb_shinfo(skb)->frags[i];
427 busaddr = pci_map_page(pdev, frag->page, 438 busaddr = pci_map_page(pdev, frag->page,
428 frag->page_offset, 439 frag->page_offset,
429 frag->size, PCI_DMA_TODEVICE); 440 frag->size, PCI_DMA_TODEVICE);
430 wrb = queue_head_node(txq); 441 wrb = queue_head_node(txq);
431 wrb_fill(wrb, busaddr, frag->size); 442 wrb_fill(wrb, busaddr, frag->size);
432 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 443 be_dws_cpu_to_le(wrb, sizeof(*wrb));
433 queue_head_inc(txq); 444 queue_head_inc(txq);
434 copied += frag->size; 445 copied += frag->size;
435 }
436 skb = skb_shinfo(skb)->frag_list;
437 } 446 }
438 447
439 if (dummy_wrb) { 448 if (dummy_wrb) {
@@ -478,8 +487,6 @@ static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
478 487
479 be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt); 488 be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);
480 489
481 netdev->trans_start = jiffies;
482
483 be_tx_stats_update(adapter, wrb_cnt, copied, stopped); 490 be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
484 return NETDEV_TX_OK; 491 return NETDEV_TX_OK;
485} 492}
@@ -736,7 +743,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
736 743
737 if (pktsize <= rx_frag_size) { 744 if (pktsize <= rx_frag_size) {
738 BUG_ON(num_rcvd != 1); 745 BUG_ON(num_rcvd != 1);
739 return; 746 goto done;
740 } 747 }
741 748
742 /* More frags present for this completion */ 749 /* More frags present for this completion */
@@ -758,6 +765,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
758 memset(page_info, 0, sizeof(*page_info)); 765 memset(page_info, 0, sizeof(*page_info));
759 } 766 }
760 767
768done:
761 be_rx_stats_update(adapter, pktsize, num_rcvd); 769 be_rx_stats_update(adapter, pktsize, num_rcvd);
762 return; 770 return;
763} 771}
@@ -868,12 +876,19 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
868 876
869 be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); 877 be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
870 878
871 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
872
873 queue_tail_inc(&adapter->rx_obj.cq); 879 queue_tail_inc(&adapter->rx_obj.cq);
874 return rxcp; 880 return rxcp;
875} 881}
876 882
883/* To reset the valid bit, we need to reset the whole word as
884 * when walking the queue the valid entries are little-endian
885 * and invalid entries are host endian
886 */
887static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
888{
889 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
890}
891
877static inline struct page *be_alloc_pages(u32 size) 892static inline struct page *be_alloc_pages(u32 size)
878{ 893{
879 gfp_t alloc_flags = GFP_ATOMIC; 894 gfp_t alloc_flags = GFP_ATOMIC;
@@ -1005,6 +1020,7 @@ static void be_rx_q_clean(struct be_adapter *adapter)
1005 /* First cleanup pending rx completions */ 1020 /* First cleanup pending rx completions */
1006 while ((rxcp = be_rx_compl_get(adapter)) != NULL) { 1021 while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
1007 be_rx_compl_discard(adapter, rxcp); 1022 be_rx_compl_discard(adapter, rxcp);
1023 be_rx_compl_reset(rxcp);
1008 be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1); 1024 be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
1009 } 1025 }
1010 1026
@@ -1040,8 +1056,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
1040 struct be_queue_info *q; 1056 struct be_queue_info *q;
1041 1057
1042 q = &adapter->tx_obj.q; 1058 q = &adapter->tx_obj.q;
1043 if (q->created) 1059 if (q->created) {
1044 be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ); 1060 be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);
1061
1062 /* No more tx completions can be rcvd now; clean up if there
1063 * are any pending completions or pending tx requests */
1064 be_tx_q_clean(adapter);
1065 }
1045 be_queue_free(adapter, q); 1066 be_queue_free(adapter, q);
1046 1067
1047 q = &adapter->tx_obj.cq; 1068 q = &adapter->tx_obj.cq;
@@ -1049,10 +1070,6 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
1049 be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ); 1070 be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
1050 be_queue_free(adapter, q); 1071 be_queue_free(adapter, q);
1051 1072
1052 /* No more tx completions can be rcvd now; clean up if there are
1053 * any pending completions or pending tx requests */
1054 be_tx_q_clean(adapter);
1055
1056 q = &adapter->tx_eq.q; 1073 q = &adapter->tx_eq.q;
1057 if (q->created) 1074 if (q->created)
1058 be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ); 1075 be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
@@ -1286,6 +1303,8 @@ int be_poll_rx(struct napi_struct *napi, int budget)
1286 be_rx_compl_process_lro(adapter, rxcp); 1303 be_rx_compl_process_lro(adapter, rxcp);
1287 else 1304 else
1288 be_rx_compl_process(adapter, rxcp); 1305 be_rx_compl_process(adapter, rxcp);
1306
1307 be_rx_compl_reset(rxcp);
1289 } 1308 }
1290 1309
1291 lro_flush_all(&adapter->rx_obj.lro_mgr); 1310 lro_flush_all(&adapter->rx_obj.lro_mgr);
@@ -1541,7 +1560,7 @@ static int be_close(struct net_device *netdev)
1541 struct be_eq_obj *tx_eq = &adapter->tx_eq; 1560 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1542 int vec; 1561 int vec;
1543 1562
1544 cancel_delayed_work(&adapter->work); 1563 cancel_delayed_work_sync(&adapter->work);
1545 1564
1546 netif_stop_queue(netdev); 1565 netif_stop_queue(netdev);
1547 netif_carrier_off(netdev); 1566 netif_carrier_off(netdev);
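With the frag_list walk gone, wrb_cnt_for_skb() above counts one WRB for the linear part of the skb (when present), one per page fragment, and one for the header WRB; the hunk is cut off at the if (cnt & 1) test, which, consistent with the later if (dummy_wrb) block in make_tx_wrbs(), appears to pad the count to an even number with a dummy WRB. The arithmetic in isolation, with invented names (linear_len and frag_count stand in for skb->len - skb->data_len and skb_shinfo(skb)->nr_frags):

/* Sketch of the descriptor-count arithmetic; not the driver's code. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int wrbs_for_packet(size_t linear_len, unsigned int frag_count,
				    bool *need_dummy)
{
	unsigned int cnt = (linear_len > 0);	/* one WRB for the linear data */

	cnt += frag_count;			/* one per page fragment */
	cnt++;					/* plus the header WRB */

	*need_dummy = cnt & 1;			/* assumed: even count required */
	return cnt + (cnt & 1);
}

int main(void)
{
	bool dummy;
	unsigned int n = wrbs_for_packet(1500, 2, &dummy);

	printf("%u WRBs, dummy needed: %s\n", n, dummy ? "yes" : "no");
	return 0;
}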
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 9f971ed6b58d..c15fc281f79f 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -194,13 +194,13 @@ static int desc_list_init(void)
194 struct dma_descriptor *b = &(r->desc_b); 194 struct dma_descriptor *b = &(r->desc_b);
195 195
196 /* allocate a new skb for next time receive */ 196 /* allocate a new skb for next time receive */
197 new_skb = dev_alloc_skb(PKT_BUF_SZ + 2); 197 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
198 if (!new_skb) { 198 if (!new_skb) {
199 printk(KERN_NOTICE DRV_NAME 199 printk(KERN_NOTICE DRV_NAME
200 ": init: low on mem - packet dropped\n"); 200 ": init: low on mem - packet dropped\n");
201 goto init_error; 201 goto init_error;
202 } 202 }
203 skb_reserve(new_skb, 2); 203 skb_reserve(new_skb, NET_IP_ALIGN);
204 r->skb = new_skb; 204 r->skb = new_skb;
205 205
206 /* 206 /*
@@ -566,9 +566,9 @@ static void adjust_tx_list(void)
566 */ 566 */
567 if (current_tx_ptr->next->next == tx_list_head) { 567 if (current_tx_ptr->next->next == tx_list_head) {
568 while (tx_list_head->status.status_word == 0) { 568 while (tx_list_head->status.status_word == 0) {
569 mdelay(1); 569 udelay(10);
570 if (tx_list_head->status.status_word != 0 570 if (tx_list_head->status.status_word != 0
571 || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) { 571 || !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
572 goto adjust_head; 572 goto adjust_head;
573 } 573 }
574 if (timeout_cnt-- < 0) { 574 if (timeout_cnt-- < 0) {
@@ -606,93 +606,41 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
606 struct net_device *dev) 606 struct net_device *dev)
607{ 607{
608 u16 *data; 608 u16 *data;
609 609 u32 data_align = (unsigned long)(skb->data) & 0x3;
610 current_tx_ptr->skb = skb; 610 current_tx_ptr->skb = skb;
611 611
612 if (ANOMALY_05000285) { 612 if (data_align == 0x2) {
613 /* 613 /* move skb->data to current_tx_ptr payload */
614 * TXDWA feature is not avaible to older revision < 0.3 silicon 614 data = (u16 *)(skb->data) - 1;
615 * of BF537 615 *data = (u16)(skb->len);
616 * 616 current_tx_ptr->desc_a.start_addr = (u32)data;
617 * Only if data buffer is ODD WORD alignment, we do not 617 /* this is important! */
618 * need to memcpy 618 blackfin_dcache_flush_range((u32)data,
619 */ 619 (u32)((u8 *)data + skb->len + 4));
620 u32 data_align = (u32)(skb->data) & 0x3;
621 if (data_align == 0x2) {
622 /* move skb->data to current_tx_ptr payload */
623 data = (u16 *)(skb->data) - 1;
624 *data = (u16)(skb->len);
625 current_tx_ptr->desc_a.start_addr = (u32)data;
626 /* this is important! */
627 blackfin_dcache_flush_range((u32)data,
628 (u32)((u8 *)data + skb->len + 4));
629 } else {
630 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
631 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
632 skb->len);
633 current_tx_ptr->desc_a.start_addr =
634 (u32)current_tx_ptr->packet;
635 if (current_tx_ptr->status.status_word != 0)
636 current_tx_ptr->status.status_word = 0;
637 blackfin_dcache_flush_range(
638 (u32)current_tx_ptr->packet,
639 (u32)(current_tx_ptr->packet + skb->len + 2));
640 }
641 } else { 620 } else {
642 /* 621 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
643 * TXDWA feature is avaible to revision < 0.3 silicon of 622 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
644 * BF537 and always avaible to BF52x 623 skb->len);
645 */ 624 current_tx_ptr->desc_a.start_addr =
646 u32 data_align = (u32)(skb->data) & 0x3; 625 (u32)current_tx_ptr->packet;
647 if (data_align == 0x0) { 626 if (current_tx_ptr->status.status_word != 0)
648 u16 sysctl = bfin_read_EMAC_SYSCTL(); 627 current_tx_ptr->status.status_word = 0;
649 sysctl |= TXDWA; 628 blackfin_dcache_flush_range(
650 bfin_write_EMAC_SYSCTL(sysctl); 629 (u32)current_tx_ptr->packet,
651 630 (u32)(current_tx_ptr->packet + skb->len + 2));
652 /* move skb->data to current_tx_ptr payload */
653 data = (u16 *)(skb->data) - 2;
654 *data = (u16)(skb->len);
655 current_tx_ptr->desc_a.start_addr = (u32)data;
656 /* this is important! */
657 blackfin_dcache_flush_range(
658 (u32)data,
659 (u32)((u8 *)data + skb->len + 4));
660 } else if (data_align == 0x2) {
661 u16 sysctl = bfin_read_EMAC_SYSCTL();
662 sysctl &= ~TXDWA;
663 bfin_write_EMAC_SYSCTL(sysctl);
664
665 /* move skb->data to current_tx_ptr payload */
666 data = (u16 *)(skb->data) - 1;
667 *data = (u16)(skb->len);
668 current_tx_ptr->desc_a.start_addr = (u32)data;
669 /* this is important! */
670 blackfin_dcache_flush_range(
671 (u32)data,
672 (u32)((u8 *)data + skb->len + 4));
673 } else {
674 u16 sysctl = bfin_read_EMAC_SYSCTL();
675 sysctl &= ~TXDWA;
676 bfin_write_EMAC_SYSCTL(sysctl);
677
678 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
679 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
680 skb->len);
681 current_tx_ptr->desc_a.start_addr =
682 (u32)current_tx_ptr->packet;
683 if (current_tx_ptr->status.status_word != 0)
684 current_tx_ptr->status.status_word = 0;
685 blackfin_dcache_flush_range(
686 (u32)current_tx_ptr->packet,
687 (u32)(current_tx_ptr->packet + skb->len + 2));
688 }
689 } 631 }
690 632
633 /* make sure the internal data buffers in the core are drained
634 * so that the DMA descriptors are completely written when the
635 * DMA engine goes to fetch them below
636 */
637 SSYNC();
638
691 /* enable this packet's dma */ 639 /* enable this packet's dma */
692 current_tx_ptr->desc_a.config |= DMAEN; 640 current_tx_ptr->desc_a.config |= DMAEN;
693 641
694 /* tx dma is running, just return */ 642 /* tx dma is running, just return */
695 if (bfin_read_DMA2_IRQ_STATUS() & 0x08) 643 if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
696 goto out; 644 goto out;
697 645
698 /* tx dma is not running */ 646 /* tx dma is not running */
@@ -718,7 +666,7 @@ static void bfin_mac_rx(struct net_device *dev)
718 666
719 /* allocate a new skb for next time receive */ 667 /* allocate a new skb for next time receive */
720 skb = current_rx_ptr->skb; 668 skb = current_rx_ptr->skb;
721 new_skb = dev_alloc_skb(PKT_BUF_SZ + 2); 669 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
722 if (!new_skb) { 670 if (!new_skb) {
723 printk(KERN_NOTICE DRV_NAME 671 printk(KERN_NOTICE DRV_NAME
724 ": rx: low on mem - packet dropped\n"); 672 ": rx: low on mem - packet dropped\n");
@@ -726,7 +674,7 @@ static void bfin_mac_rx(struct net_device *dev)
726 goto out; 674 goto out;
727 } 675 }
728 /* reserve 2 bytes for RXDWA padding */ 676 /* reserve 2 bytes for RXDWA padding */
729 skb_reserve(new_skb, 2); 677 skb_reserve(new_skb, NET_IP_ALIGN);
730 current_rx_ptr->skb = new_skb; 678 current_rx_ptr->skb = new_skb;
731 current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2; 679 current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
732 680
@@ -979,22 +927,7 @@ static int bfin_mac_open(struct net_device *dev)
979 return 0; 927 return 0;
980} 928}
981 929
982static const struct net_device_ops bfin_mac_netdev_ops = {
983 .ndo_open = bfin_mac_open,
984 .ndo_stop = bfin_mac_close,
985 .ndo_start_xmit = bfin_mac_hard_start_xmit,
986 .ndo_set_mac_address = bfin_mac_set_mac_address,
987 .ndo_tx_timeout = bfin_mac_timeout,
988 .ndo_set_multicast_list = bfin_mac_set_multicast_list,
989 .ndo_validate_addr = eth_validate_addr,
990 .ndo_change_mtu = eth_change_mtu,
991#ifdef CONFIG_NET_POLL_CONTROLLER
992 .ndo_poll_controller = bfin_mac_poll,
993#endif
994};
995
996/* 930/*
997 *
998 * this makes the board clean up everything that it can 931 * this makes the board clean up everything that it can
999 * and not talk to the outside world. Caused by 932 * and not talk to the outside world. Caused by
1000 * an 'ifconfig ethX down' 933 * an 'ifconfig ethX down'
@@ -1019,11 +952,26 @@ static int bfin_mac_close(struct net_device *dev)
1019 return 0; 952 return 0;
1020} 953}
1021 954
955static const struct net_device_ops bfin_mac_netdev_ops = {
956 .ndo_open = bfin_mac_open,
957 .ndo_stop = bfin_mac_close,
958 .ndo_start_xmit = bfin_mac_hard_start_xmit,
959 .ndo_set_mac_address = bfin_mac_set_mac_address,
960 .ndo_tx_timeout = bfin_mac_timeout,
961 .ndo_set_multicast_list = bfin_mac_set_multicast_list,
962 .ndo_validate_addr = eth_validate_addr,
963 .ndo_change_mtu = eth_change_mtu,
964#ifdef CONFIG_NET_POLL_CONTROLLER
965 .ndo_poll_controller = bfin_mac_poll,
966#endif
967};
968
1022static int __devinit bfin_mac_probe(struct platform_device *pdev) 969static int __devinit bfin_mac_probe(struct platform_device *pdev)
1023{ 970{
1024 struct net_device *ndev; 971 struct net_device *ndev;
1025 struct bfin_mac_local *lp; 972 struct bfin_mac_local *lp;
1026 int rc, i; 973 struct platform_device *pd;
974 int rc;
1027 975
1028 ndev = alloc_etherdev(sizeof(struct bfin_mac_local)); 976 ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
1029 if (!ndev) { 977 if (!ndev) {
@@ -1048,13 +996,6 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1048 goto out_err_probe_mac; 996 goto out_err_probe_mac;
1049 } 997 }
1050 998
1051 /* set the GPIO pins to Ethernet mode */
1052 rc = peripheral_request_list(pin_req, DRV_NAME);
1053 if (rc) {
1054 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1055 rc = -EFAULT;
1056 goto out_err_setup_pin_mux;
1057 }
1058 999
1059 /* 1000 /*
1060 * Is it valid? (Did bootloader initialize it?) 1001 * Is it valid? (Did bootloader initialize it?)
@@ -1070,26 +1011,14 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1070 1011
1071 setup_mac_addr(ndev->dev_addr); 1012 setup_mac_addr(ndev->dev_addr);
1072 1013
1073 /* MDIO bus initial */ 1014 if (!pdev->dev.platform_data) {
1074 lp->mii_bus = mdiobus_alloc(); 1015 dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
1075 if (lp->mii_bus == NULL) 1016 rc = -ENODEV;
1076 goto out_err_mdiobus_alloc; 1017 goto out_err_probe_mac;
1077
1078 lp->mii_bus->priv = ndev;
1079 lp->mii_bus->read = bfin_mdiobus_read;
1080 lp->mii_bus->write = bfin_mdiobus_write;
1081 lp->mii_bus->reset = bfin_mdiobus_reset;
1082 lp->mii_bus->name = "bfin_mac_mdio";
1083 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "0");
1084 lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1085 for (i = 0; i < PHY_MAX_ADDR; ++i)
1086 lp->mii_bus->irq[i] = PHY_POLL;
1087
1088 rc = mdiobus_register(lp->mii_bus);
1089 if (rc) {
1090 dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1091 goto out_err_mdiobus_register;
1092 } 1018 }
1019 pd = pdev->dev.platform_data;
1020 lp->mii_bus = platform_get_drvdata(pd);
1021 lp->mii_bus->priv = ndev;
1093 1022
1094 rc = mii_probe(ndev); 1023 rc = mii_probe(ndev);
1095 if (rc) { 1024 if (rc) {
@@ -1108,7 +1037,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1108 /* now, enable interrupts */ 1037 /* now, enable interrupts */
1109 /* register irq handler */ 1038 /* register irq handler */
1110 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt, 1039 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
1111 IRQF_DISABLED | IRQF_SHARED, "EMAC_RX", ndev); 1040 IRQF_DISABLED, "EMAC_RX", ndev);
1112 if (rc) { 1041 if (rc) {
1113 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n"); 1042 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
1114 rc = -EBUSY; 1043 rc = -EBUSY;
@@ -1131,11 +1060,8 @@ out_err_reg_ndev:
1131out_err_request_irq: 1060out_err_request_irq:
1132out_err_mii_probe: 1061out_err_mii_probe:
1133 mdiobus_unregister(lp->mii_bus); 1062 mdiobus_unregister(lp->mii_bus);
1134out_err_mdiobus_register:
1135 mdiobus_free(lp->mii_bus); 1063 mdiobus_free(lp->mii_bus);
1136out_err_mdiobus_alloc:
1137 peripheral_free_list(pin_req); 1064 peripheral_free_list(pin_req);
1138out_err_setup_pin_mux:
1139out_err_probe_mac: 1065out_err_probe_mac:
1140 platform_set_drvdata(pdev, NULL); 1066 platform_set_drvdata(pdev, NULL);
1141 free_netdev(ndev); 1067 free_netdev(ndev);
@@ -1150,8 +1076,7 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
1150 1076
1151 platform_set_drvdata(pdev, NULL); 1077 platform_set_drvdata(pdev, NULL);
1152 1078
1153 mdiobus_unregister(lp->mii_bus); 1079 lp->mii_bus->priv = NULL;
1154 mdiobus_free(lp->mii_bus);
1155 1080
1156 unregister_netdev(ndev); 1081 unregister_netdev(ndev);
1157 1082
@@ -1189,6 +1114,74 @@ static int bfin_mac_resume(struct platform_device *pdev)
1189#define bfin_mac_resume NULL 1114#define bfin_mac_resume NULL
1190#endif /* CONFIG_PM */ 1115#endif /* CONFIG_PM */
1191 1116
1117static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
1118{
1119 struct mii_bus *miibus;
1120 int rc, i;
1121
1122 /*
1123 * We are setting up a network card,
1124 * so set the GPIO pins to Ethernet mode
1125 */
1126 rc = peripheral_request_list(pin_req, DRV_NAME);
1127 if (rc) {
1128 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1129 return rc;
1130 }
1131
1132 rc = -ENOMEM;
1133 miibus = mdiobus_alloc();
1134 if (miibus == NULL)
1135 goto out_err_alloc;
1136 miibus->read = bfin_mdiobus_read;
1137 miibus->write = bfin_mdiobus_write;
1138 miibus->reset = bfin_mdiobus_reset;
1139
1140 miibus->parent = &pdev->dev;
1141 miibus->name = "bfin_mii_bus";
1142 snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
1143 miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1144 if (miibus->irq == NULL)
1145 goto out_err_alloc;
1146 for (i = 0; i < PHY_MAX_ADDR; ++i)
1147 miibus->irq[i] = PHY_POLL;
1148
1149 rc = mdiobus_register(miibus);
1150 if (rc) {
1151 dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1152 goto out_err_mdiobus_register;
1153 }
1154
1155 platform_set_drvdata(pdev, miibus);
1156 return 0;
1157
1158out_err_mdiobus_register:
1159 mdiobus_free(miibus);
1160out_err_alloc:
1161 peripheral_free_list(pin_req);
1162
1163 return rc;
1164}
1165
1166static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
1167{
1168 struct mii_bus *miibus = platform_get_drvdata(pdev);
1169 platform_set_drvdata(pdev, NULL);
1170 mdiobus_unregister(miibus);
1171 mdiobus_free(miibus);
1172 peripheral_free_list(pin_req);
1173 return 0;
1174}
1175
1176static struct platform_driver bfin_mii_bus_driver = {
1177 .probe = bfin_mii_bus_probe,
1178 .remove = __devexit_p(bfin_mii_bus_remove),
1179 .driver = {
1180 .name = "bfin_mii_bus",
1181 .owner = THIS_MODULE,
1182 },
1183};
1184
1192static struct platform_driver bfin_mac_driver = { 1185static struct platform_driver bfin_mac_driver = {
1193 .probe = bfin_mac_probe, 1186 .probe = bfin_mac_probe,
1194 .remove = __devexit_p(bfin_mac_remove), 1187 .remove = __devexit_p(bfin_mac_remove),
@@ -1202,7 +1195,11 @@ static struct platform_driver bfin_mac_driver = {
1202 1195
1203static int __init bfin_mac_init(void) 1196static int __init bfin_mac_init(void)
1204{ 1197{
1205 return platform_driver_register(&bfin_mac_driver); 1198 int ret;
1199 ret = platform_driver_register(&bfin_mii_bus_driver);
1200 if (!ret)
1201 return platform_driver_register(&bfin_mac_driver);
1202 return -ENODEV;
1206} 1203}
1207 1204
1208module_init(bfin_mac_init); 1205module_init(bfin_mac_init);
@@ -1210,6 +1207,7 @@ module_init(bfin_mac_init);
1210static void __exit bfin_mac_cleanup(void) 1207static void __exit bfin_mac_cleanup(void)
1211{ 1208{
1212 platform_driver_unregister(&bfin_mac_driver); 1209 platform_driver_unregister(&bfin_mac_driver);
1210 platform_driver_unregister(&bfin_mii_bus_driver);
1213} 1211}
1214 1212
1215module_exit(bfin_mac_cleanup); 1213module_exit(bfin_mac_cleanup);
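
The bfin_mac hunks above split the MDIO bus setup out of the MAC probe into a separate "bfin_mii_bus" platform driver; bfin_mac_probe() now expects the board code to hand it the MII bus platform device through platform_data and fetches the registered struct mii_bus from that device's driver data. A minimal board-file sketch of that wiring follows; the "bfin_mac" device name and the initcall are assumptions for illustration (only "bfin_mii_bus" is named in the patch).

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device bfin_mii_bus_device = {
	.name = "bfin_mii_bus",
	.id   = -1,
};

static struct platform_device bfin_mac_device = {
	.name = "bfin_mac",	/* assumed EMAC platform device name */
	.id   = -1,
	.dev  = {
		/* bfin_mac_probe() reads the MII bus device from here */
		.platform_data = &bfin_mii_bus_device,
	},
};

static int __init board_add_ethernet(void)
{
	/* bfin_mac_init() registers the MII bus driver before the MAC
	 * driver, so the MII bus probe runs first and sets the drvdata
	 * that bfin_mac_probe() later reads via platform_get_drvdata() */
	platform_device_register(&bfin_mii_bus_device);
	return platform_device_register(&bfin_mac_device);
}
arch_initcall(board_add_ethernet);
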
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index c37acc1d10ac..f99e17e0a319 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -48,6 +48,7 @@
48#include <linux/cache.h> 48#include <linux/cache.h>
49#include <linux/firmware.h> 49#include <linux/firmware.h>
50#include <linux/log2.h> 50#include <linux/log2.h>
51#include <linux/list.h>
51 52
52#include "bnx2.h" 53#include "bnx2.h"
53#include "bnx2_fw.h" 54#include "bnx2_fw.h"
@@ -545,8 +546,7 @@ bnx2_free_rx_mem(struct bnx2 *bp)
545 rxr->rx_desc_mapping[j]); 546 rxr->rx_desc_mapping[j]);
546 rxr->rx_desc_ring[j] = NULL; 547 rxr->rx_desc_ring[j] = NULL;
547 } 548 }
548 if (rxr->rx_buf_ring) 549 vfree(rxr->rx_buf_ring);
549 vfree(rxr->rx_buf_ring);
550 rxr->rx_buf_ring = NULL; 550 rxr->rx_buf_ring = NULL;
551 551
552 for (j = 0; j < bp->rx_max_pg_ring; j++) { 552 for (j = 0; j < bp->rx_max_pg_ring; j++) {
@@ -556,8 +556,7 @@ bnx2_free_rx_mem(struct bnx2 *bp)
556 rxr->rx_pg_desc_mapping[j]); 556 rxr->rx_pg_desc_mapping[j]);
557 rxr->rx_pg_desc_ring[j] = NULL; 557 rxr->rx_pg_desc_ring[j] = NULL;
558 } 558 }
559 if (rxr->rx_pg_ring) 559 vfree(rxr->rx_pg_ring);
560 vfree(rxr->rx_pg_ring);
561 rxr->rx_pg_ring = NULL; 560 rxr->rx_pg_ring = NULL;
562 } 561 }
563} 562}
@@ -3310,7 +3309,7 @@ bnx2_set_rx_mode(struct net_device *dev)
3310{ 3309{
3311 struct bnx2 *bp = netdev_priv(dev); 3310 struct bnx2 *bp = netdev_priv(dev);
3312 u32 rx_mode, sort_mode; 3311 u32 rx_mode, sort_mode;
3313 struct dev_addr_list *uc_ptr; 3312 struct netdev_hw_addr *ha;
3314 int i; 3313 int i;
3315 3314
3316 if (!netif_running(dev)) 3315 if (!netif_running(dev))
@@ -3369,21 +3368,19 @@ bnx2_set_rx_mode(struct net_device *dev)
3369 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; 3368 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3370 } 3369 }
3371 3370
3372 uc_ptr = NULL;
3373 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) { 3371 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
3374 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; 3372 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3375 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | 3373 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3376 BNX2_RPM_SORT_USER0_PROM_VLAN; 3374 BNX2_RPM_SORT_USER0_PROM_VLAN;
3377 } else if (!(dev->flags & IFF_PROMISC)) { 3375 } else if (!(dev->flags & IFF_PROMISC)) {
3378 uc_ptr = dev->uc_list;
3379
3380 /* Add all entries into to the match filter list */ 3376 /* Add all entries into to the match filter list */
3381 for (i = 0; i < dev->uc_count; i++) { 3377 i = 0;
3382 bnx2_set_mac_addr(bp, uc_ptr->da_addr, 3378 list_for_each_entry(ha, &dev->uc_list, list) {
3379 bnx2_set_mac_addr(bp, ha->addr,
3383 i + BNX2_START_UNICAST_ADDRESS_INDEX); 3380 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3384 sort_mode |= (1 << 3381 sort_mode |= (1 <<
3385 (i + BNX2_START_UNICAST_ADDRESS_INDEX)); 3382 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3386 uc_ptr = uc_ptr->next; 3383 i++;
3387 } 3384 }
3388 3385
3389 } 3386 }
@@ -5488,7 +5485,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5488 dev_kfree_skb(skb); 5485 dev_kfree_skb(skb);
5489 return -EIO; 5486 return -EIO;
5490 } 5487 }
5491 map = skb_shinfo(skb)->dma_maps[0]; 5488 map = skb_shinfo(skb)->dma_head;
5492 5489
5493 REG_WR(bp, BNX2_HC_COMMAND, 5490 REG_WR(bp, BNX2_HC_COMMAND,
5494 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 5491 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
@@ -6168,7 +6165,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6168 } 6165 }
6169 6166
6170 sp = skb_shinfo(skb); 6167 sp = skb_shinfo(skb);
6171 mapping = sp->dma_maps[0]; 6168 mapping = sp->dma_head;
6172 6169
6173 tx_buf = &txr->tx_buf_ring[ring_prod]; 6170 tx_buf = &txr->tx_buf_ring[ring_prod];
6174 tx_buf->skb = skb; 6171 tx_buf->skb = skb;
@@ -6192,7 +6189,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6192 txbd = &txr->tx_desc_ring[ring_prod]; 6189 txbd = &txr->tx_desc_ring[ring_prod];
6193 6190
6194 len = frag->size; 6191 len = frag->size;
6195 mapping = sp->dma_maps[i + 1]; 6192 mapping = sp->dma_maps[i];
6196 6193
6197 txbd->tx_bd_haddr_hi = (u64) mapping >> 32; 6194 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6198 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; 6195 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -6211,7 +6208,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6211 mmiowb(); 6208 mmiowb();
6212 6209
6213 txr->tx_prod = prod; 6210 txr->tx_prod = prod;
6214 dev->trans_start = jiffies;
6215 6211
6216 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { 6212 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6217 netif_tx_stop_queue(txq); 6213 netif_tx_stop_queue(txq);
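
The rx-mode hunk above is part of the conversion of dev->uc_list from the old dev_addr_list chain to a list of struct netdev_hw_addr entries walked with list_for_each_entry(). A minimal sketch of the new iteration pattern in isolation; the helper name is illustrative and not part of bnx2.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>

static void example_dump_uc_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	/* dev->uc_list is now a list_head of struct netdev_hw_addr;
	 * ha->addr replaces the old uc_ptr->da_addr */
	list_for_each_entry(ha, &dev->uc_list, list)
		pr_info("%s: unicast filter slot %d: %pM\n",
			dev->name, i++, ha->addr);
}
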
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index e01539c33b8a..fbf1352e9c1c 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -10617,7 +10617,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10617 mmiowb(); 10617 mmiowb();
10618 10618
10619 fp->tx_bd_prod += nbd; 10619 fp->tx_bd_prod += nbd;
10620 dev->trans_start = jiffies;
10621 10620
10622 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { 10621 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10623 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod 10622 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 92a9d69c5650..2f4329e91a4c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2405,8 +2405,7 @@ static void bond_miimon_commit(struct bonding *bond)
2405 bond_3ad_handle_link_change(slave, 2405 bond_3ad_handle_link_change(slave,
2406 BOND_LINK_DOWN); 2406 BOND_LINK_DOWN);
2407 2407
2408 if (bond->params.mode == BOND_MODE_TLB || 2408 if (bond_is_lb(bond))
2409 bond->params.mode == BOND_MODE_ALB)
2410 bond_alb_handle_link_change(bond, slave, 2409 bond_alb_handle_link_change(bond, slave,
2411 BOND_LINK_DOWN); 2410 BOND_LINK_DOWN);
2412 2411
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 3a1b7b04eb79..5fb861a08664 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1541,6 +1541,7 @@ int bond_create_sysfs(void)
1541 printk(KERN_ERR 1541 printk(KERN_ERR
1542 "network device named %s already exists in sysfs", 1542 "network device named %s already exists in sysfs",
1543 class_attr_bonding_masters.attr.name); 1543 class_attr_bonding_masters.attr.name);
1544 ret = 0;
1544 } 1545 }
1545 1546
1546 return ret; 1547 return ret;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index ca849d2adf98..41ceca12c68f 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -286,8 +286,7 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
286static inline void bond_set_slave_inactive_flags(struct slave *slave) 286static inline void bond_set_slave_inactive_flags(struct slave *slave)
287{ 287{
288 struct bonding *bond = netdev_priv(slave->dev->master); 288 struct bonding *bond = netdev_priv(slave->dev->master);
289 if (bond->params.mode != BOND_MODE_TLB && 289 if (!bond_is_lb(bond))
290 bond->params.mode != BOND_MODE_ALB)
291 slave->state = BOND_STATE_BACKUP; 290 slave->state = BOND_STATE_BACKUP;
292 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; 291 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
293 if (slave_do_arp_validate(bond, slave)) 292 if (slave_do_arp_validate(bond, slave))
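
Both bonding hunks above fold the explicit TLB/ALB mode test into a bond_is_lb() helper. A sketch of what that helper amounts to, based on the condition it replaces; the in-tree definition lives in drivers/net/bonding/bonding.h and may be spelled as a macro rather than an inline.

static inline int bond_is_lb(const struct bonding *bond)
{
	/* the two load-balancing modes that need bond_alb handling */
	return bond->params.mode == BOND_MODE_TLB ||
	       bond->params.mode == BOND_MODE_ALB;
}
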
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index cfd6c5a285fa..d5e18812bf49 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -51,6 +51,15 @@ config CAN_SJA1000_PLATFORM
51 boards from Phytec (http://www.phytec.de) like the PCM027, 51 boards from Phytec (http://www.phytec.de) like the PCM027,
52 PCM038. 52 PCM038.
53 53
54config CAN_SJA1000_OF_PLATFORM
55 depends on CAN_SJA1000 && PPC_OF
56 tristate "Generic OF Platform Bus based SJA1000 driver"
57 ---help---
58 This driver adds support for the SJA1000 chips connected to
59 the OpenFirmware "platform bus" found on embedded systems with
60 OpenFirmware bindings, e.g. if you have a PowerPC based system
61 you may want to enable this option.
62
54config CAN_EMS_PCI 63config CAN_EMS_PCI
55 tristate "EMS CPC-PCI and CPC-PCIe Card" 64 tristate "EMS CPC-PCI and CPC-PCIe Card"
56 depends on PCI && CAN_SJA1000 65 depends on PCI && CAN_SJA1000
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 52b0e7d8901d..574daddc21bf 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -477,7 +477,7 @@ int open_candev(struct net_device *dev)
477 477
478 return 0; 478 return 0;
479} 479}
480EXPORT_SYMBOL(open_candev); 480EXPORT_SYMBOL_GPL(open_candev);
481 481
482/* 482/*
483 * Common close function for cleanup before the device gets closed. 483 * Common close function for cleanup before the device gets closed.
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index d6c631f9e665..9d0c08da273c 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_CAN_SJA1000) += sja1000.o 5obj-$(CONFIG_CAN_SJA1000) += sja1000.o
6obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o 6obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
7obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
7obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o 8obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
8obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o 9obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
9 10
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 3cd2ff9165e3..121b64101d72 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -99,25 +99,21 @@ MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
99 */ 99 */
100static u8 ems_pci_readb(struct ems_pci_card *card, unsigned int port) 100static u8 ems_pci_readb(struct ems_pci_card *card, unsigned int port)
101{ 101{
102 return readb((void __iomem *)card->base_addr 102 return readb(card->base_addr + (port * EMS_PCI_PORT_BYTES));
103 + (port * EMS_PCI_PORT_BYTES));
104} 103}
105 104
106static u8 ems_pci_read_reg(const struct net_device *dev, int port) 105static u8 ems_pci_read_reg(const struct sja1000_priv *priv, int port)
107{ 106{
108 return readb((void __iomem *)dev->base_addr 107 return readb(priv->reg_base + (port * EMS_PCI_PORT_BYTES));
109 + (port * EMS_PCI_PORT_BYTES));
110} 108}
111 109
112static void ems_pci_write_reg(const struct net_device *dev, int port, u8 val) 110static void ems_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
113{ 111{
114 writeb(val, (void __iomem *)dev->base_addr 112 writeb(val, priv->reg_base + (port * EMS_PCI_PORT_BYTES));
115 + (port * EMS_PCI_PORT_BYTES));
116} 113}
117 114
118static void ems_pci_post_irq(const struct net_device *dev) 115static void ems_pci_post_irq(const struct sja1000_priv *priv)
119{ 116{
120 struct sja1000_priv *priv = netdev_priv(dev);
121 struct ems_pci_card *card = (struct ems_pci_card *)priv->priv; 117 struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
122 118
123 /* reset int flag of pita */ 119 /* reset int flag of pita */
@@ -129,17 +125,17 @@ static void ems_pci_post_irq(const struct net_device *dev)
129 * Check if a CAN controller is present at the specified location 125 * Check if a CAN controller is present at the specified location
130 * by trying to set 'em into the PeliCAN mode 126 * by trying to set 'em into the PeliCAN mode
131 */ 127 */
132static inline int ems_pci_check_chan(struct net_device *dev) 128static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
133{ 129{
134 unsigned char res; 130 unsigned char res;
135 131
136 /* Make sure SJA1000 is in reset mode */ 132 /* Make sure SJA1000 is in reset mode */
137 ems_pci_write_reg(dev, REG_MOD, 1); 133 ems_pci_write_reg(priv, REG_MOD, 1);
138 134
139 ems_pci_write_reg(dev, REG_CDR, CDR_PELICAN); 135 ems_pci_write_reg(priv, REG_CDR, CDR_PELICAN);
140 136
141 /* read reset-values */ 137 /* read reset-values */
142 res = ems_pci_read_reg(dev, REG_CDR); 138 res = ems_pci_read_reg(priv, REG_CDR);
143 139
144 if (res == CDR_PELICAN) 140 if (res == CDR_PELICAN)
145 return 1; 141 return 1;
@@ -218,14 +214,12 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
218 card->conf_addr = pci_iomap(pdev, 0, EMS_PCI_MEM_SIZE); 214 card->conf_addr = pci_iomap(pdev, 0, EMS_PCI_MEM_SIZE);
219 if (card->conf_addr == NULL) { 215 if (card->conf_addr == NULL) {
220 err = -ENOMEM; 216 err = -ENOMEM;
221
222 goto failure_cleanup; 217 goto failure_cleanup;
223 } 218 }
224 219
225 card->base_addr = pci_iomap(pdev, 1, EMS_PCI_MEM_SIZE); 220 card->base_addr = pci_iomap(pdev, 1, EMS_PCI_MEM_SIZE);
226 if (card->base_addr == NULL) { 221 if (card->base_addr == NULL) {
227 err = -ENOMEM; 222 err = -ENOMEM;
228
229 goto failure_cleanup; 223 goto failure_cleanup;
230 } 224 }
231 225
@@ -239,7 +233,6 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
239 ems_pci_readb(card, 3) != 0xCB || 233 ems_pci_readb(card, 3) != 0xCB ||
240 ems_pci_readb(card, 4) != 0x11) { 234 ems_pci_readb(card, 4) != 0x11) {
241 dev_err(&pdev->dev, "Not EMS Dr. Thomas Wuensche interface\n"); 235 dev_err(&pdev->dev, "Not EMS Dr. Thomas Wuensche interface\n");
242
243 err = -ENODEV; 236 err = -ENODEV;
244 goto failure_cleanup; 237 goto failure_cleanup;
245 } 238 }
@@ -260,12 +253,11 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
260 priv->irq_flags = IRQF_SHARED; 253 priv->irq_flags = IRQF_SHARED;
261 254
262 dev->irq = pdev->irq; 255 dev->irq = pdev->irq;
263 dev->base_addr = (unsigned long)(card->base_addr 256 priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET
264 + EMS_PCI_CAN_BASE_OFFSET 257 + (i * EMS_PCI_CAN_CTRL_SIZE);
265 + (i * EMS_PCI_CAN_CTRL_SIZE));
266 258
267 /* Check if channel is present */ 259 /* Check if channel is present */
268 if (ems_pci_check_chan(dev)) { 260 if (ems_pci_check_chan(priv)) {
269 priv->read_reg = ems_pci_read_reg; 261 priv->read_reg = ems_pci_read_reg;
270 priv->write_reg = ems_pci_write_reg; 262 priv->write_reg = ems_pci_write_reg;
271 priv->post_irq = ems_pci_post_irq; 263 priv->post_irq = ems_pci_post_irq;
@@ -289,9 +281,8 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
289 281
290 card->channels++; 282 card->channels++;
291 283
292 dev_info(&pdev->dev, "Channel #%d at %#lX, irq %d\n", 284 dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
293 i + 1, dev->base_addr, 285 i + 1, priv->reg_base, dev->irq);
294 dev->irq);
295 } else { 286 } else {
296 free_sja1000dev(dev); 287 free_sja1000dev(dev);
297 } 288 }
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 00830b358c4f..7dd7769b9713 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -117,14 +117,15 @@ static struct pci_device_id kvaser_pci_tbl[] = {
117 117
118MODULE_DEVICE_TABLE(pci, kvaser_pci_tbl); 118MODULE_DEVICE_TABLE(pci, kvaser_pci_tbl);
119 119
120static u8 kvaser_pci_read_reg(const struct net_device *dev, int port) 120static u8 kvaser_pci_read_reg(const struct sja1000_priv *priv, int port)
121{ 121{
122 return ioread8((void __iomem *)(dev->base_addr + port)); 122 return ioread8(priv->reg_base + port);
123} 123}
124 124
125static void kvaser_pci_write_reg(const struct net_device *dev, int port, u8 val) 125static void kvaser_pci_write_reg(const struct sja1000_priv *priv,
126 int port, u8 val)
126{ 127{
127 iowrite8(val, (void __iomem *)(dev->base_addr + port)); 128 iowrite8(val, priv->reg_base + port);
128} 129}
129 130
130static void kvaser_pci_disable_irq(struct net_device *dev) 131static void kvaser_pci_disable_irq(struct net_device *dev)
@@ -199,7 +200,7 @@ static void kvaser_pci_del_chan(struct net_device *dev)
199 } 200 }
200 unregister_sja1000dev(dev); 201 unregister_sja1000dev(dev);
201 202
202 pci_iounmap(board->pci_dev, (void __iomem *)dev->base_addr); 203 pci_iounmap(board->pci_dev, priv->reg_base);
203 pci_iounmap(board->pci_dev, board->conf_addr); 204 pci_iounmap(board->pci_dev, board->conf_addr);
204 pci_iounmap(board->pci_dev, board->res_addr); 205 pci_iounmap(board->pci_dev, board->res_addr);
205 206
@@ -210,7 +211,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
210 struct net_device **master_dev, 211 struct net_device **master_dev,
211 void __iomem *conf_addr, 212 void __iomem *conf_addr,
212 void __iomem *res_addr, 213 void __iomem *res_addr,
213 unsigned long base_addr) 214 void __iomem *base_addr)
214{ 215{
215 struct net_device *dev; 216 struct net_device *dev;
216 struct sja1000_priv *priv; 217 struct sja1000_priv *priv;
@@ -252,7 +253,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
252 board->xilinx_ver = master_board->xilinx_ver; 253 board->xilinx_ver = master_board->xilinx_ver;
253 } 254 }
254 255
255 dev->base_addr = base_addr + channel * KVASER_PCI_PORT_BYTES; 256 priv->reg_base = base_addr + channel * KVASER_PCI_PORT_BYTES;
256 257
257 priv->read_reg = kvaser_pci_read_reg; 258 priv->read_reg = kvaser_pci_read_reg;
258 priv->write_reg = kvaser_pci_write_reg; 259 priv->write_reg = kvaser_pci_write_reg;
@@ -267,8 +268,8 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
267 268
268 init_step = 4; 269 init_step = 4;
269 270
270 dev_info(&pdev->dev, "base_addr=%#lx conf_addr=%p irq=%d\n", 271 dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n",
271 dev->base_addr, board->conf_addr, dev->irq); 272 priv->reg_base, board->conf_addr, dev->irq);
272 273
273 SET_NETDEV_DEV(dev, &pdev->dev); 274 SET_NETDEV_DEV(dev, &pdev->dev);
274 275
@@ -343,7 +344,7 @@ static int __devinit kvaser_pci_init_one(struct pci_dev *pdev,
343 for (i = 0; i < no_channels; i++) { 344 for (i = 0; i < no_channels; i++) {
344 err = kvaser_pci_add_chan(pdev, i, &master_dev, 345 err = kvaser_pci_add_chan(pdev, i, &master_dev,
345 conf_addr, res_addr, 346 conf_addr, res_addr,
346 (unsigned long)base_addr); 347 base_addr);
347 if (err) 348 if (err)
348 goto failure_cleanup; 349 goto failure_cleanup;
349 } 350 }
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 05b38dde648e..571f133a8fec 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -89,7 +89,7 @@ static int sja1000_probe_chip(struct net_device *dev)
89{ 89{
90 struct sja1000_priv *priv = netdev_priv(dev); 90 struct sja1000_priv *priv = netdev_priv(dev);
91 91
92 if (dev->base_addr && (priv->read_reg(dev, 0) == 0xFF)) { 92 if (priv->reg_base && (priv->read_reg(priv, 0) == 0xFF)) {
93 printk(KERN_INFO "%s: probing @0x%lX failed\n", 93 printk(KERN_INFO "%s: probing @0x%lX failed\n",
94 DRV_NAME, dev->base_addr); 94 DRV_NAME, dev->base_addr);
95 return 0; 95 return 0;
@@ -100,11 +100,11 @@ static int sja1000_probe_chip(struct net_device *dev)
100static void set_reset_mode(struct net_device *dev) 100static void set_reset_mode(struct net_device *dev)
101{ 101{
102 struct sja1000_priv *priv = netdev_priv(dev); 102 struct sja1000_priv *priv = netdev_priv(dev);
103 unsigned char status = priv->read_reg(dev, REG_MOD); 103 unsigned char status = priv->read_reg(priv, REG_MOD);
104 int i; 104 int i;
105 105
106 /* disable interrupts */ 106 /* disable interrupts */
107 priv->write_reg(dev, REG_IER, IRQ_OFF); 107 priv->write_reg(priv, REG_IER, IRQ_OFF);
108 108
109 for (i = 0; i < 100; i++) { 109 for (i = 0; i < 100; i++) {
110 /* check reset bit */ 110 /* check reset bit */
@@ -113,9 +113,9 @@ static void set_reset_mode(struct net_device *dev)
113 return; 113 return;
114 } 114 }
115 115
116 priv->write_reg(dev, REG_MOD, MOD_RM); /* reset chip */ 116 priv->write_reg(priv, REG_MOD, MOD_RM); /* reset chip */
117 udelay(10); 117 udelay(10);
118 status = priv->read_reg(dev, REG_MOD); 118 status = priv->read_reg(priv, REG_MOD);
119 } 119 }
120 120
121 dev_err(dev->dev.parent, "setting SJA1000 into reset mode failed!\n"); 121 dev_err(dev->dev.parent, "setting SJA1000 into reset mode failed!\n");
@@ -124,7 +124,7 @@ static void set_reset_mode(struct net_device *dev)
124static void set_normal_mode(struct net_device *dev) 124static void set_normal_mode(struct net_device *dev)
125{ 125{
126 struct sja1000_priv *priv = netdev_priv(dev); 126 struct sja1000_priv *priv = netdev_priv(dev);
127 unsigned char status = priv->read_reg(dev, REG_MOD); 127 unsigned char status = priv->read_reg(priv, REG_MOD);
128 int i; 128 int i;
129 129
130 for (i = 0; i < 100; i++) { 130 for (i = 0; i < 100; i++) {
@@ -132,14 +132,14 @@ static void set_normal_mode(struct net_device *dev)
132 if ((status & MOD_RM) == 0) { 132 if ((status & MOD_RM) == 0) {
133 priv->can.state = CAN_STATE_ERROR_ACTIVE; 133 priv->can.state = CAN_STATE_ERROR_ACTIVE;
134 /* enable all interrupts */ 134 /* enable all interrupts */
135 priv->write_reg(dev, REG_IER, IRQ_ALL); 135 priv->write_reg(priv, REG_IER, IRQ_ALL);
136 return; 136 return;
137 } 137 }
138 138
139 /* set chip to normal mode */ 139 /* set chip to normal mode */
140 priv->write_reg(dev, REG_MOD, 0x00); 140 priv->write_reg(priv, REG_MOD, 0x00);
141 udelay(10); 141 udelay(10);
142 status = priv->read_reg(dev, REG_MOD); 142 status = priv->read_reg(priv, REG_MOD);
143 } 143 }
144 144
145 dev_err(dev->dev.parent, "setting SJA1000 into normal mode failed!\n"); 145 dev_err(dev->dev.parent, "setting SJA1000 into normal mode failed!\n");
@@ -154,9 +154,9 @@ static void sja1000_start(struct net_device *dev)
154 set_reset_mode(dev); 154 set_reset_mode(dev);
155 155
156 /* Clear error counters and error code capture */ 156 /* Clear error counters and error code capture */
157 priv->write_reg(dev, REG_TXERR, 0x0); 157 priv->write_reg(priv, REG_TXERR, 0x0);
158 priv->write_reg(dev, REG_RXERR, 0x0); 158 priv->write_reg(priv, REG_RXERR, 0x0);
159 priv->read_reg(dev, REG_ECC); 159 priv->read_reg(priv, REG_ECC);
160 160
161 /* leave reset mode */ 161 /* leave reset mode */
162 set_normal_mode(dev); 162 set_normal_mode(dev);
@@ -198,8 +198,8 @@ static int sja1000_set_bittiming(struct net_device *dev)
198 dev_info(dev->dev.parent, 198 dev_info(dev->dev.parent,
199 "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); 199 "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
200 200
201 priv->write_reg(dev, REG_BTR0, btr0); 201 priv->write_reg(priv, REG_BTR0, btr0);
202 priv->write_reg(dev, REG_BTR1, btr1); 202 priv->write_reg(priv, REG_BTR1, btr1);
203 203
204 return 0; 204 return 0;
205} 205}
@@ -217,20 +217,20 @@ static void chipset_init(struct net_device *dev)
217 struct sja1000_priv *priv = netdev_priv(dev); 217 struct sja1000_priv *priv = netdev_priv(dev);
218 218
219 /* set clock divider and output control register */ 219 /* set clock divider and output control register */
220 priv->write_reg(dev, REG_CDR, priv->cdr | CDR_PELICAN); 220 priv->write_reg(priv, REG_CDR, priv->cdr | CDR_PELICAN);
221 221
222 /* set acceptance filter (accept all) */ 222 /* set acceptance filter (accept all) */
223 priv->write_reg(dev, REG_ACCC0, 0x00); 223 priv->write_reg(priv, REG_ACCC0, 0x00);
224 priv->write_reg(dev, REG_ACCC1, 0x00); 224 priv->write_reg(priv, REG_ACCC1, 0x00);
225 priv->write_reg(dev, REG_ACCC2, 0x00); 225 priv->write_reg(priv, REG_ACCC2, 0x00);
226 priv->write_reg(dev, REG_ACCC3, 0x00); 226 priv->write_reg(priv, REG_ACCC3, 0x00);
227 227
228 priv->write_reg(dev, REG_ACCM0, 0xFF); 228 priv->write_reg(priv, REG_ACCM0, 0xFF);
229 priv->write_reg(dev, REG_ACCM1, 0xFF); 229 priv->write_reg(priv, REG_ACCM1, 0xFF);
230 priv->write_reg(dev, REG_ACCM2, 0xFF); 230 priv->write_reg(priv, REG_ACCM2, 0xFF);
231 priv->write_reg(dev, REG_ACCM3, 0xFF); 231 priv->write_reg(priv, REG_ACCM3, 0xFF);
232 232
233 priv->write_reg(dev, REG_OCR, priv->ocr | OCR_MODE_NORMAL); 233 priv->write_reg(priv, REG_OCR, priv->ocr | OCR_MODE_NORMAL);
234} 234}
235 235
236/* 236/*
@@ -261,27 +261,27 @@ static int sja1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
261 if (id & CAN_EFF_FLAG) { 261 if (id & CAN_EFF_FLAG) {
262 fi |= FI_FF; 262 fi |= FI_FF;
263 dreg = EFF_BUF; 263 dreg = EFF_BUF;
264 priv->write_reg(dev, REG_FI, fi); 264 priv->write_reg(priv, REG_FI, fi);
265 priv->write_reg(dev, REG_ID1, (id & 0x1fe00000) >> (5 + 16)); 265 priv->write_reg(priv, REG_ID1, (id & 0x1fe00000) >> (5 + 16));
266 priv->write_reg(dev, REG_ID2, (id & 0x001fe000) >> (5 + 8)); 266 priv->write_reg(priv, REG_ID2, (id & 0x001fe000) >> (5 + 8));
267 priv->write_reg(dev, REG_ID3, (id & 0x00001fe0) >> 5); 267 priv->write_reg(priv, REG_ID3, (id & 0x00001fe0) >> 5);
268 priv->write_reg(dev, REG_ID4, (id & 0x0000001f) << 3); 268 priv->write_reg(priv, REG_ID4, (id & 0x0000001f) << 3);
269 } else { 269 } else {
270 dreg = SFF_BUF; 270 dreg = SFF_BUF;
271 priv->write_reg(dev, REG_FI, fi); 271 priv->write_reg(priv, REG_FI, fi);
272 priv->write_reg(dev, REG_ID1, (id & 0x000007f8) >> 3); 272 priv->write_reg(priv, REG_ID1, (id & 0x000007f8) >> 3);
273 priv->write_reg(dev, REG_ID2, (id & 0x00000007) << 5); 273 priv->write_reg(priv, REG_ID2, (id & 0x00000007) << 5);
274 } 274 }
275 275
276 for (i = 0; i < dlc; i++) 276 for (i = 0; i < dlc; i++)
277 priv->write_reg(dev, dreg++, cf->data[i]); 277 priv->write_reg(priv, dreg++, cf->data[i]);
278 278
279 stats->tx_bytes += dlc; 279 stats->tx_bytes += dlc;
280 dev->trans_start = jiffies; 280 dev->trans_start = jiffies;
281 281
282 can_put_echo_skb(skb, dev, 0); 282 can_put_echo_skb(skb, dev, 0);
283 283
284 priv->write_reg(dev, REG_CMR, CMD_TR); 284 priv->write_reg(priv, REG_CMR, CMD_TR);
285 285
286 return 0; 286 return 0;
287} 287}
@@ -304,22 +304,22 @@ static void sja1000_rx(struct net_device *dev)
304 skb->dev = dev; 304 skb->dev = dev;
305 skb->protocol = htons(ETH_P_CAN); 305 skb->protocol = htons(ETH_P_CAN);
306 306
307 fi = priv->read_reg(dev, REG_FI); 307 fi = priv->read_reg(priv, REG_FI);
308 dlc = fi & 0x0F; 308 dlc = fi & 0x0F;
309 309
310 if (fi & FI_FF) { 310 if (fi & FI_FF) {
311 /* extended frame format (EFF) */ 311 /* extended frame format (EFF) */
312 dreg = EFF_BUF; 312 dreg = EFF_BUF;
313 id = (priv->read_reg(dev, REG_ID1) << (5 + 16)) 313 id = (priv->read_reg(priv, REG_ID1) << (5 + 16))
314 | (priv->read_reg(dev, REG_ID2) << (5 + 8)) 314 | (priv->read_reg(priv, REG_ID2) << (5 + 8))
315 | (priv->read_reg(dev, REG_ID3) << 5) 315 | (priv->read_reg(priv, REG_ID3) << 5)
316 | (priv->read_reg(dev, REG_ID4) >> 3); 316 | (priv->read_reg(priv, REG_ID4) >> 3);
317 id |= CAN_EFF_FLAG; 317 id |= CAN_EFF_FLAG;
318 } else { 318 } else {
319 /* standard frame format (SFF) */ 319 /* standard frame format (SFF) */
320 dreg = SFF_BUF; 320 dreg = SFF_BUF;
321 id = (priv->read_reg(dev, REG_ID1) << 3) 321 id = (priv->read_reg(priv, REG_ID1) << 3)
322 | (priv->read_reg(dev, REG_ID2) >> 5); 322 | (priv->read_reg(priv, REG_ID2) >> 5);
323 } 323 }
324 324
325 if (fi & FI_RTR) 325 if (fi & FI_RTR)
@@ -330,13 +330,13 @@ static void sja1000_rx(struct net_device *dev)
330 cf->can_id = id; 330 cf->can_id = id;
331 cf->can_dlc = dlc; 331 cf->can_dlc = dlc;
332 for (i = 0; i < dlc; i++) 332 for (i = 0; i < dlc; i++)
333 cf->data[i] = priv->read_reg(dev, dreg++); 333 cf->data[i] = priv->read_reg(priv, dreg++);
334 334
335 while (i < 8) 335 while (i < 8)
336 cf->data[i++] = 0; 336 cf->data[i++] = 0;
337 337
338 /* release receive buffer */ 338 /* release receive buffer */
339 priv->write_reg(dev, REG_CMR, CMD_RRB); 339 priv->write_reg(priv, REG_CMR, CMD_RRB);
340 340
341 netif_rx(skb); 341 netif_rx(skb);
342 342
@@ -371,7 +371,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
371 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 371 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
372 stats->rx_over_errors++; 372 stats->rx_over_errors++;
373 stats->rx_errors++; 373 stats->rx_errors++;
374 priv->write_reg(dev, REG_CMR, CMD_CDO); /* clear bit */ 374 priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
375 } 375 }
376 376
377 if (isrc & IRQ_EI) { 377 if (isrc & IRQ_EI) {
@@ -392,7 +392,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
392 priv->can.can_stats.bus_error++; 392 priv->can.can_stats.bus_error++;
393 stats->rx_errors++; 393 stats->rx_errors++;
394 394
395 ecc = priv->read_reg(dev, REG_ECC); 395 ecc = priv->read_reg(priv, REG_ECC);
396 396
397 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 397 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
398 398
@@ -426,7 +426,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
426 if (isrc & IRQ_ALI) { 426 if (isrc & IRQ_ALI) {
427 /* arbitration lost interrupt */ 427 /* arbitration lost interrupt */
428 dev_dbg(dev->dev.parent, "arbitration lost interrupt\n"); 428 dev_dbg(dev->dev.parent, "arbitration lost interrupt\n");
429 alc = priv->read_reg(dev, REG_ALC); 429 alc = priv->read_reg(priv, REG_ALC);
430 priv->can.can_stats.arbitration_lost++; 430 priv->can.can_stats.arbitration_lost++;
431 stats->rx_errors++; 431 stats->rx_errors++;
432 cf->can_id |= CAN_ERR_LOSTARB; 432 cf->can_id |= CAN_ERR_LOSTARB;
@@ -435,8 +435,8 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
435 435
436 if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING || 436 if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
437 state == CAN_STATE_ERROR_PASSIVE)) { 437 state == CAN_STATE_ERROR_PASSIVE)) {
438 uint8_t rxerr = priv->read_reg(dev, REG_RXERR); 438 uint8_t rxerr = priv->read_reg(priv, REG_RXERR);
439 uint8_t txerr = priv->read_reg(dev, REG_TXERR); 439 uint8_t txerr = priv->read_reg(priv, REG_TXERR);
440 cf->can_id |= CAN_ERR_CRTL; 440 cf->can_id |= CAN_ERR_CRTL;
441 if (state == CAN_STATE_ERROR_WARNING) { 441 if (state == CAN_STATE_ERROR_WARNING) {
442 priv->can.can_stats.error_warning++; 442 priv->can.can_stats.error_warning++;
@@ -471,15 +471,15 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
471 int n = 0; 471 int n = 0;
472 472
473 /* Shared interrupts and IRQ off? */ 473 /* Shared interrupts and IRQ off? */
474 if (priv->read_reg(dev, REG_IER) == IRQ_OFF) 474 if (priv->read_reg(priv, REG_IER) == IRQ_OFF)
475 return IRQ_NONE; 475 return IRQ_NONE;
476 476
477 if (priv->pre_irq) 477 if (priv->pre_irq)
478 priv->pre_irq(dev); 478 priv->pre_irq(priv);
479 479
480 while ((isrc = priv->read_reg(dev, REG_IR)) && (n < SJA1000_MAX_IRQ)) { 480 while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
481 n++; 481 n++;
482 status = priv->read_reg(dev, REG_SR); 482 status = priv->read_reg(priv, REG_SR);
483 483
484 if (isrc & IRQ_WUI) 484 if (isrc & IRQ_WUI)
485 dev_warn(dev->dev.parent, "wakeup interrupt\n"); 485 dev_warn(dev->dev.parent, "wakeup interrupt\n");
@@ -494,7 +494,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
494 /* receive interrupt */ 494 /* receive interrupt */
495 while (status & SR_RBS) { 495 while (status & SR_RBS) {
496 sja1000_rx(dev); 496 sja1000_rx(dev);
497 status = priv->read_reg(dev, REG_SR); 497 status = priv->read_reg(priv, REG_SR);
498 } 498 }
499 } 499 }
500 if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { 500 if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
@@ -505,7 +505,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
505 } 505 }
506 506
507 if (priv->post_irq) 507 if (priv->post_irq)
508 priv->post_irq(dev); 508 priv->post_irq(priv);
509 509
510 if (n >= SJA1000_MAX_IRQ) 510 if (n >= SJA1000_MAX_IRQ)
511 dev_dbg(dev->dev.parent, "%d messages handled in ISR", n); 511 dev_dbg(dev->dev.parent, "%d messages handled in ISR", n);
@@ -532,8 +532,8 @@ static int sja1000_open(struct net_device *dev)
532 err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags, 532 err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags,
533 dev->name, (void *)dev); 533 dev->name, (void *)dev);
534 if (err) { 534 if (err) {
535 return -EAGAIN;
536 close_candev(dev); 535 close_candev(dev);
536 return -EAGAIN;
537 } 537 }
538 } 538 }
539 539
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index ccd302887964..302d2c763ad7 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -155,14 +155,15 @@ struct sja1000_priv {
155 struct sk_buff *echo_skb; 155 struct sk_buff *echo_skb;
156 156
157 /* the lower-layer is responsible for appropriate locking */ 157 /* the lower-layer is responsible for appropriate locking */
158 u8 (*read_reg) (const struct net_device *dev, int reg); 158 u8 (*read_reg) (const struct sja1000_priv *priv, int reg);
159 void (*write_reg) (const struct net_device *dev, int reg, u8 val); 159 void (*write_reg) (const struct sja1000_priv *priv, int reg, u8 val);
160 void (*pre_irq) (const struct net_device *dev); 160 void (*pre_irq) (const struct sja1000_priv *priv);
161 void (*post_irq) (const struct net_device *dev); 161 void (*post_irq) (const struct sja1000_priv *priv);
162 162
163 void *priv; /* for board-specific data */ 163 void *priv; /* for board-specific data */
164 struct net_device *dev; 164 struct net_device *dev;
165 165
166 void __iomem *reg_base; /* ioremap'ed address to registers */
166 unsigned long irq_flags; /* for request_irq() */ 167 unsigned long irq_flags; /* for request_irq() */
167 168
168 u16 flags; /* custom mode flags */ 169 u16 flags; /* custom mode flags */
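
The sja1000.h hunk above is the core of this series of SJA1000 changes: the register accessors and IRQ hooks now take the sja1000_priv itself, and the mapped register window lives in priv->reg_base instead of dev->base_addr. A minimal pair of accessors for a byte-wide memory-mapped controller, following the same pattern the platform and PCI drivers in this patch use:

#include <linux/io.h>

static u8 example_sja1000_read_reg(const struct sja1000_priv *priv, int reg)
{
	return ioread8(priv->reg_base + reg);
}

static void example_sja1000_write_reg(const struct sja1000_priv *priv,
				      int reg, u8 val)
{
	iowrite8(val, priv->reg_base + reg);
}

A board driver fills in priv->reg_base with its ioremap'ed base and points priv->read_reg and priv->write_reg at accessors like these before registering the device, as sp_probe() further down does.
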
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
new file mode 100644
index 000000000000..3373560405ba
--- /dev/null
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -0,0 +1,235 @@
1/*
2 * Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
3 *
4 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software Foundation,
17 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
21 * bus found on embedded PowerPC systems. You need a SJA1000 CAN node
22 * definition in your flattened device tree source (DTS) file similar to:
23 *
24 * can@3,100 {
25 * compatible = "nxp,sja1000";
26 * reg = <3 0x100 0x80>;
27 * interrupts = <2 0>;
28 * interrupt-parent = <&mpic>;
29 * nxp,external-clock-frequency = <16000000>;
30 * };
31 *
32 * See "Documentation/powerpc/dts-bindings/can/sja1000.txt" for further
33 * information.
34 */
35
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/interrupt.h>
39#include <linux/netdevice.h>
40#include <linux/delay.h>
41#include <linux/can.h>
42#include <linux/can/dev.h>
43
44#include <linux/of_platform.h>
45#include <asm/prom.h>
46
47#include "sja1000.h"
48
49#define DRV_NAME "sja1000_of_platform"
50
51MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
52MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
53MODULE_LICENSE("GPL v2");
54
55#define SJA1000_OFP_CAN_CLOCK (16000000 / 2)
56
57#define SJA1000_OFP_OCR OCR_TX0_PULLDOWN
58#define SJA1000_OFP_CDR (CDR_CBP | CDR_CLK_OFF)
59
60static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
61{
62 return in_8(priv->reg_base + reg);
63}
64
65static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
66 int reg, u8 val)
67{
68 out_8(priv->reg_base + reg, val);
69}
70
71static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
72{
73 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
74 struct sja1000_priv *priv = netdev_priv(dev);
75 struct device_node *np = ofdev->node;
76 struct resource res;
77
78 dev_set_drvdata(&ofdev->dev, NULL);
79
80 unregister_sja1000dev(dev);
81 free_sja1000dev(dev);
82 iounmap(priv->reg_base);
83 irq_dispose_mapping(dev->irq);
84
85 of_address_to_resource(np, 0, &res);
86 release_mem_region(res.start, resource_size(&res));
87
88 return 0;
89}
90
91static int __devinit sja1000_ofp_probe(struct of_device *ofdev,
92 const struct of_device_id *id)
93{
94 struct device_node *np = ofdev->node;
95 struct net_device *dev;
96 struct sja1000_priv *priv;
97 struct resource res;
98 const u32 *prop;
99 int err, irq, res_size, prop_size;
100 void __iomem *base;
101
102 err = of_address_to_resource(np, 0, &res);
103 if (err) {
104 dev_err(&ofdev->dev, "invalid address\n");
105 return err;
106 }
107
108 res_size = resource_size(&res);
109
110 if (!request_mem_region(res.start, res_size, DRV_NAME)) {
111 dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n",
112 (unsigned long long)res.start,
113 (unsigned long long)res.end);
114 return -EBUSY;
115 }
116
117 base = ioremap_nocache(res.start, res_size);
118 if (!base) {
119 dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n",
120 (unsigned long long)res.start,
121 (unsigned long long)res.end);
122 err = -ENOMEM;
123 goto exit_release_mem;
124 }
125
126 irq = irq_of_parse_and_map(np, 0);
127 if (irq == NO_IRQ) {
128 dev_err(&ofdev->dev, "no irq found\n");
129 err = -ENODEV;
130 goto exit_unmap_mem;
131 }
132
133 dev = alloc_sja1000dev(0);
134 if (!dev) {
135 err = -ENOMEM;
136 goto exit_dispose_irq;
137 }
138
139 priv = netdev_priv(dev);
140
141 priv->read_reg = sja1000_ofp_read_reg;
142 priv->write_reg = sja1000_ofp_write_reg;
143
144 prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
145 if (prop && (prop_size == sizeof(u32)))
146 priv->can.clock.freq = *prop / 2;
147 else
148 priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
149
150 prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
151 if (prop && (prop_size == sizeof(u32)))
152 priv->ocr |= *prop & OCR_MODE_MASK;
153 else
154 priv->ocr |= OCR_MODE_NORMAL; /* default */
155
156 prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
157 if (prop && (prop_size == sizeof(u32)))
158 priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
159 else
160 priv->ocr |= OCR_TX0_PULLDOWN; /* default */
161
162 prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
163 if (prop && (prop_size == sizeof(u32)) && *prop) {
164 u32 divider = priv->can.clock.freq * 2 / *prop;
165
166 if (divider > 1)
167 priv->cdr |= divider / 2 - 1;
168 else
169 priv->cdr |= CDR_CLKOUT_MASK;
170 } else {
171 priv->cdr |= CDR_CLK_OFF; /* default */
172 }
173
174 prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
175 if (!prop)
176 priv->cdr |= CDR_CBP; /* default */
177
178 priv->irq_flags = IRQF_SHARED;
179 priv->reg_base = base;
180
181 dev->irq = irq;
182
183 dev_info(&ofdev->dev,
184 "reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
185 priv->reg_base, dev->irq, priv->can.clock.freq,
186 priv->ocr, priv->cdr);
187
188 dev_set_drvdata(&ofdev->dev, dev);
189 SET_NETDEV_DEV(dev, &ofdev->dev);
190
191 err = register_sja1000dev(dev);
192 if (err) {
193 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
194 DRV_NAME, err);
195 goto exit_free_sja1000;
196 }
197
198 return 0;
199
200exit_free_sja1000:
201 free_sja1000dev(dev);
202exit_dispose_irq:
203 irq_dispose_mapping(irq);
204exit_unmap_mem:
205 iounmap(base);
206exit_release_mem:
207 release_mem_region(res.start, res_size);
208
209 return err;
210}
211
212static struct of_device_id __devinitdata sja1000_ofp_table[] = {
213 {.compatible = "nxp,sja1000"},
214 {},
215};
216
217static struct of_platform_driver sja1000_ofp_driver = {
218 .owner = THIS_MODULE,
219 .name = DRV_NAME,
220 .probe = sja1000_ofp_probe,
221 .remove = __devexit_p(sja1000_ofp_remove),
222 .match_table = sja1000_ofp_table,
223};
224
225static int __init sja1000_ofp_init(void)
226{
227 return of_register_platform_driver(&sja1000_ofp_driver);
228}
229module_init(sja1000_ofp_init);
230
231static void __exit sja1000_ofp_exit(void)
232{
233 return of_unregister_platform_driver(&sja1000_ofp_driver);
234};
235module_exit(sja1000_ofp_exit);
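
The nxp,clock-out-frequency handling in sja1000_ofp_probe() above derives the SJA1000 CDR clock divider from the oscillator and the requested CLKOUT rate. A worked example with illustrative numbers, assuming the sja1000.h definitions are in scope: a 16 MHz crystal gives priv->can.clock.freq = 8000000, and asking for an 8 MHz CLKOUT yields divider = 8000000 * 2 / 8000000 = 2, hence CDR[2:0] = 2 / 2 - 1 = 0, i.e. CLKOUT = fosc / 2 = 8 MHz, as requested.

#include <linux/types.h>

static u8 example_cdr_clkout_bits(u32 can_clock_freq, u32 clkout_freq)
{
	/* can_clock_freq is fosc / 2, so divider equals fosc / clkout */
	u32 divider = can_clock_freq * 2 / clkout_freq;

	/* divider <= 1 (requested rate above fosc / 2) falls back to
	 * CDR_CLKOUT_MASK, the fastest CLKOUT setting */
	return divider > 1 ? divider / 2 - 1 : CDR_CLKOUT_MASK;
}
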
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 8017229d6fd6..628374c2a05f 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -37,14 +37,14 @@ MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
37MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus"); 37MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
38MODULE_LICENSE("GPL v2"); 38MODULE_LICENSE("GPL v2");
39 39
40static u8 sp_read_reg(const struct net_device *dev, int reg) 40static u8 sp_read_reg(const struct sja1000_priv *priv, int reg)
41{ 41{
42 return ioread8((void __iomem *)(dev->base_addr + reg)); 42 return ioread8(priv->reg_base + reg);
43} 43}
44 44
45static void sp_write_reg(const struct net_device *dev, int reg, u8 val) 45static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val)
46{ 46{
47 iowrite8(val, (void __iomem *)(dev->base_addr + reg)); 47 iowrite8(val, priv->reg_base + reg);
48} 48}
49 49
50static int sp_probe(struct platform_device *pdev) 50static int sp_probe(struct platform_device *pdev)
@@ -89,9 +89,9 @@ static int sp_probe(struct platform_device *pdev)
89 } 89 }
90 priv = netdev_priv(dev); 90 priv = netdev_priv(dev);
91 91
92 dev->base_addr = (unsigned long)addr;
93 dev->irq = res_irq->start; 92 dev->irq = res_irq->start;
94 priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; 93 priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
94 priv->reg_base = addr;
95 priv->read_reg = sp_read_reg; 95 priv->read_reg = sp_read_reg;
96 priv->write_reg = sp_write_reg; 96 priv->write_reg = sp_write_reg;
97 priv->can.clock.freq = pdata->clock; 97 priv->can.clock.freq = pdata->clock;
@@ -108,8 +108,8 @@ static int sp_probe(struct platform_device *pdev)
108 goto exit_free; 108 goto exit_free;
109 } 109 }
110 110
111 dev_info(&pdev->dev, "%s device registered (base_addr=%#lx, irq=%d)\n", 111 dev_info(&pdev->dev, "%s device registered (reg_base=%p, irq=%d)\n",
112 DRV_NAME, dev->base_addr, dev->irq); 112 DRV_NAME, priv->reg_base, dev->irq);
113 return 0; 113 return 0;
114 114
115 exit_free: 115 exit_free:
@@ -125,13 +125,14 @@ static int sp_probe(struct platform_device *pdev)
125static int sp_remove(struct platform_device *pdev) 125static int sp_remove(struct platform_device *pdev)
126{ 126{
127 struct net_device *dev = dev_get_drvdata(&pdev->dev); 127 struct net_device *dev = dev_get_drvdata(&pdev->dev);
128 struct sja1000_priv *priv = netdev_priv(dev);
128 struct resource *res; 129 struct resource *res;
129 130
130 unregister_sja1000dev(dev); 131 unregister_sja1000dev(dev);
131 dev_set_drvdata(&pdev->dev, NULL); 132 dev_set_drvdata(&pdev->dev, NULL);
132 133
133 if (dev->base_addr) 134 if (priv->reg_base)
134 iounmap((void __iomem *)dev->base_addr); 135 iounmap(priv->reg_base);
135 136
136 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 137 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
137 release_mem_region(res->start, resource_size(res)); 138 release_mem_region(res->start, resource_size(res));
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 5e97a1a71d88..3711d64e45ef 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1879,7 +1879,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1879 cpl->vlan_valid = 0; 1879 cpl->vlan_valid = 0;
1880 1880
1881send: 1881send:
1882 dev->trans_start = jiffies;
1883 ret = t1_sge_tx(skb, adapter, 0, dev); 1882 ret = t1_sge_tx(skb, adapter, 0, dev);
1884 1883
1885 /* If transmit busy, and we reallocated skb's due to headroom limit, 1884 /* If transmit busy, and we reallocated skb's due to headroom limit,
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index cfb4198b6776..58afafbd3b9c 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -615,13 +615,13 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
615 615
616 dev_kfree_skb_irq(desc->skb); 616 dev_kfree_skb_irq(desc->skb);
617 desc->skb = NULL; 617 desc->skb = NULL;
618 if (netif_subqueue_stopped(dev, queue)) 618 if (__netif_subqueue_stopped(dev, queue))
619 netif_wake_subqueue(dev, queue); 619 netif_wake_subqueue(dev, queue);
620 } else { 620 } else {
621 if (netif_msg_tx_err(priv) && net_ratelimit()) 621 if (netif_msg_tx_err(priv) && net_ratelimit())
622 printk(KERN_WARNING 622 printk(KERN_WARNING
623 "%s: end_xmit: spurious interrupt\n", dev->name); 623 "%s: end_xmit: spurious interrupt\n", dev->name);
624 if (netif_subqueue_stopped(dev, queue)) 624 if (__netif_subqueue_stopped(dev, queue))
625 netif_wake_subqueue(dev, queue); 625 netif_wake_subqueue(dev, queue);
626 } 626 }
627} 627}
@@ -731,7 +731,6 @@ static void cpmac_clear_tx(struct net_device *dev)
731 731
732static void cpmac_hw_error(struct work_struct *work) 732static void cpmac_hw_error(struct work_struct *work)
733{ 733{
734 int i;
735 struct cpmac_priv *priv = 734 struct cpmac_priv *priv =
736 container_of(work, struct cpmac_priv, reset_work); 735 container_of(work, struct cpmac_priv, reset_work);
737 736
@@ -818,7 +817,6 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
818 817
819static void cpmac_tx_timeout(struct net_device *dev) 818static void cpmac_tx_timeout(struct net_device *dev)
820{ 819{
821 int i;
822 struct cpmac_priv *priv = netdev_priv(dev); 820 struct cpmac_priv *priv = netdev_priv(dev);
823 821
824 spin_lock(&priv->lock); 822 spin_lock(&priv->lock);
@@ -1110,7 +1108,7 @@ static int external_switch;
1110 1108
1111static int __devinit cpmac_probe(struct platform_device *pdev) 1109static int __devinit cpmac_probe(struct platform_device *pdev)
1112{ 1110{
1113 int rc, phy_id, i; 1111 int rc, phy_id;
1114 char *mdio_bus_id = "0"; 1112 char *mdio_bus_id = "0";
1115 struct resource *mem; 1113 struct resource *mem;
1116 struct cpmac_priv *priv; 1114 struct cpmac_priv *priv;
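
The cpmac hunks above switch the tx-completion path to __netif_subqueue_stopped(), which takes a raw queue index, whereas the plain netif_subqueue_stopped() takes an skb and derives the queue from its queue mapping; cpmac only has the index at hand here. A short sketch of the relationship between the two helpers (an illustration, not the kernel's own definition):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static inline int example_subqueue_stopped(const struct net_device *dev,
					   struct sk_buff *skb)
{
	/* the skb-based form is a thin wrapper over the index-based one */
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
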
diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/cxgb3/Makefile
index 343467985321..29aff78c7820 100644
--- a/drivers/net/cxgb3/Makefile
+++ b/drivers/net/cxgb3/Makefile
@@ -5,4 +5,4 @@
5obj-$(CONFIG_CHELSIO_T3) += cxgb3.o 5obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
6 6
7cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \ 7cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
8 xgmac.o sge.o l2t.o cxgb3_offload.o 8 xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 322434ac42fc..1694fad38720 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -85,8 +85,8 @@ struct fl_pg_chunk {
85 struct page *page; 85 struct page *page;
86 void *va; 86 void *va;
87 unsigned int offset; 87 unsigned int offset;
88 u64 *p_cnt; 88 unsigned long *p_cnt;
89 DECLARE_PCI_UNMAP_ADDR(mapping); 89 dma_addr_t mapping;
90}; 90};
91 91
92struct rx_desc; 92struct rx_desc;
@@ -253,6 +253,8 @@ struct adapter {
253 struct mutex mdio_lock; 253 struct mutex mdio_lock;
254 spinlock_t stats_lock; 254 spinlock_t stats_lock;
255 spinlock_t work_lock; 255 spinlock_t work_lock;
256
257 struct sk_buff *nofail_skb;
256}; 258};
257 259
258static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr) 260static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index df1f58576689..9fe008ec9ba5 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -44,12 +44,33 @@ enum {
44 AEL_I2C_STAT = 0xc30c, 44 AEL_I2C_STAT = 0xc30c,
45 AEL2005_GPIO_CTRL = 0xc214, 45 AEL2005_GPIO_CTRL = 0xc214,
46 AEL2005_GPIO_STAT = 0xc215, 46 AEL2005_GPIO_STAT = 0xc215,
47
48 AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */
49 AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */
50 AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */
51 AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */
52
53 AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */
54 AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */
55 AEL2020_GPIO_0 = 3, /* IN: unassigned */
56 AEL2020_GPIO_1 = 2, /* OUT: unassigned */
57 AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */
47}; 58};
48 59
49enum { edc_none, edc_sr, edc_twinax }; 60enum { edc_none, edc_sr, edc_twinax };
50 61
51/* PHY module I2C device address */ 62/* PHY module I2C device address */
52#define MODULE_DEV_ADDR 0xa0 63enum {
64 MODULE_DEV_ADDR = 0xa0,
65 SFF_DEV_ADDR = 0xa2,
66};
67
68/* PHY transceiver type */
69enum {
70 phy_transtype_unknown = 0,
71 phy_transtype_sfp = 3,
72 phy_transtype_xfp = 6,
73};
53 74
54#define AEL2005_MODDET_IRQ 4 75#define AEL2005_MODDET_IRQ 4
55 76
@@ -86,6 +107,37 @@ static void ael100x_txon(struct cphy *phy)
86 msleep(30); 107 msleep(30);
87} 108}
88 109
110/*
111 * Read an 8-bit word from a device attached to the PHY's i2c bus.
112 */
113static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
114{
115 int i, err;
116 unsigned int stat, data;
117
118 err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
119 (dev_addr << 8) | (1 << 8) | word_addr);
120 if (err)
121 return err;
122
123 for (i = 0; i < 200; i++) {
124 msleep(1);
125 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
126 if (err)
127 return err;
128 if ((stat & 3) == 1) {
129 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
130 &data);
131 if (err)
132 return err;
133 return data >> 8;
134 }
135 }
136 CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n",
137 phy->mdio.prtad, dev_addr, word_addr);
138 return -ETIMEDOUT;
139}
140
89static int ael1002_power_down(struct cphy *phy, int enable) 141static int ael1002_power_down(struct cphy *phy, int enable)
90{ 142{
91 int err; 143 int err;
@@ -199,6 +251,51 @@ int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
199 return 0; 251 return 0;
200} 252}
201 253
254/*
255 * Decode our module type.
256 */
257static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms)
258{
259 int v;
260
261 if (delay_ms)
262 msleep(delay_ms);
263
264 /* see SFF-8472 for below */
265 v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3);
266 if (v < 0)
267 return v;
268
269 if (v == 0x10)
270 return phy_modtype_sr;
271 if (v == 0x20)
272 return phy_modtype_lr;
273 if (v == 0x40)
274 return phy_modtype_lrm;
275
276 v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6);
277 if (v < 0)
278 return v;
279 if (v != 4)
280 goto unknown;
281
282 v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10);
283 if (v < 0)
284 return v;
285
286 if (v & 0x80) {
287 v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
288 if (v < 0)
289 return v;
290 return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
291 }
292unknown:
293 return phy_modtype_unknown;
294}
295
296/*
297 * Code to support the Aeluros/NetLogic 2005 10Gb PHY.
298 */
202static int ael2005_setup_sr_edc(struct cphy *phy) 299static int ael2005_setup_sr_edc(struct cphy *phy)
203{ 300{
204 static struct reg_val regs[] = { 301 static struct reg_val regs[] = {
@@ -893,35 +990,7 @@ static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
893 return err; 990 return err;
894} 991}
895 992
896static int ael2005_i2c_rd(struct cphy *phy, int dev_addr, int word_addr) 993static int ael2005_get_module_type(struct cphy *phy, int delay_ms)
897{
898 int i, err;
899 unsigned int stat, data;
900
901 err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
902 (dev_addr << 8) | (1 << 8) | word_addr);
903 if (err)
904 return err;
905
906 for (i = 0; i < 5; i++) {
907 msleep(1);
908 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
909 if (err)
910 return err;
911 if ((stat & 3) == 1) {
912 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
913 &data);
914 if (err)
915 return err;
916 return data >> 8;
917 }
918 }
919 CH_WARN(phy->adapter, "PHY %u I2C read of addr %u timed out\n",
920 phy->mdio.prtad, word_addr);
921 return -ETIMEDOUT;
922}
923
924static int get_module_type(struct cphy *phy, int delay_ms)
925{ 994{
926 int v; 995 int v;
927 unsigned int stat; 996 unsigned int stat;
@@ -933,39 +1002,7 @@ static int get_module_type(struct cphy *phy, int delay_ms)
933 if (stat & (1 << 8)) /* module absent */ 1002 if (stat & (1 << 8)) /* module absent */
934 return phy_modtype_none; 1003 return phy_modtype_none;
935 1004
936 if (delay_ms) 1005 return ael2xxx_get_module_type(phy, delay_ms);
937 msleep(delay_ms);
938
939 /* see SFF-8472 for below */
940 v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 3);
941 if (v < 0)
942 return v;
943
944 if (v == 0x10)
945 return phy_modtype_sr;
946 if (v == 0x20)
947 return phy_modtype_lr;
948 if (v == 0x40)
949 return phy_modtype_lrm;
950
951 v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 6);
952 if (v < 0)
953 return v;
954 if (v != 4)
955 goto unknown;
956
957 v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 10);
958 if (v < 0)
959 return v;
960
961 if (v & 0x80) {
962 v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
963 if (v < 0)
964 return v;
965 return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
966 }
967unknown:
968 return phy_modtype_unknown;
969} 1006}
970 1007
971static int ael2005_intr_enable(struct cphy *phy) 1008static int ael2005_intr_enable(struct cphy *phy)
@@ -1024,7 +1061,7 @@ static int ael2005_reset(struct cphy *phy, int wait)
1024 1061
1025 msleep(50); 1062 msleep(50);
1026 1063
1027 err = get_module_type(phy, 0); 1064 err = ael2005_get_module_type(phy, 0);
1028 if (err < 0) 1065 if (err < 0)
1029 return err; 1066 return err;
1030 phy->modtype = err; 1067 phy->modtype = err;
@@ -1062,7 +1099,7 @@ static int ael2005_intr_handler(struct cphy *phy)
1062 return ret; 1099 return ret;
1063 1100
1064 /* modules have max 300 ms init time after hot plug */ 1101 /* modules have max 300 ms init time after hot plug */
1065 ret = get_module_type(phy, 300); 1102 ret = ael2005_get_module_type(phy, 300);
1066 if (ret < 0) 1103 if (ret < 0)
1067 return ret; 1104 return ret;
1068 1105
@@ -1113,6 +1150,662 @@ int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
1113} 1150}
1114 1151
1115/* 1152/*
1153 * Setup EDC and other parameters for operation with an optical module.
1154 */
1155static int ael2020_setup_sr_edc(struct cphy *phy)
1156{
1157 static struct reg_val regs[] = {
1158 /* set CDR offset to 10 */
1159 { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
1160
1161 /* adjust 10G RX bias current */
1162 { MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 },
1163 { MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 },
1164 { MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 },
1165
1166 /* end */
1167 { 0, 0, 0, 0 }
1168 };
1169 int err;
1170
1171 err = set_phy_regs(phy, regs);
1172 msleep(50);
1173 if (err)
1174 return err;
1175
1176 phy->priv = edc_sr;
1177 return 0;
1178}
1179
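ael2020_setup_sr_edc() programs the device purely through a {0,0,0,0}-terminated reg_val table handed to set_phy_regs(), which lives elsewhere in ael1002.c and is not shown in this patch. A plausible shape for it, assuming the reg_val fields are mmd_addr/reg_addr/clear_bits/set_bits as the tables here suggest:

/*
 * Assumed helper (not part of this hunk): walk the zero-terminated table,
 * treating a clear mask of 0xffff as a plain write and anything else as a
 * read-modify-write via t3_mdio_change_bits().
 */
static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
{
	int err;

	for (err = 0; rv->mmd_addr && !err; rv++) {
		if (rv->clear_bits == 0xffff)
			err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr,
					    rv->set_bits);
		else
			err = t3_mdio_change_bits(phy, rv->mmd_addr,
						  rv->reg_addr, rv->clear_bits,
						  rv->set_bits);
	}
	return err;
}

The AEL_OPT_SETTINGS entry in ael2020_reset() below (mask 0x0020, value 0x0020) is what makes the read-modify-write branch necessary.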
1180/*
1181 * Setup EDC and other parameters for operation with a TWINAX module.
1182 */
1183static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
1184{
1185 /* set uC to 40MHz */
1186 static struct reg_val uCclock40MHz[] = {
1187 { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
1188 { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
1189 { 0, 0, 0, 0 }
1190 };
1191
1192 /* activate uC clock */
1193 static struct reg_val uCclockActivate[] = {
1194 { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
1195 { 0, 0, 0, 0 }
1196 };
1197
1198 /* set PC to start of SRAM and activate uC */
1199 static struct reg_val uCactivate[] = {
1200 { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
1201 { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
1202 { 0, 0, 0, 0 }
1203 };
1204
1205 /* TWINAX EDC firmware */
1206 static u16 twinax_edc[] = {
1207 0xd800, 0x4009,
1208 0xd801, 0x2fff,
1209 0xd802, 0x300f,
1210 0xd803, 0x40aa,
1211 0xd804, 0x401c,
1212 0xd805, 0x401e,
1213 0xd806, 0x2ff4,
1214 0xd807, 0x3dc4,
1215 0xd808, 0x2035,
1216 0xd809, 0x3035,
1217 0xd80a, 0x6524,
1218 0xd80b, 0x2cb2,
1219 0xd80c, 0x3012,
1220 0xd80d, 0x1002,
1221 0xd80e, 0x26e2,
1222 0xd80f, 0x3022,
1223 0xd810, 0x1002,
1224 0xd811, 0x27d2,
1225 0xd812, 0x3022,
1226 0xd813, 0x1002,
1227 0xd814, 0x2822,
1228 0xd815, 0x3012,
1229 0xd816, 0x1002,
1230 0xd817, 0x2492,
1231 0xd818, 0x3022,
1232 0xd819, 0x1002,
1233 0xd81a, 0x2772,
1234 0xd81b, 0x3012,
1235 0xd81c, 0x1002,
1236 0xd81d, 0x23d2,
1237 0xd81e, 0x3022,
1238 0xd81f, 0x1002,
1239 0xd820, 0x22cd,
1240 0xd821, 0x301d,
1241 0xd822, 0x27f2,
1242 0xd823, 0x3022,
1243 0xd824, 0x1002,
1244 0xd825, 0x5553,
1245 0xd826, 0x0307,
1246 0xd827, 0x2522,
1247 0xd828, 0x3022,
1248 0xd829, 0x1002,
1249 0xd82a, 0x2142,
1250 0xd82b, 0x3012,
1251 0xd82c, 0x1002,
1252 0xd82d, 0x4016,
1253 0xd82e, 0x5e63,
1254 0xd82f, 0x0344,
1255 0xd830, 0x2142,
1256 0xd831, 0x3012,
1257 0xd832, 0x1002,
1258 0xd833, 0x400e,
1259 0xd834, 0x2522,
1260 0xd835, 0x3022,
1261 0xd836, 0x1002,
1262 0xd837, 0x2b52,
1263 0xd838, 0x3012,
1264 0xd839, 0x1002,
1265 0xd83a, 0x2742,
1266 0xd83b, 0x3022,
1267 0xd83c, 0x1002,
1268 0xd83d, 0x25e2,
1269 0xd83e, 0x3022,
1270 0xd83f, 0x1002,
1271 0xd840, 0x2fa4,
1272 0xd841, 0x3dc4,
1273 0xd842, 0x6624,
1274 0xd843, 0x414b,
1275 0xd844, 0x56b3,
1276 0xd845, 0x03c6,
1277 0xd846, 0x866b,
1278 0xd847, 0x400c,
1279 0xd848, 0x2712,
1280 0xd849, 0x3012,
1281 0xd84a, 0x1002,
1282 0xd84b, 0x2c4b,
1283 0xd84c, 0x309b,
1284 0xd84d, 0x56b3,
1285 0xd84e, 0x03c3,
1286 0xd84f, 0x866b,
1287 0xd850, 0x400c,
1288 0xd851, 0x2272,
1289 0xd852, 0x3022,
1290 0xd853, 0x1002,
1291 0xd854, 0x2742,
1292 0xd855, 0x3022,
1293 0xd856, 0x1002,
1294 0xd857, 0x25e2,
1295 0xd858, 0x3022,
1296 0xd859, 0x1002,
1297 0xd85a, 0x2fb4,
1298 0xd85b, 0x3dc4,
1299 0xd85c, 0x6624,
1300 0xd85d, 0x56b3,
1301 0xd85e, 0x03c3,
1302 0xd85f, 0x866b,
1303 0xd860, 0x401c,
1304 0xd861, 0x2c45,
1305 0xd862, 0x3095,
1306 0xd863, 0x5b53,
1307 0xd864, 0x2372,
1308 0xd865, 0x3012,
1309 0xd866, 0x13c2,
1310 0xd867, 0x5cc3,
1311 0xd868, 0x2712,
1312 0xd869, 0x3012,
1313 0xd86a, 0x1312,
1314 0xd86b, 0x2b52,
1315 0xd86c, 0x3012,
1316 0xd86d, 0x1002,
1317 0xd86e, 0x2742,
1318 0xd86f, 0x3022,
1319 0xd870, 0x1002,
1320 0xd871, 0x2582,
1321 0xd872, 0x3022,
1322 0xd873, 0x1002,
1323 0xd874, 0x2142,
1324 0xd875, 0x3012,
1325 0xd876, 0x1002,
1326 0xd877, 0x628f,
1327 0xd878, 0x2985,
1328 0xd879, 0x33a5,
1329 0xd87a, 0x25e2,
1330 0xd87b, 0x3022,
1331 0xd87c, 0x1002,
1332 0xd87d, 0x5653,
1333 0xd87e, 0x03d2,
1334 0xd87f, 0x401e,
1335 0xd880, 0x6f72,
1336 0xd881, 0x1002,
1337 0xd882, 0x628f,
1338 0xd883, 0x2304,
1339 0xd884, 0x3c84,
1340 0xd885, 0x6436,
1341 0xd886, 0xdff4,
1342 0xd887, 0x6436,
1343 0xd888, 0x2ff5,
1344 0xd889, 0x3005,
1345 0xd88a, 0x8656,
1346 0xd88b, 0xdfba,
1347 0xd88c, 0x56a3,
1348 0xd88d, 0xd05a,
1349 0xd88e, 0x2972,
1350 0xd88f, 0x3012,
1351 0xd890, 0x1392,
1352 0xd891, 0xd05a,
1353 0xd892, 0x56a3,
1354 0xd893, 0xdfba,
1355 0xd894, 0x0383,
1356 0xd895, 0x6f72,
1357 0xd896, 0x1002,
1358 0xd897, 0x2b45,
1359 0xd898, 0x3005,
1360 0xd899, 0x4178,
1361 0xd89a, 0x5653,
1362 0xd89b, 0x0384,
1363 0xd89c, 0x2a62,
1364 0xd89d, 0x3012,
1365 0xd89e, 0x1002,
1366 0xd89f, 0x2f05,
1367 0xd8a0, 0x3005,
1368 0xd8a1, 0x41c8,
1369 0xd8a2, 0x5653,
1370 0xd8a3, 0x0382,
1371 0xd8a4, 0x0002,
1372 0xd8a5, 0x4218,
1373 0xd8a6, 0x2474,
1374 0xd8a7, 0x3c84,
1375 0xd8a8, 0x6437,
1376 0xd8a9, 0xdff4,
1377 0xd8aa, 0x6437,
1378 0xd8ab, 0x2ff5,
1379 0xd8ac, 0x3c05,
1380 0xd8ad, 0x8757,
1381 0xd8ae, 0xb888,
1382 0xd8af, 0x9787,
1383 0xd8b0, 0xdff4,
1384 0xd8b1, 0x6724,
1385 0xd8b2, 0x866a,
1386 0xd8b3, 0x6f72,
1387 0xd8b4, 0x1002,
1388 0xd8b5, 0x2641,
1389 0xd8b6, 0x3021,
1390 0xd8b7, 0x1001,
1391 0xd8b8, 0xc620,
1392 0xd8b9, 0x0000,
1393 0xd8ba, 0xc621,
1394 0xd8bb, 0x0000,
1395 0xd8bc, 0xc622,
1396 0xd8bd, 0x00ce,
1397 0xd8be, 0xc623,
1398 0xd8bf, 0x007f,
1399 0xd8c0, 0xc624,
1400 0xd8c1, 0x0032,
1401 0xd8c2, 0xc625,
1402 0xd8c3, 0x0000,
1403 0xd8c4, 0xc627,
1404 0xd8c5, 0x0000,
1405 0xd8c6, 0xc628,
1406 0xd8c7, 0x0000,
1407 0xd8c8, 0xc62c,
1408 0xd8c9, 0x0000,
1409 0xd8ca, 0x0000,
1410 0xd8cb, 0x2641,
1411 0xd8cc, 0x3021,
1412 0xd8cd, 0x1001,
1413 0xd8ce, 0xc502,
1414 0xd8cf, 0x53ac,
1415 0xd8d0, 0xc503,
1416 0xd8d1, 0x2cd3,
1417 0xd8d2, 0xc600,
1418 0xd8d3, 0x2a6e,
1419 0xd8d4, 0xc601,
1420 0xd8d5, 0x2a2c,
1421 0xd8d6, 0xc605,
1422 0xd8d7, 0x5557,
1423 0xd8d8, 0xc60c,
1424 0xd8d9, 0x5400,
1425 0xd8da, 0xc710,
1426 0xd8db, 0x0700,
1427 0xd8dc, 0xc711,
1428 0xd8dd, 0x0f06,
1429 0xd8de, 0xc718,
1430 0xd8df, 0x0700,
1431 0xd8e0, 0xc719,
1432 0xd8e1, 0x0f06,
1433 0xd8e2, 0xc720,
1434 0xd8e3, 0x4700,
1435 0xd8e4, 0xc721,
1436 0xd8e5, 0x0f06,
1437 0xd8e6, 0xc728,
1438 0xd8e7, 0x0700,
1439 0xd8e8, 0xc729,
1440 0xd8e9, 0x1207,
1441 0xd8ea, 0xc801,
1442 0xd8eb, 0x7f50,
1443 0xd8ec, 0xc802,
1444 0xd8ed, 0x7760,
1445 0xd8ee, 0xc803,
1446 0xd8ef, 0x7fce,
1447 0xd8f0, 0xc804,
1448 0xd8f1, 0x520e,
1449 0xd8f2, 0xc805,
1450 0xd8f3, 0x5c11,
1451 0xd8f4, 0xc806,
1452 0xd8f5, 0x3c51,
1453 0xd8f6, 0xc807,
1454 0xd8f7, 0x4061,
1455 0xd8f8, 0xc808,
1456 0xd8f9, 0x49c1,
1457 0xd8fa, 0xc809,
1458 0xd8fb, 0x3840,
1459 0xd8fc, 0xc80a,
1460 0xd8fd, 0x0000,
1461 0xd8fe, 0xc821,
1462 0xd8ff, 0x0002,
1463 0xd900, 0xc822,
1464 0xd901, 0x0046,
1465 0xd902, 0xc844,
1466 0xd903, 0x182f,
1467 0xd904, 0xc013,
1468 0xd905, 0xf341,
1469 0xd906, 0xc084,
1470 0xd907, 0x0030,
1471 0xd908, 0xc904,
1472 0xd909, 0x1401,
1473 0xd90a, 0xcb0c,
1474 0xd90b, 0x0004,
1475 0xd90c, 0xcb0e,
1476 0xd90d, 0xa00a,
1477 0xd90e, 0xcb0f,
1478 0xd90f, 0xc0c0,
1479 0xd910, 0xcb10,
1480 0xd911, 0xc0c0,
1481 0xd912, 0xcb11,
1482 0xd913, 0x00a0,
1483 0xd914, 0xcb12,
1484 0xd915, 0x0007,
1485 0xd916, 0xc241,
1486 0xd917, 0xa000,
1487 0xd918, 0xc243,
1488 0xd919, 0x7fe0,
1489 0xd91a, 0xc604,
1490 0xd91b, 0x000e,
1491 0xd91c, 0xc609,
1492 0xd91d, 0x00f5,
1493 0xd91e, 0xc611,
1494 0xd91f, 0x000e,
1495 0xd920, 0xc660,
1496 0xd921, 0x9600,
1497 0xd922, 0xc687,
1498 0xd923, 0x0004,
1499 0xd924, 0xc60a,
1500 0xd925, 0x04f5,
1501 0xd926, 0x0000,
1502 0xd927, 0x2641,
1503 0xd928, 0x3021,
1504 0xd929, 0x1001,
1505 0xd92a, 0xc620,
1506 0xd92b, 0x14e5,
1507 0xd92c, 0xc621,
1508 0xd92d, 0xc53d,
1509 0xd92e, 0xc622,
1510 0xd92f, 0x3cbe,
1511 0xd930, 0xc623,
1512 0xd931, 0x4452,
1513 0xd932, 0xc624,
1514 0xd933, 0xc5c5,
1515 0xd934, 0xc625,
1516 0xd935, 0xe01e,
1517 0xd936, 0xc627,
1518 0xd937, 0x0000,
1519 0xd938, 0xc628,
1520 0xd939, 0x0000,
1521 0xd93a, 0xc62c,
1522 0xd93b, 0x0000,
1523 0xd93c, 0x0000,
1524 0xd93d, 0x2b84,
1525 0xd93e, 0x3c74,
1526 0xd93f, 0x6435,
1527 0xd940, 0xdff4,
1528 0xd941, 0x6435,
1529 0xd942, 0x2806,
1530 0xd943, 0x3006,
1531 0xd944, 0x8565,
1532 0xd945, 0x2b24,
1533 0xd946, 0x3c24,
1534 0xd947, 0x6436,
1535 0xd948, 0x1002,
1536 0xd949, 0x2b24,
1537 0xd94a, 0x3c24,
1538 0xd94b, 0x6436,
1539 0xd94c, 0x4045,
1540 0xd94d, 0x8656,
1541 0xd94e, 0x5663,
1542 0xd94f, 0x0302,
1543 0xd950, 0x401e,
1544 0xd951, 0x1002,
1545 0xd952, 0x2807,
1546 0xd953, 0x31a7,
1547 0xd954, 0x20c4,
1548 0xd955, 0x3c24,
1549 0xd956, 0x6724,
1550 0xd957, 0x1002,
1551 0xd958, 0x2807,
1552 0xd959, 0x3187,
1553 0xd95a, 0x20c4,
1554 0xd95b, 0x3c24,
1555 0xd95c, 0x6724,
1556 0xd95d, 0x1002,
1557 0xd95e, 0x24f4,
1558 0xd95f, 0x3c64,
1559 0xd960, 0x6436,
1560 0xd961, 0xdff4,
1561 0xd962, 0x6436,
1562 0xd963, 0x1002,
1563 0xd964, 0x2006,
1564 0xd965, 0x3d76,
1565 0xd966, 0xc161,
1566 0xd967, 0x6134,
1567 0xd968, 0x6135,
1568 0xd969, 0x5443,
1569 0xd96a, 0x0303,
1570 0xd96b, 0x6524,
1571 0xd96c, 0x00fb,
1572 0xd96d, 0x1002,
1573 0xd96e, 0x20d4,
1574 0xd96f, 0x3c24,
1575 0xd970, 0x2025,
1576 0xd971, 0x3005,
1577 0xd972, 0x6524,
1578 0xd973, 0x1002,
1579 0xd974, 0xd019,
1580 0xd975, 0x2104,
1581 0xd976, 0x3c24,
1582 0xd977, 0x2105,
1583 0xd978, 0x3805,
1584 0xd979, 0x6524,
1585 0xd97a, 0xdff4,
1586 0xd97b, 0x4005,
1587 0xd97c, 0x6524,
1588 0xd97d, 0x2e8d,
1589 0xd97e, 0x303d,
1590 0xd97f, 0x2408,
1591 0xd980, 0x35d8,
1592 0xd981, 0x5dd3,
1593 0xd982, 0x0307,
1594 0xd983, 0x8887,
1595 0xd984, 0x63a7,
1596 0xd985, 0x8887,
1597 0xd986, 0x63a7,
1598 0xd987, 0xdffd,
1599 0xd988, 0x00f9,
1600 0xd989, 0x1002,
1601 0xd98a, 0x0000,
1602 };
1603 int i, err;
1604
1605 /* set uC clock and activate it */
1606 err = set_phy_regs(phy, uCclock40MHz);
1607 msleep(500);
1608 if (err)
1609 return err;
1610 err = set_phy_regs(phy, uCclockActivate);
1611 msleep(500);
1612 if (err)
1613 return err;
1614
1615 /* write TWINAX EDC firmware into PHY */
1616 for (i = 0; i < ARRAY_SIZE(twinax_edc) && !err; i += 2)
1617 err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, twinax_edc[i],
1618 twinax_edc[i + 1]);
1619 /* activate uC */
1620 err = set_phy_regs(phy, uCactivate);
1621 if (!err)
1622 phy->priv = edc_twinax;
1623 return err;
1624}
1625
1626/*
1627 * Return the module type.
1628 */
1629static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
1630{
1631 int v;
1632 unsigned int stat;
1633
1634 v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat);
1635 if (v)
1636 return v;
1637
1638 if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) {
1639 /* module absent */
1640 return phy_modtype_none;
1641 }
1642
1643 return ael2xxx_get_module_type(phy, delay_ms);
1644}
1645
1646/*
1647 * Enable PHY interrupts. We enable "Module Detection" interrupts (on any
1648 * state transition) and then generic Link Alarm Status Interrupt (LASI).
1649 */
1650static int ael2020_intr_enable(struct cphy *phy)
1651{
1652 int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
1653 0x2 << (AEL2020_GPIO_MODDET*4));
1654 return err ? err : t3_phy_lasi_intr_enable(phy);
1655}
1656
1657/*
1658 * Disable PHY interrupts. The mirror of the above ...
1659 */
1660static int ael2020_intr_disable(struct cphy *phy)
1661{
1662 int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
1663 0x1 << (AEL2020_GPIO_MODDET*4));
1664 return err ? err : t3_phy_lasi_intr_disable(phy);
1665}
1666
1667/*
1668 * Clear PHY interrupt state.
1669 */
1670static int ael2020_intr_clear(struct cphy *phy)
1671{
1672 /*
1673 * The GPIO Interrupt register on the AEL2020 is a "Latching High"
1674 * (LH) register which is cleared to the current state when it's read.
1675 * Thus, we simply read the register and discard the result.
1676 */
1677 unsigned int stat;
1678 int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
1679 return err ? err : t3_phy_lasi_intr_clear(phy);
1680}
1681
1682/*
1683 * Reset the PHY and put it into a canonical operating state.
1684 */
1685static int ael2020_reset(struct cphy *phy, int wait)
1686{
1687 static struct reg_val regs0[] = {
1688 /* Erratum #2: CDRLOL asserted, causing PMA link down status */
1689 { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
1690
1691 /* force XAUI to send LF when RX_LOS is asserted */
1692 { MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 },
1693
1694 /* RX_LOS pin is active high */
1695 { MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS,
1696 0x0020, 0x0020 },
1697
1698 /* output Module's Loss Of Signal (LOS) to LED */
1699 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
1700 0xffff, 0x0004 },
1701 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
1702 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) },
1703
1704 /* end */
1705 { 0, 0, 0, 0 }
1706 };
1707 int err;
1708 unsigned int lasi_ctrl;
1709
1710 /* grab current interrupt state */
1711 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
1712 &lasi_ctrl);
1713 if (err)
1714 return err;
1715
1716 err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125);
1717 if (err)
1718 return err;
1719 msleep(100);
1720
1721 /* basic initialization for all module types */
1722 phy->priv = edc_none;
1723 err = set_phy_regs(phy, regs0);
1724 if (err)
1725 return err;
1726
1727 /* determine module type and perform appropriate initialization */
1728 err = ael2020_get_module_type(phy, 0);
1729 if (err < 0)
1730 return err;
1731 phy->modtype = (u8)err;
1732 if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
1733 err = ael2020_setup_twinax_edc(phy, err);
1734 else
1735 err = ael2020_setup_sr_edc(phy);
1736 if (err)
1737 return err;
1738
1739 /* reset wipes out interrupts, reenable them if they were on */
1740 if (lasi_ctrl & 1)
1741 err = ael2020_intr_enable(phy);
1742 return err;
1743}
1744
1745/*
1746 * Handle a PHY interrupt.
1747 */
1748static int ael2020_intr_handler(struct cphy *phy)
1749{
1750 unsigned int stat;
1751 int ret, edc_needed, cause = 0;
1752
1753 ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
1754 if (ret)
1755 return ret;
1756
1757 if (stat & (0x1 << AEL2020_GPIO_MODDET)) {
1758 /* modules have max 300 ms init time after hot plug */
1759 ret = ael2020_get_module_type(phy, 300);
1760 if (ret < 0)
1761 return ret;
1762
1763 phy->modtype = (u8)ret;
1764 if (ret == phy_modtype_none)
1765 edc_needed = phy->priv; /* on unplug retain EDC */
1766 else if (ret == phy_modtype_twinax ||
1767 ret == phy_modtype_twinax_long)
1768 edc_needed = edc_twinax;
1769 else
1770 edc_needed = edc_sr;
1771
1772 if (edc_needed != phy->priv) {
1773 ret = ael2020_reset(phy, 0);
1774 return ret ? ret : cphy_cause_module_change;
1775 }
1776 cause = cphy_cause_module_change;
1777 }
1778
1779 ret = t3_phy_lasi_intr_handler(phy);
1780 if (ret < 0)
1781 return ret;
1782
1783 ret |= cause;
1784 return ret ? ret : cphy_cause_link_change;
1785}
1786
1787static struct cphy_ops ael2020_ops = {
1788 .reset = ael2020_reset,
1789 .intr_enable = ael2020_intr_enable,
1790 .intr_disable = ael2020_intr_disable,
1791 .intr_clear = ael2020_intr_clear,
1792 .intr_handler = ael2020_intr_handler,
1793 .get_link_status = get_link_status_r,
1794 .power_down = ael1002_power_down,
1795 .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
1796};
1797
1798int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
1799 const struct mdio_ops *mdio_ops)
1800{
1801 cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
1802 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
1803 SUPPORTED_IRQ, "10GBASE-R");
1804 msleep(125);
1805 return 0;
1806}
1807
1808/*
1116 * Get link status for a 10GBASE-X device. 1809 * Get link status for a 10GBASE-X device.
1117 */ 1810 */
1118static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed, 1811static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
diff --git a/drivers/net/cxgb3/aq100x.c b/drivers/net/cxgb3/aq100x.c
new file mode 100644
index 000000000000..b1fd5bf836e4
--- /dev/null
+++ b/drivers/net/cxgb3/aq100x.c
@@ -0,0 +1,355 @@
1/*
2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "common.h"
34#include "regs.h"
35
36enum {
37 /* MDIO_DEV_PMA_PMD registers */
38 AQ_LINK_STAT = 0xe800,
39 AQ_IMASK_PMA = 0xf000,
40
41 /* MDIO_DEV_XGXS registers */
42 AQ_XAUI_RX_CFG = 0xc400,
43 AQ_XAUI_TX_CFG = 0xe400,
44
45 /* MDIO_DEV_ANEG registers */
46 AQ_1G_CTRL = 0xc400,
47 AQ_ANEG_STAT = 0xc800,
48
49 /* MDIO_DEV_VEND1 registers */
50 AQ_FW_VERSION = 0x0020,
51 AQ_IFLAG_GLOBAL = 0xfc00,
52 AQ_IMASK_GLOBAL = 0xff00,
53};
54
55enum {
56 IMASK_PMA = 1 << 2,
57 IMASK_GLOBAL = 1 << 15,
58 ADV_1G_FULL = 1 << 15,
59 ADV_1G_HALF = 1 << 14,
60 ADV_10G_FULL = 1 << 12,
61 AQ_RESET = (1 << 14) | (1 << 15),
62 AQ_LOWPOWER = 1 << 12,
63};
64
65static int aq100x_reset(struct cphy *phy, int wait)
66{
67 /*
68 * Ignore the caller specified wait time; always wait for the reset to
69 * complete. Can take up to 3s.
70 */
71 int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000);
72
73 if (err)
74 CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n",
75 phy->mdio.prtad, err);
76
77 return err;
78}
79
80static int aq100x_intr_enable(struct cphy *phy)
81{
82 int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA);
83 if (err)
84 return err;
85
86 err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL);
87 return err;
88}
89
90static int aq100x_intr_disable(struct cphy *phy)
91{
92 return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0);
93}
94
95static int aq100x_intr_clear(struct cphy *phy)
96{
97 unsigned int v;
98
99 t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v);
100 t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
101
102 return 0;
103}
104
105static int aq100x_intr_handler(struct cphy *phy)
106{
107 int err;
108 unsigned int cause, v;
109
110 err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause);
111 if (err)
112 return err;
113
114 /* Read (and reset) the latching version of the status */
115 t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
116
117 return cphy_cause_link_change;
118}
119
120static int aq100x_power_down(struct cphy *phy, int off)
121{
122 return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
123 MDIO_MMD_PMAPMD, MDIO_CTRL1,
124 MDIO_CTRL1_LPOWER, off);
125}
126
127static int aq100x_autoneg_enable(struct cphy *phy)
128{
129 int err;
130
131 err = aq100x_power_down(phy, 0);
132 if (!err)
133 err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
134 MDIO_MMD_AN, MDIO_CTRL1,
135 BMCR_ANENABLE | BMCR_ANRESTART, 1);
136
137 return err;
138}
139
140static int aq100x_autoneg_restart(struct cphy *phy)
141{
142 int err;
143
144 err = aq100x_power_down(phy, 0);
145 if (!err)
146 err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
147 MDIO_MMD_AN, MDIO_CTRL1,
148 BMCR_ANENABLE | BMCR_ANRESTART, 1);
149
150 return err;
151}
152
153static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map)
154{
155 unsigned int adv;
156 int err;
157
158 /* 10G advertisement */
159 adv = 0;
160 if (advertise_map & ADVERTISED_10000baseT_Full)
161 adv |= ADV_10G_FULL;
162 err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
163 ADV_10G_FULL, adv);
164 if (err)
165 return err;
166
167 /* 1G advertisement */
168 adv = 0;
169 if (advertise_map & ADVERTISED_1000baseT_Full)
170 adv |= ADV_1G_FULL;
171 if (advertise_map & ADVERTISED_1000baseT_Half)
172 adv |= ADV_1G_HALF;
173 err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL,
174 ADV_1G_FULL | ADV_1G_HALF, adv);
175 if (err)
176 return err;
177
178 /* 100M, pause advertisement */
179 adv = 0;
180 if (advertise_map & ADVERTISED_100baseT_Half)
181 adv |= ADVERTISE_100HALF;
182 if (advertise_map & ADVERTISED_100baseT_Full)
183 adv |= ADVERTISE_100FULL;
184 if (advertise_map & ADVERTISED_Pause)
185 adv |= ADVERTISE_PAUSE_CAP;
186 if (advertise_map & ADVERTISED_Asym_Pause)
187 adv |= ADVERTISE_PAUSE_ASYM;
188 err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
189 0xfe0, adv);
190
191 return err;
192}
193
194static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable)
195{
196 return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
197 MDIO_MMD_PMAPMD, MDIO_CTRL1,
198 BMCR_LOOPBACK, enable);
199}
200
201static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex)
202{
203 /* no can do */
204 return -1;
205}
206
207static int aq100x_get_link_status(struct cphy *phy, int *link_ok,
208 int *speed, int *duplex, int *fc)
209{
210 int err;
211 unsigned int v;
212
213 if (link_ok) {
214 err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v);
215 if (err)
216 return err;
217
218 *link_ok = v & 1;
219 if (!*link_ok)
220 return 0;
221 }
222
223 err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v);
224 if (err)
225 return err;
226
227 if (speed) {
228 switch (v & 0x6) {
229 case 0x6:
230 *speed = SPEED_10000;
231 break;
232 case 0x4:
233 *speed = SPEED_1000;
234 break;
235 case 0x2:
236 *speed = SPEED_100;
237 break;
238 case 0x0:
239 *speed = SPEED_10;
240 break;
241 }
242 }
243
244 if (duplex)
245 *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF;
246
247 return 0;
248}
249
250static struct cphy_ops aq100x_ops = {
251 .reset = aq100x_reset,
252 .intr_enable = aq100x_intr_enable,
253 .intr_disable = aq100x_intr_disable,
254 .intr_clear = aq100x_intr_clear,
255 .intr_handler = aq100x_intr_handler,
256 .autoneg_enable = aq100x_autoneg_enable,
257 .autoneg_restart = aq100x_autoneg_restart,
258 .advertise = aq100x_advertise,
259 .set_loopback = aq100x_set_loopback,
260 .set_speed_duplex = aq100x_set_speed_duplex,
261 .get_link_status = aq100x_get_link_status,
262 .power_down = aq100x_power_down,
263 .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
264};
265
266int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
267 const struct mdio_ops *mdio_ops)
268{
269 unsigned int v, v2, gpio, wait;
270 int err;
271
272 cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops,
273 SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
274 SUPPORTED_Autoneg | SUPPORTED_AUI, "1000/10GBASE-T");
275
276 /*
277 * The PHY has been out of reset ever since the system powered up. So
278 * we do a hard reset over here.
279 */
280 gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL;
281 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0);
282 msleep(1);
283 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio);
284
285 /*
286 * Give it enough time to load the firmware and get ready for mdio.
287 */
288 msleep(1000);
289 wait = 500; /* in 10ms increments */
290 do {
291 err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
292 if (err || v == 0xffff) {
293
294 /* Allow prep_adapter to succeed when ffff is read */
295
296 CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n",
297 phy_addr, err, v);
298 goto done;
299 }
300
301 v &= AQ_RESET;
302 if (v)
303 msleep(10);
304 } while (v && --wait);
305 if (v) {
306 CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n",
307 phy_addr, v);
308
309 goto done; /* let prep_adapter succeed */
310 }
311
312 /* Datasheet says 3s max but longer has been observed */
313 wait = (500 - wait) * 10 + 1000;
314 if (wait > 3000)
315 CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait);
316
317 /* Firmware version check. */
318 t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v);
319 if (v != 30) {
320 CH_WARN(adapter, "PHY%d: unsupported firmware %d\n",
321 phy_addr, v);
322 return 0; /* allow t3_prep_adapter to succeed */
323 }
324
325 /*
326 * The PHY should start in really-low-power mode. Prepare it for normal
327 * operations.
328 */
329 err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
330 if (err)
331 return err;
332 if (v & AQ_LOWPOWER) {
333 err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1,
334 AQ_LOWPOWER, 0);
335 if (err)
336 return err;
337 msleep(10);
338 } else
339 CH_WARN(adapter, "PHY%d does not start in low power mode.\n",
340 phy_addr);
341
342 /*
343 * Verify XAUI settings, but let prep succeed no matter what.
344 */
345 v = v2 = 0;
346 t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v);
347 t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2);
348 if (v != 0x1b || v2 != 0x1b)
349 CH_WARN(adapter,
350 "PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n",
351 phy_addr, v, v2);
352
353done:
354 return err;
355}
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 79a113b99e2f..d21b705501a9 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -802,8 +802,12 @@ int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
802 int phy_addr, const struct mdio_ops *mdio_ops); 802 int phy_addr, const struct mdio_ops *mdio_ops);
803int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter, 803int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
804 int phy_addr, const struct mdio_ops *mdio_ops); 804 int phy_addr, const struct mdio_ops *mdio_ops);
805int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter,
806 int phy_addr, const struct mdio_ops *mdio_ops);
805int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, 807int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
806 const struct mdio_ops *mdio_ops); 808 const struct mdio_ops *mdio_ops);
807int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, 809int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
808 int phy_addr, const struct mdio_ops *mdio_ops); 810 int phy_addr, const struct mdio_ops *mdio_ops);
811int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
812 int phy_addr, const struct mdio_ops *mdio_ops);
809#endif /* __CHELSIO_COMMON_H */ 813#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 0b87fee023f5..538dda4422dc 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -91,6 +91,8 @@ static const struct pci_device_id cxgb3_pci_tbl[] = {
91 CH_DEVICE(0x31, 3), /* T3B20 */ 91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */ 92 CH_DEVICE(0x32, 1), /* T3B02 */
93 CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */ 93 CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
94 CH_DEVICE(0x36, 3), /* S320E-CR */
95 CH_DEVICE(0x37, 7), /* N320E-G2 */
94 {0,} 96 {0,}
95}; 97};
96 98
@@ -431,40 +433,78 @@ static int init_tp_parity(struct adapter *adap)
431 for (i = 0; i < 16; i++) { 433 for (i = 0; i < 16; i++) {
432 struct cpl_smt_write_req *req; 434 struct cpl_smt_write_req *req;
433 435
434 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); 436 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
437 if (!skb)
438 skb = adap->nofail_skb;
439 if (!skb)
440 goto alloc_skb_fail;
441
435 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); 442 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
436 memset(req, 0, sizeof(*req)); 443 memset(req, 0, sizeof(*req));
437 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 444 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
438 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); 445 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
439 req->iff = i; 446 req->iff = i;
440 t3_mgmt_tx(adap, skb); 447 t3_mgmt_tx(adap, skb);
448 if (skb == adap->nofail_skb) {
449 await_mgmt_replies(adap, cnt, i + 1);
450 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
451 if (!adap->nofail_skb)
452 goto alloc_skb_fail;
453 }
441 } 454 }
442 455
443 for (i = 0; i < 2048; i++) { 456 for (i = 0; i < 2048; i++) {
444 struct cpl_l2t_write_req *req; 457 struct cpl_l2t_write_req *req;
445 458
446 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); 459 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
460 if (!skb)
461 skb = adap->nofail_skb;
462 if (!skb)
463 goto alloc_skb_fail;
464
447 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); 465 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
448 memset(req, 0, sizeof(*req)); 466 memset(req, 0, sizeof(*req));
449 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 467 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
450 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); 468 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
451 req->params = htonl(V_L2T_W_IDX(i)); 469 req->params = htonl(V_L2T_W_IDX(i));
452 t3_mgmt_tx(adap, skb); 470 t3_mgmt_tx(adap, skb);
471 if (skb == adap->nofail_skb) {
472 await_mgmt_replies(adap, cnt, 16 + i + 1);
473 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
474 if (!adap->nofail_skb)
475 goto alloc_skb_fail;
476 }
453 } 477 }
454 478
455 for (i = 0; i < 2048; i++) { 479 for (i = 0; i < 2048; i++) {
456 struct cpl_rte_write_req *req; 480 struct cpl_rte_write_req *req;
457 481
458 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); 482 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
483 if (!skb)
484 skb = adap->nofail_skb;
485 if (!skb)
486 goto alloc_skb_fail;
487
459 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req)); 488 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
460 memset(req, 0, sizeof(*req)); 489 memset(req, 0, sizeof(*req));
461 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 490 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
462 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); 491 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
463 req->l2t_idx = htonl(V_L2T_W_IDX(i)); 492 req->l2t_idx = htonl(V_L2T_W_IDX(i));
464 t3_mgmt_tx(adap, skb); 493 t3_mgmt_tx(adap, skb);
494 if (skb == adap->nofail_skb) {
495 await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
496 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
497 if (!adap->nofail_skb)
498 goto alloc_skb_fail;
499 }
465 } 500 }
466 501
467 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL); 502 skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
503 if (!skb)
504 skb = adap->nofail_skb;
505 if (!skb)
506 goto alloc_skb_fail;
507
468 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq)); 508 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
469 memset(greq, 0, sizeof(*greq)); 509 memset(greq, 0, sizeof(*greq));
470 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 510 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
@@ -473,8 +513,17 @@ static int init_tp_parity(struct adapter *adap)
473 t3_mgmt_tx(adap, skb); 513 t3_mgmt_tx(adap, skb);
474 514
475 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); 515 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
516 if (skb == adap->nofail_skb) {
517 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
518 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
519 }
520
476 t3_tp_set_offload_mode(adap, 0); 521 t3_tp_set_offload_mode(adap, 0);
477 return i; 522 return i;
523
524alloc_skb_fail:
525 t3_tp_set_offload_mode(adap, 0);
526 return -ENOMEM;
478} 527}
479 528
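Every hunk in this file follows the same recipe when it drops __GFP_NOFAIL: try a normal GFP_KERNEL allocation, fall back to the adapter's pre-allocated nofail_skb, and, once the reserve has been handed to t3_mgmt_tx(), wait for the outstanding replies and replenish it before looping again. Stripped of the CPL details, the idiom looks roughly like this (the helper names are illustrative only; the driver open-codes these steps at each call site):

/* Schematic form of the "no-fail" allocation idiom used above. */
static struct sk_buff *cxgb3_alloc_cpl_skb(struct adapter *adap, size_t len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

	if (!skb)
		skb = adap->nofail_skb;		/* pre-allocated reserve */
	return skb;				/* NULL only if the reserve is gone too */
}

static int cxgb3_refill_nofail_skb(struct adapter *adap, struct sk_buff *used)
{
	if (used != adap->nofail_skb)		/* reserve untouched, nothing to do */
		return 0;
	adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
				     GFP_KERNEL);
	return adap->nofail_skb ? 0 : -ENOMEM;	/* caller bails out on -ENOMEM */
}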
480/** 529/**
@@ -869,7 +918,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
869 struct mngt_pktsched_wr *req; 918 struct mngt_pktsched_wr *req;
870 int ret; 919 int ret;
871 920
872 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); 921 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
922 if (!skb)
923 skb = adap->nofail_skb;
924 if (!skb)
925 return -ENOMEM;
926
873 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req)); 927 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
874 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); 928 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
875 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; 929 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
@@ -879,6 +933,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
879 req->max = hi; 933 req->max = hi;
880 req->binding = port; 934 req->binding = port;
881 ret = t3_mgmt_tx(adap, skb); 935 ret = t3_mgmt_tx(adap, skb);
936 if (skb == adap->nofail_skb) {
937 adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
938 GFP_KERNEL);
939 if (!adap->nofail_skb)
940 ret = -ENOMEM;
941 }
882 942
883 return ret; 943 return ret;
884} 944}
@@ -2451,14 +2511,16 @@ static void check_link_status(struct adapter *adapter)
2451 for_each_port(adapter, i) { 2511 for_each_port(adapter, i) {
2452 struct net_device *dev = adapter->port[i]; 2512 struct net_device *dev = adapter->port[i];
2453 struct port_info *p = netdev_priv(dev); 2513 struct port_info *p = netdev_priv(dev);
2514 int link_fault;
2454 2515
2455 spin_lock_irq(&adapter->work_lock); 2516 spin_lock_irq(&adapter->work_lock);
2456 if (p->link_fault) { 2517 link_fault = p->link_fault;
2518 spin_unlock_irq(&adapter->work_lock);
2519
2520 if (link_fault) {
2457 t3_link_fault(adapter, i); 2521 t3_link_fault(adapter, i);
2458 spin_unlock_irq(&adapter->work_lock);
2459 continue; 2522 continue;
2460 } 2523 }
2461 spin_unlock_irq(&adapter->work_lock);
2462 2524
2463 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) { 2525 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2464 t3_xgm_intr_disable(adapter, i); 2526 t3_xgm_intr_disable(adapter, i);
@@ -3016,6 +3078,14 @@ static int __devinit init_one(struct pci_dev *pdev,
3016 goto out_disable_device; 3078 goto out_disable_device;
3017 } 3079 }
3018 3080
3081 adapter->nofail_skb =
3082 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3083 if (!adapter->nofail_skb) {
3084 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3085 err = -ENOMEM;
3086 goto out_free_adapter;
3087 }
3088
3019 adapter->regs = ioremap_nocache(mmio_start, mmio_len); 3089 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3020 if (!adapter->regs) { 3090 if (!adapter->regs) {
3021 dev_err(&pdev->dev, "cannot map device registers\n"); 3091 dev_err(&pdev->dev, "cannot map device registers\n");
@@ -3059,7 +3129,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3059 netdev->mem_start = mmio_start; 3129 netdev->mem_start = mmio_start;
3060 netdev->mem_end = mmio_start + mmio_len - 1; 3130 netdev->mem_end = mmio_start + mmio_len - 1;
3061 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 3131 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3062 netdev->features |= NETIF_F_LLTX;
3063 netdev->features |= NETIF_F_GRO; 3132 netdev->features |= NETIF_F_GRO;
3064 if (pci_using_dac) 3133 if (pci_using_dac)
3065 netdev->features |= NETIF_F_HIGHDMA; 3134 netdev->features |= NETIF_F_HIGHDMA;
@@ -3173,6 +3242,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
3173 free_netdev(adapter->port[i]); 3242 free_netdev(adapter->port[i]);
3174 3243
3175 iounmap(adapter->regs); 3244 iounmap(adapter->regs);
3245 if (adapter->nofail_skb)
3246 kfree_skb(adapter->nofail_skb);
3176 kfree(adapter); 3247 kfree(adapter);
3177 pci_release_regions(pdev); 3248 pci_release_regions(pdev);
3178 pci_disable_device(pdev); 3249 pci_disable_device(pdev);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 620d80be6aac..f9f54b57b28c 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -566,13 +566,31 @@ static void t3_process_tid_release_list(struct work_struct *work)
566 spin_unlock_bh(&td->tid_release_lock); 566 spin_unlock_bh(&td->tid_release_lock);
567 567
568 skb = alloc_skb(sizeof(struct cpl_tid_release), 568 skb = alloc_skb(sizeof(struct cpl_tid_release),
569 GFP_KERNEL | __GFP_NOFAIL); 569 GFP_KERNEL);
570 if (!skb)
571 skb = td->nofail_skb;
572 if (!skb) {
573 spin_lock_bh(&td->tid_release_lock);
574 p->ctx = (void *)td->tid_release_list;
575 td->tid_release_list = (struct t3c_tid_entry *)p;
576 break;
577 }
570 mk_tid_release(skb, p - td->tid_maps.tid_tab); 578 mk_tid_release(skb, p - td->tid_maps.tid_tab);
571 cxgb3_ofld_send(tdev, skb); 579 cxgb3_ofld_send(tdev, skb);
572 p->ctx = NULL; 580 p->ctx = NULL;
581 if (skb == td->nofail_skb)
582 td->nofail_skb =
583 alloc_skb(sizeof(struct cpl_tid_release),
584 GFP_KERNEL);
573 spin_lock_bh(&td->tid_release_lock); 585 spin_lock_bh(&td->tid_release_lock);
574 } 586 }
587 td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
575 spin_unlock_bh(&td->tid_release_lock); 588 spin_unlock_bh(&td->tid_release_lock);
589
590 if (!td->nofail_skb)
591 td->nofail_skb =
592 alloc_skb(sizeof(struct cpl_tid_release),
593 GFP_KERNEL);
576} 594}
577 595
578/* use ctx as a next pointer in the tid release list */ 596/* use ctx as a next pointer in the tid release list */
@@ -585,7 +603,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
585 p->ctx = (void *)td->tid_release_list; 603 p->ctx = (void *)td->tid_release_list;
586 p->client = NULL; 604 p->client = NULL;
587 td->tid_release_list = p; 605 td->tid_release_list = p;
588 if (!p->ctx) 606 if (!p->ctx || td->release_list_incomplete)
589 schedule_work(&td->tid_release_task); 607 schedule_work(&td->tid_release_task);
590 spin_unlock_bh(&td->tid_release_lock); 608 spin_unlock_bh(&td->tid_release_lock);
591} 609}
@@ -1274,6 +1292,9 @@ int cxgb3_offload_activate(struct adapter *adapter)
1274 if (list_empty(&adapter_list)) 1292 if (list_empty(&adapter_list))
1275 register_netevent_notifier(&nb); 1293 register_netevent_notifier(&nb);
1276 1294
1295 t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
1296 t->release_list_incomplete = 0;
1297
1277 add_adapter(adapter); 1298 add_adapter(adapter);
1278 return 0; 1299 return 0;
1279 1300
@@ -1298,6 +1319,8 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
1298 T3C_DATA(tdev) = NULL; 1319 T3C_DATA(tdev) = NULL;
1299 t3_free_l2t(L2DATA(tdev)); 1320 t3_free_l2t(L2DATA(tdev));
1300 L2DATA(tdev) = NULL; 1321 L2DATA(tdev) = NULL;
1322 if (t->nofail_skb)
1323 kfree_skb(t->nofail_skb);
1301 kfree(t); 1324 kfree(t);
1302} 1325}
1303 1326
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index a8e8e5fcdf84..55945f422aec 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -191,6 +191,9 @@ struct t3c_data {
191 struct t3c_tid_entry *tid_release_list; 191 struct t3c_tid_entry *tid_release_list;
192 spinlock_t tid_release_lock; 192 spinlock_t tid_release_lock;
193 struct work_struct tid_release_task; 193 struct work_struct tid_release_task;
194
195 struct sk_buff *nofail_skb;
196 unsigned int release_list_incomplete;
194}; 197};
195 198
196/* 199/*
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 73d569e758ec..29c79eb43beb 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
355 (*d->pg_chunk.p_cnt)--; 355 (*d->pg_chunk.p_cnt)--;
356 if (!*d->pg_chunk.p_cnt) 356 if (!*d->pg_chunk.p_cnt)
357 pci_unmap_page(pdev, 357 pci_unmap_page(pdev,
358 pci_unmap_addr(&d->pg_chunk, mapping), 358 d->pg_chunk.mapping,
359 q->alloc_size, PCI_DMA_FROMDEVICE); 359 q->alloc_size, PCI_DMA_FROMDEVICE);
360 360
361 put_page(d->pg_chunk.page); 361 put_page(d->pg_chunk.page);
@@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
454 q->pg_chunk.offset = 0; 454 q->pg_chunk.offset = 0;
455 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 455 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
456 0, q->alloc_size, PCI_DMA_FROMDEVICE); 456 0, q->alloc_size, PCI_DMA_FROMDEVICE);
457 pci_unmap_addr_set(&q->pg_chunk, mapping, mapping); 457 q->pg_chunk.mapping = mapping;
458 } 458 }
459 sd->pg_chunk = q->pg_chunk; 459 sd->pg_chunk = q->pg_chunk;
460 460
@@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
511nomem: q->alloc_failed++; 511nomem: q->alloc_failed++;
512 break; 512 break;
513 } 513 }
514 mapping = pci_unmap_addr(&sd->pg_chunk, mapping) + 514 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
515 sd->pg_chunk.offset;
516 pci_unmap_addr_set(sd, dma_addr, mapping); 515 pci_unmap_addr_set(sd, dma_addr, mapping);
517 516
518 add_one_rx_chunk(mapping, d, q->gen); 517 add_one_rx_chunk(mapping, d, q->gen);
@@ -882,7 +881,7 @@ recycle:
882 (*sd->pg_chunk.p_cnt)--; 881 (*sd->pg_chunk.p_cnt)--;
883 if (!*sd->pg_chunk.p_cnt) 882 if (!*sd->pg_chunk.p_cnt)
884 pci_unmap_page(adap->pdev, 883 pci_unmap_page(adap->pdev,
885 pci_unmap_addr(&sd->pg_chunk, mapping), 884 sd->pg_chunk.mapping,
886 fl->alloc_size, 885 fl->alloc_size,
887 PCI_DMA_FROMDEVICE); 886 PCI_DMA_FROMDEVICE);
888 if (!skb) { 887 if (!skb) {
@@ -1241,7 +1240,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1241 q = &qs->txq[TXQ_ETH]; 1240 q = &qs->txq[TXQ_ETH];
1242 txq = netdev_get_tx_queue(dev, qidx); 1241 txq = netdev_get_tx_queue(dev, qidx);
1243 1242
1244 spin_lock(&q->lock);
1245 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); 1243 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1246 1244
1247 credits = q->size - q->in_use; 1245 credits = q->size - q->in_use;
@@ -1252,7 +1250,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1252 dev_err(&adap->pdev->dev, 1250 dev_err(&adap->pdev->dev,
1253 "%s: Tx ring %u full while queue awake!\n", 1251 "%s: Tx ring %u full while queue awake!\n",
1254 dev->name, q->cntxt_id & 7); 1252 dev->name, q->cntxt_id & 7);
1255 spin_unlock(&q->lock);
1256 return NETDEV_TX_BUSY; 1253 return NETDEV_TX_BUSY;
1257 } 1254 }
1258 1255
@@ -1286,9 +1283,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1286 if (vlan_tx_tag_present(skb) && pi->vlan_grp) 1283 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1287 qs->port_stats[SGE_PSTAT_VLANINS]++; 1284 qs->port_stats[SGE_PSTAT_VLANINS]++;
1288 1285
1289 dev->trans_start = jiffies;
1290 spin_unlock(&q->lock);
1291
1292 /* 1286 /*
1293 * We do not use Tx completion interrupts to free DMAd Tx packets. 1287 * We do not use Tx completion interrupts to free DMAd Tx packets.
1294 * This is good for performance but means that we rely on new Tx 1288
@@ -2096,7 +2090,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2096 (*sd->pg_chunk.p_cnt)--; 2090 (*sd->pg_chunk.p_cnt)--;
2097 if (!*sd->pg_chunk.p_cnt) 2091 if (!*sd->pg_chunk.p_cnt)
2098 pci_unmap_page(adap->pdev, 2092 pci_unmap_page(adap->pdev,
2099 pci_unmap_addr(&sd->pg_chunk, mapping), 2093 sd->pg_chunk.mapping,
2100 fl->alloc_size, 2094 fl->alloc_size,
2101 PCI_DMA_FROMDEVICE); 2095 PCI_DMA_FROMDEVICE);
2102 2096
@@ -2858,11 +2852,12 @@ static void sge_timer_tx(unsigned long data)
2858 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0}; 2852 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2859 unsigned long next_period; 2853 unsigned long next_period;
2860 2854
2861 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) { 2855 if (__netif_tx_trylock(qs->tx_q)) {
2862 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], 2856 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2863 TX_RECLAIM_TIMER_CHUNK); 2857 TX_RECLAIM_TIMER_CHUNK);
2864 spin_unlock(&qs->txq[TXQ_ETH].lock); 2858 __netif_tx_unlock(qs->tx_q);
2865 } 2859 }
2860
2866 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { 2861 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2867 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], 2862 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2868 TX_RECLAIM_TIMER_CHUNK); 2863 TX_RECLAIM_TIMER_CHUNK);
@@ -2870,8 +2865,8 @@ static void sge_timer_tx(unsigned long data)
2870 } 2865 }
2871 2866
2872 next_period = TX_RECLAIM_PERIOD >> 2867 next_period = TX_RECLAIM_PERIOD >>
2873 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / 2868 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2874 TX_RECLAIM_TIMER_CHUNK); 2869 TX_RECLAIM_TIMER_CHUNK);
2875 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); 2870 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2876} 2871}
2877 2872
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index fc7db8a9ba89..870d44992c70 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -526,6 +526,11 @@ static const struct adapter_info t3_adap_info[] = {
526 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 526 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
527 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, 527 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
528 &mi1_mdio_ext_ops, "Chelsio T310" }, 528 &mi1_mdio_ext_ops, "Chelsio T310" },
529 {1, 0, 0,
530 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
531 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
532 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
533 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
529}; 534};
530 535
531/* 536/*
@@ -552,6 +557,8 @@ static const struct port_type_info port_types[] = {
552 { t3_qt2045_phy_prep }, 557 { t3_qt2045_phy_prep },
553 { t3_ael1006_phy_prep }, 558 { t3_ael1006_phy_prep },
554 { NULL }, 559 { NULL },
560 { t3_aq100x_phy_prep },
561 { t3_ael2020_phy_prep },
555}; 562};
556 563
557#define VPD_ENTRY(name, len) \ 564#define VPD_ENTRY(name, len) \
@@ -1281,6 +1288,11 @@ void t3_link_fault(struct adapter *adapter, int port_id)
1281 A_XGM_INT_STATUS + mac->offset); 1288 A_XGM_INT_STATUS + mac->offset);
1282 link_fault &= F_LINKFAULTCHANGE; 1289 link_fault &= F_LINKFAULTCHANGE;
1283 1290
1291 link_ok = lc->link_ok;
1292 speed = lc->speed;
1293 duplex = lc->duplex;
1294 fc = lc->fc;
1295
1284 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); 1296 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1285 1297
1286 if (link_fault) { 1298 if (link_fault) {
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 7bf963ec5548..9d0bd9dd9ab1 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -35,10 +35,10 @@
35#define DRV_DESC "Chelsio T3 Network Driver" 35#define DRV_DESC "Chelsio T3 Network Driver"
36#define DRV_NAME "cxgb3" 36#define DRV_NAME "cxgb3"
37/* Driver version */ 37/* Driver version */
38#define DRV_VERSION "1.1.2-ko" 38#define DRV_VERSION "1.1.3-ko"
39 39
40/* Firmware version */ 40/* Firmware version */
41#define FW_VERSION_MAJOR 7 41#define FW_VERSION_MAJOR 7
42#define FW_VERSION_MINOR 1 42#define FW_VERSION_MINOR 4
43#define FW_VERSION_MICRO 0 43#define FW_VERSION_MICRO 0
44#endif /* __CHELSIO_VERSION_H */ 44#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index cf689a056b38..0e9b9f9632c1 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1819,7 +1819,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1819 struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH]; 1819 struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
1820 struct device *emac_dev = &priv->ndev->dev; 1820 struct device *emac_dev = &priv->ndev->dev;
1821 struct sockaddr *sa = addr; 1821 struct sockaddr *sa = addr;
1822 DECLARE_MAC_BUF(mac);
1823 1822
1824 /* Store mac addr in priv and rx channel and set it in EMAC hw */ 1823 /* Store mac addr in priv and rx channel and set it in EMAC hw */
1825 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); 1824 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
@@ -1828,8 +1827,8 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1828 emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr); 1827 emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
1829 1828
1830 if (netif_msg_drv(priv)) 1829 if (netif_msg_drv(priv))
1831 dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %s\n", 1830 dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
1832 print_mac(mac, priv->mac_addr)); 1831 priv->mac_addr);
1833 1832
1834 return 0; 1833 return 0;
1835} 1834}
@@ -2683,11 +2682,10 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2683 ndev->irq = res->start; 2682 ndev->irq = res->start;
2684 2683
2685 if (!is_valid_ether_addr(priv->mac_addr)) { 2684 if (!is_valid_ether_addr(priv->mac_addr)) {
2686 DECLARE_MAC_BUF(buf);
2687 /* Use random MAC if none passed */ 2685 /* Use random MAC if none passed */
2688 random_ether_addr(priv->mac_addr); 2686 random_ether_addr(priv->mac_addr);
2689 printk(KERN_WARNING "%s: using random MAC addr: %s\n", 2687 printk(KERN_WARNING "%s: using random MAC addr: %pM\n",
2690 __func__, print_mac(buf, priv->mac_addr)); 2688 __func__, priv->mac_addr);
2691 } 2689 }
2692 2690
2693 ndev->netdev_ops = &emac_netdev_ops; 2691 ndev->netdev_ops = &emac_netdev_ops;
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index b62405a69180..2b22e580c4de 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -895,6 +895,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
895 struct lance_private *lp = netdev_priv(dev); 895 struct lance_private *lp = netdev_priv(dev);
896 volatile struct lance_regs *ll = lp->ll; 896 volatile struct lance_regs *ll = lp->ll;
897 volatile u16 *ib = (volatile u16 *)dev->mem_start; 897 volatile u16 *ib = (volatile u16 *)dev->mem_start;
898 unsigned long flags;
898 int entry, len; 899 int entry, len;
899 900
900 len = skb->len; 901 len = skb->len;
@@ -907,6 +908,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
907 908
908 dev->stats.tx_bytes += len; 909 dev->stats.tx_bytes += len;
909 910
911 spin_lock_irqsave(&lp->lock, flags);
912
910 entry = lp->tx_new; 913 entry = lp->tx_new;
911 *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); 914 *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
912 *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0; 915 *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
@@ -925,6 +928,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
925 /* Kick the lance: transmit now */ 928 /* Kick the lance: transmit now */
926 writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD); 929 writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
927 930
931 spin_unlock_irqrestore(&lp->lock, flags);
932
928 dev->trans_start = jiffies; 933 dev->trans_start = jiffies;
929 dev_kfree_skb(skb); 934 dev_kfree_skb(skb);
930 935
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 4a1b554654eb..895d72143ee0 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -539,7 +539,7 @@ rio_tx_timeout (struct net_device *dev)
539 dev->name, readl (ioaddr + TxStatus)); 539 dev->name, readl (ioaddr + TxStatus));
540 rio_free_tx(dev, 0); 540 rio_free_tx(dev, 0);
541 dev->if_port = 0; 541 dev->if_port = 0;
542 dev->trans_start = jiffies; 542 dev->trans_start = jiffies; /* prevent tx timeout */
543} 543}
544 544
545 /* allocate and initialize Tx and Rx descriptors */ 545 /* allocate and initialize Tx and Rx descriptors */
@@ -610,7 +610,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
610 610
611 if (np->link_status == 0) { /* Link Down */ 611 if (np->link_status == 0) { /* Link Down */
612 dev_kfree_skb(skb); 612 dev_kfree_skb(skb);
613 return 0; 613 return NETDEV_TX_OK;
614 } 614 }
615 ioaddr = dev->base_addr; 615 ioaddr = dev->base_addr;
616 entry = np->cur_tx % TX_RING_SIZE; 616 entry = np->cur_tx % TX_RING_SIZE;
@@ -665,9 +665,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
665 writel (0, dev->base_addr + TFDListPtr1); 665 writel (0, dev->base_addr + TFDListPtr1);
666 } 666 }
667 667
668 /* NETDEV WATCHDOG timer */ 668 return NETDEV_TX_OK;
669 dev->trans_start = jiffies;
670 return 0;
671} 669}
672 670
673static irqreturn_t 671static irqreturn_t
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 0f9ee1348552..119dc5300f9d 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -143,6 +143,8 @@
143 * FIXES: 143 * FIXES:
144 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com> 144 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
145 * - Stratus87247: protect MDI control register manipulations 145 * - Stratus87247: protect MDI control register manipulations
146 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
147 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
146 */ 148 */
147 149
148#include <linux/module.h> 150#include <linux/module.h>
@@ -372,6 +374,7 @@ enum eeprom_op {
372 374
373enum eeprom_offsets { 375enum eeprom_offsets {
374 eeprom_cnfg_mdix = 0x03, 376 eeprom_cnfg_mdix = 0x03,
377 eeprom_phy_iface = 0x06,
375 eeprom_id = 0x0A, 378 eeprom_id = 0x0A,
376 eeprom_config_asf = 0x0D, 379 eeprom_config_asf = 0x0D,
377 eeprom_smbus_addr = 0x90, 380 eeprom_smbus_addr = 0x90,
@@ -381,6 +384,18 @@ enum eeprom_cnfg_mdix {
381 eeprom_mdix_enabled = 0x0080, 384 eeprom_mdix_enabled = 0x0080,
382}; 385};
383 386
387enum eeprom_phy_iface {
388 NoSuchPhy = 0,
389 I82553AB,
390 I82553C,
391 I82503,
392 DP83840,
393 S80C240,
394 S80C24,
395 I82555,
396 DP83840A = 10,
397};
398
384enum eeprom_id { 399enum eeprom_id {
385 eeprom_id_wol = 0x0020, 400 eeprom_id_wol = 0x0020,
386}; 401};
@@ -545,6 +560,7 @@ struct nic {
545 u32 msg_enable ____cacheline_aligned; 560 u32 msg_enable ____cacheline_aligned;
546 struct net_device *netdev; 561 struct net_device *netdev;
547 struct pci_dev *pdev; 562 struct pci_dev *pdev;
563 u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
548 564
549 struct rx *rxs ____cacheline_aligned; 565 struct rx *rxs ____cacheline_aligned;
550 struct rx *rx_to_use; 566 struct rx *rx_to_use;
@@ -899,7 +915,21 @@ err_unlock:
899 return err; 915 return err;
900} 916}
901 917
902static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) 918static int mdio_read(struct net_device *netdev, int addr, int reg)
919{
920 struct nic *nic = netdev_priv(netdev);
921 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
922}
923
924static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
925{
926 struct nic *nic = netdev_priv(netdev);
927
928 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
929}
930
931/* the standard mdio_ctrl() function for usual MII-compliant hardware */
932static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
903{ 933{
904 u32 data_out = 0; 934 u32 data_out = 0;
905 unsigned int i; 935 unsigned int i;
@@ -938,30 +968,83 @@ static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
938 return (u16)data_out; 968 return (u16)data_out;
939} 969}
940 970
941static int mdio_read(struct net_device *netdev, int addr, int reg) 971/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
942{ 972static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
943 return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0); 973 u32 addr,
974 u32 dir,
975 u32 reg,
976 u16 data)
977{
978 if ((reg == MII_BMCR) && (dir == mdi_write)) {
979 if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
980 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
981 MII_ADVERTISE);
982
983 /*
984 * Workaround Si issue where sometimes the part will not
985 * autoneg to 100Mbps even when advertised.
986 */
987 if (advert & ADVERTISE_100FULL)
988 data |= BMCR_SPEED100 | BMCR_FULLDPLX;
989 else if (advert & ADVERTISE_100HALF)
990 data |= BMCR_SPEED100;
991 }
992 }
993 return mdio_ctrl_hw(nic, addr, dir, reg, data);
944} 994}
945 995
946static void mdio_write(struct net_device *netdev, int addr, int reg, int data) 996/* Fully software-emulated mdio_ctrl() function for cards without
947{ 997 * MII-compliant PHYs.
948 struct nic *nic = netdev_priv(netdev); 998 * For now, this is mainly geared towards 80c24 support; in case of further
949 999 * requirements for other types (i82503, ...?) either extend this mechanism
950 if ((nic->phy == phy_82552_v) && (reg == MII_BMCR) && 1000 * or split it, whichever is cleaner.
951 (data & (BMCR_ANRESTART | BMCR_ANENABLE))) { 1001 */
952 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); 1002static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
953 1003 u32 addr,
954 /* 1004 u32 dir,
955 * Workaround Si issue where sometimes the part will not 1005 u32 reg,
956 * autoneg to 100Mbps even when advertised. 1006 u16 data)
957 */ 1007{
958 if (advert & ADVERTISE_100FULL) 1008 /* might need to allocate a netdev_priv'ed register array eventually
959 data |= BMCR_SPEED100 | BMCR_FULLDPLX; 1009 * to be able to record state changes, but for now
960 else if (advert & ADVERTISE_100HALF) 1010 * some fully hardcoded register handling ought to be ok I guess. */
961 data |= BMCR_SPEED100; 1011
1012 if (dir == mdi_read) {
1013 switch (reg) {
1014 case MII_BMCR:
1015 /* Auto-negotiation, right? */
1016 return BMCR_ANENABLE |
1017 BMCR_FULLDPLX;
1018 case MII_BMSR:
1019 return BMSR_LSTATUS /* for mii_link_ok() */ |
1020 BMSR_ANEGCAPABLE |
1021 BMSR_10FULL;
1022 case MII_ADVERTISE:
1023 /* 80c24 is a "combo card" PHY, right? */
1024 return ADVERTISE_10HALF |
1025 ADVERTISE_10FULL;
1026 default:
1027 DPRINTK(HW, DEBUG,
1028 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1029 dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
1030 return 0xFFFF;
1031 }
1032 } else {
1033 switch (reg) {
1034 default:
1035 DPRINTK(HW, DEBUG,
1036 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1037 dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
1038 return 0xFFFF;
1039 }
962 } 1040 }
963 1041}
964 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data); 1042static inline int e100_phy_supports_mii(struct nic *nic)
1043{
1044 /* for now, just check it by comparing whether we
1045 are using MII software emulation.
1046 */
1047 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
965} 1048}
966 1049
967static void e100_get_defaults(struct nic *nic) 1050static void e100_get_defaults(struct nic *nic)
@@ -1013,7 +1096,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1013 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ 1096 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
1014 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ 1097 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
1015 config->tx_underrun_retry = 0x3; /* # of underrun retries */ 1098 config->tx_underrun_retry = 0x3; /* # of underrun retries */
1016 config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */ 1099 if (e100_phy_supports_mii(nic))
1100 config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */
1017 config->pad10 = 0x6; 1101 config->pad10 = 0x6;
1018 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ 1102 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
1019 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ 1103 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
@@ -1270,6 +1354,42 @@ static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1270 offsetof(struct mem, dump_buf)); 1354 offsetof(struct mem, dump_buf));
1271} 1355}
1272 1356
1357static int e100_phy_check_without_mii(struct nic *nic)
1358{
1359 u8 phy_type;
1360 int without_mii;
1361
1362 phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
1363
1364 switch (phy_type) {
1365 case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
1366 case I82503: /* Non-MII PHY; UNTESTED! */
1367 case S80C24: /* Non-MII PHY; tested and working */
1368 /* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
1369 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
1370 * doesn't have a programming interface of any sort. The
1371 * media is sensed automatically based on how the link partner
1372 * is configured. This is, in essence, manual configuration.
1373 */
1374 DPRINTK(PROBE, INFO,
1375 "found MII-less i82503 or 80c24 or other PHY\n");
1376
1377 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1378 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
1379
1380 /* these might be needed for certain MII-less cards...
1381 * nic->flags |= ich;
1382 * nic->flags |= ich_10h_workaround; */
1383
1384 without_mii = 1;
1385 break;
1386 default:
1387 without_mii = 0;
1388 break;
1389 }
1390 return without_mii;
1391}
1392
1273#define NCONFIG_AUTO_SWITCH 0x0080 1393#define NCONFIG_AUTO_SWITCH 0x0080
1274#define MII_NSC_CONG MII_RESV1 1394#define MII_NSC_CONG MII_RESV1
1275#define NSC_CONG_ENABLE 0x0100 1395#define NSC_CONG_ENABLE 0x0100
@@ -1290,9 +1410,21 @@ static int e100_phy_init(struct nic *nic)
1290 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) 1410 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1291 break; 1411 break;
1292 } 1412 }
1293 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id); 1413 if (addr == 32) {
1294 if (addr == 32) 1414 /* uhoh, no PHY detected: check whether we seem to be some
1295 return -EAGAIN; 1415 * weird, rare variant which is *known* to not have any MII.
1416 * But do this AFTER MII checking only, since this does
1417 * lookup of EEPROM values which may easily be unreliable. */
1418 if (e100_phy_check_without_mii(nic))
1419 return 0; /* simply return and hope for the best */
1420 else {
1421 /* for unknown cases log a fatal error */
1422 DPRINTK(HW, ERR,
1423 "Failed to locate any known PHY, aborting.\n");
1424 return -EAGAIN;
1425 }
1426 } else
1427 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
1296 1428
1297 /* Isolate all the PHY ids */ 1429 /* Isolate all the PHY ids */
1298 for (addr = 0; addr < 32; addr++) 1430 for (addr = 0; addr < 32; addr++)
@@ -1320,6 +1452,9 @@ static int e100_phy_init(struct nic *nic)
1320 if (nic->phy == phy_82552_v) { 1452 if (nic->phy == phy_82552_v) {
1321 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); 1453 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
1322 1454
1455 /* assign special tweaked mdio_ctrl() function */
1456 nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
1457
1323 /* Workaround Si not advertising flow-control during autoneg */ 1458 /* Workaround Si not advertising flow-control during autoneg */
1324 advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1459 advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1325 mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); 1460 mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
@@ -2585,6 +2720,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2585 nic->netdev = netdev; 2720 nic->netdev = netdev;
2586 nic->pdev = pdev; 2721 nic->pdev = pdev;
2587 nic->msg_enable = (1 << debug) - 1; 2722 nic->msg_enable = (1 << debug) - 1;
2723 nic->mdio_ctrl = mdio_ctrl_hw;
2588 pci_set_drvdata(pdev, netdev); 2724 pci_set_drvdata(pdev, netdev);
2589 2725
2590 if ((err = pci_enable_device(pdev))) { 2726 if ((err = pci_enable_device(pdev))) {
@@ -2822,12 +2958,13 @@ static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel
2822 struct net_device *netdev = pci_get_drvdata(pdev); 2958 struct net_device *netdev = pci_get_drvdata(pdev);
2823 struct nic *nic = netdev_priv(netdev); 2959 struct nic *nic = netdev_priv(netdev);
2824 2960
2825 /* Similar to calling e100_down(), but avoids adapter I/O. */
2826 e100_close(netdev);
2827
2828 /* Detach; put netif into a state similar to hotplug unplug. */
2829 napi_enable(&nic->napi);
2830 netif_device_detach(netdev); 2961 netif_device_detach(netdev);
2962
2963 if (state == pci_channel_io_perm_failure)
2964 return PCI_ERS_RESULT_DISCONNECT;
2965
2966 if (netif_running(netdev))
2967 e100_down(nic);
2831 pci_disable_device(pdev); 2968 pci_disable_device(pdev);
2832 2969
2833 /* Request a slot reset. */ 2970 /* Request a slot reset. */
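The e100.c hunks above replace a single hardwired mdio_ctrl() with a per-device function pointer: mdio_ctrl_hw for normal MII hardware, a tweaked variant for phy_82552_v, and a software emulation for MII-lacking PHYs such as the 80c24. A minimal standalone sketch of that dispatch pattern follows; the names (nic_sketch, mdio_ctrl_real, mdio_ctrl_emulated) are illustrative stand-ins, not the driver's own symbols.

/* Minimal sketch: one mdio_ctrl pointer per device lets probe pick a
 * real, quirk-adjusted, or fully emulated MDIO backend once, so the
 * rest of the code never branches on PHY type again. */
#include <stdio.h>

enum mdi_dir { mdi_read, mdi_write };

struct nic_sketch {
        unsigned short (*mdio_ctrl)(struct nic_sketch *nic, unsigned addr,
                                    enum mdi_dir dir, unsigned reg,
                                    unsigned short data);
};

static unsigned short mdio_ctrl_real(struct nic_sketch *nic, unsigned addr,
                                     enum mdi_dir dir, unsigned reg,
                                     unsigned short data)
{
        /* a real driver would bang the MDI control register here */
        (void)nic; (void)addr; (void)dir; (void)reg; (void)data;
        return 0;
}

static unsigned short mdio_ctrl_emulated(struct nic_sketch *nic, unsigned addr,
                                         enum mdi_dir dir, unsigned reg,
                                         unsigned short data)
{
        /* hardcoded answers for PHYs with no programming interface */
        (void)nic; (void)addr; (void)reg; (void)data;
        return dir == mdi_read ? 0xFFFF : 0;
}

int main(void)
{
        struct nic_sketch nic = { .mdio_ctrl = mdio_ctrl_real };

        /* probe-time decision: an MII-less card gets the emulated backend */
        nic.mdio_ctrl = mdio_ctrl_emulated;
        printf("emulated read returns 0x%04X\n",
               nic.mdio_ctrl(&nic, 0, mdi_read, 1, 0));
        return 0;
}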
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 9a32d0c73cb3..8d36743c8140 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2330,7 +2330,8 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2330{ 2330{
2331 struct e1000_adapter *adapter = netdev_priv(netdev); 2331 struct e1000_adapter *adapter = netdev_priv(netdev);
2332 struct e1000_hw *hw = &adapter->hw; 2332 struct e1000_hw *hw = &adapter->hw;
2333 struct dev_addr_list *uc_ptr; 2333 struct netdev_hw_addr *ha;
2334 bool use_uc = false;
2334 struct dev_addr_list *mc_ptr; 2335 struct dev_addr_list *mc_ptr;
2335 u32 rctl; 2336 u32 rctl;
2336 u32 hash_value; 2337 u32 hash_value;
@@ -2369,12 +2370,11 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2369 rctl |= E1000_RCTL_VFE; 2370 rctl |= E1000_RCTL_VFE;
2370 } 2371 }
2371 2372
2372 uc_ptr = NULL;
2373 if (netdev->uc_count > rar_entries - 1) { 2373 if (netdev->uc_count > rar_entries - 1) {
2374 rctl |= E1000_RCTL_UPE; 2374 rctl |= E1000_RCTL_UPE;
2375 } else if (!(netdev->flags & IFF_PROMISC)) { 2375 } else if (!(netdev->flags & IFF_PROMISC)) {
2376 rctl &= ~E1000_RCTL_UPE; 2376 rctl &= ~E1000_RCTL_UPE;
2377 uc_ptr = netdev->uc_list; 2377 use_uc = true;
2378 } 2378 }
2379 2379
2380 ew32(RCTL, rctl); 2380 ew32(RCTL, rctl);
@@ -2392,13 +2392,20 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2392 * if there are not 14 addresses, go ahead and clear the filters 2392 * if there are not 14 addresses, go ahead and clear the filters
2393 * -- with 82571 controllers only 0-13 entries are filled here 2393 * -- with 82571 controllers only 0-13 entries are filled here
2394 */ 2394 */
2395 i = 1;
2396 if (use_uc)
2397 list_for_each_entry(ha, &netdev->uc_list, list) {
2398 if (i == rar_entries)
2399 break;
2400 e1000_rar_set(hw, ha->addr, i++);
2401 }
2402
2403 WARN_ON(i == rar_entries);
2404
2395 mc_ptr = netdev->mc_list; 2405 mc_ptr = netdev->mc_list;
2396 2406
2397 for (i = 1; i < rar_entries; i++) { 2407 for (; i < rar_entries; i++) {
2398 if (uc_ptr) { 2408 if (mc_ptr) {
2399 e1000_rar_set(hw, uc_ptr->da_addr, i);
2400 uc_ptr = uc_ptr->next;
2401 } else if (mc_ptr) {
2402 e1000_rar_set(hw, mc_ptr->da_addr, i); 2409 e1000_rar_set(hw, mc_ptr->da_addr, i);
2403 mc_ptr = mc_ptr->next; 2410 mc_ptr = mc_ptr->next;
2404 } else { 2411 } else {
@@ -2408,7 +2415,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2408 E1000_WRITE_FLUSH(); 2415 E1000_WRITE_FLUSH();
2409 } 2416 }
2410 } 2417 }
2411 WARN_ON(uc_ptr != NULL);
2412 2418
2413 /* load any remaining addresses into the hash table */ 2419 /* load any remaining addresses into the hash table */
2414 2420
@@ -2992,7 +2998,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2992 size -= 4; 2998 size -= 4;
2993 2999
2994 buffer_info->length = size; 3000 buffer_info->length = size;
2995 buffer_info->dma = map[0] + offset; 3001 buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
2996 buffer_info->time_stamp = jiffies; 3002 buffer_info->time_stamp = jiffies;
2997 buffer_info->next_to_watch = i; 3003 buffer_info->next_to_watch = i;
2998 3004
@@ -3033,7 +3039,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3033 size -= 4; 3039 size -= 4;
3034 3040
3035 buffer_info->length = size; 3041 buffer_info->length = size;
3036 buffer_info->dma = map[f + 1] + offset; 3042 buffer_info->dma = map[f] + offset;
3037 buffer_info->time_stamp = jiffies; 3043 buffer_info->time_stamp = jiffies;
3038 buffer_info->next_to_watch = i; 3044 buffer_info->next_to_watch = i;
3039 3045
@@ -3365,7 +3371,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3365 3371
3366 if (count) { 3372 if (count) {
3367 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3373 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3368 netdev->trans_start = jiffies;
3369 /* Make sure there is space in the ring for the next send. */ 3374 /* Make sure there is space in the ring for the next send. */
3370 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3375 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3371 3376
@@ -4030,8 +4035,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4030 PCI_DMA_FROMDEVICE); 4035 PCI_DMA_FROMDEVICE);
4031 4036
4032 length = le16_to_cpu(rx_desc->length); 4037 length = le16_to_cpu(rx_desc->length);
4033 4038 /* !EOP means multiple descriptors were used to store a single
4034 if (unlikely(!(status & E1000_RXD_STAT_EOP))) { 4039 * packet, also make sure the frame isn't just CRC only */
4040 if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
4035 /* All receives must fit into a single buffer */ 4041 /* All receives must fit into a single buffer */
4036 E1000_DBG("%s: Receive packet consumed multiple" 4042 E1000_DBG("%s: Receive packet consumed multiple"
4037 " buffers\n", netdev->name); 4043 " buffers\n", netdev->name);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 6c01a2072c87..b53b40ba88a8 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -71,6 +71,7 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw);
71static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); 71static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
72static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); 72static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
73static s32 e1000_led_on_82574(struct e1000_hw *hw); 73static s32 e1000_led_on_82574(struct e1000_hw *hw);
74static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
74 75
75/** 76/**
76 * e1000_init_phy_params_82571 - Init PHY func ptrs. 77 * e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -212,6 +213,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
212 struct e1000_hw *hw = &adapter->hw; 213 struct e1000_hw *hw = &adapter->hw;
213 struct e1000_mac_info *mac = &hw->mac; 214 struct e1000_mac_info *mac = &hw->mac;
214 struct e1000_mac_operations *func = &mac->ops; 215 struct e1000_mac_operations *func = &mac->ops;
216 u32 swsm = 0;
217 u32 swsm2 = 0;
218 bool force_clear_smbi = false;
215 219
216 /* Set media type */ 220 /* Set media type */
217 switch (adapter->pdev->device) { 221 switch (adapter->pdev->device) {
@@ -276,6 +280,50 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
276 break; 280 break;
277 } 281 }
278 282
283 /*
284 * Ensure that the inter-port SWSM.SMBI lock bit is clear before
 285 * first NVM or PHY access. This should be done for single-port
286 * devices, and for one port only on dual-port devices so that
287 * for those devices we can still use the SMBI lock to synchronize
288 * inter-port accesses to the PHY & NVM.
289 */
290 switch (hw->mac.type) {
291 case e1000_82571:
292 case e1000_82572:
293 swsm2 = er32(SWSM2);
294
295 if (!(swsm2 & E1000_SWSM2_LOCK)) {
296 /* Only do this for the first interface on this card */
297 ew32(SWSM2,
298 swsm2 | E1000_SWSM2_LOCK);
299 force_clear_smbi = true;
300 } else
301 force_clear_smbi = false;
302 break;
303 default:
304 force_clear_smbi = true;
305 break;
306 }
307
308 if (force_clear_smbi) {
309 /* Make sure SWSM.SMBI is clear */
310 swsm = er32(SWSM);
311 if (swsm & E1000_SWSM_SMBI) {
312 /* This bit should not be set on a first interface, and
313 * indicates that the bootagent or EFI code has
314 * improperly left this bit enabled
315 */
316 hw_dbg(hw, "Please update your 82571 Bootagent\n");
317 }
318 ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
319 }
320
321 /*
 322 * Initialize device specific counter of SMBI acquisition
323 * timeouts.
324 */
325 hw->dev_spec.e82571.smb_counter = 0;
326
279 return 0; 327 return 0;
280} 328}
281 329
@@ -341,8 +389,10 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
341 if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, 389 if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
342 &eeprom_data) < 0) 390 &eeprom_data) < 0)
343 break; 391 break;
344 if (eeprom_data & NVM_WORD1A_ASPM_MASK) 392 if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
345 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; 393 adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
394 adapter->max_hw_frame_size = DEFAULT_JUMBO;
395 }
346 } 396 }
347 break; 397 break;
348 default: 398 default:
@@ -411,11 +461,37 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
411static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) 461static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
412{ 462{
413 u32 swsm; 463 u32 swsm;
414 s32 timeout = hw->nvm.word_size + 1; 464 s32 sw_timeout = hw->nvm.word_size + 1;
465 s32 fw_timeout = hw->nvm.word_size + 1;
415 s32 i = 0; 466 s32 i = 0;
416 467
468 /*
 469 * If we have timed out 3 times on trying to acquire
470 * the inter-port SMBI semaphore, there is old code
471 * operating on the other port, and it is not
472 * releasing SMBI. Modify the number of times that
473 * we try for the semaphore to interwork with this
474 * older code.
475 */
476 if (hw->dev_spec.e82571.smb_counter > 2)
477 sw_timeout = 1;
478
479 /* Get the SW semaphore */
480 while (i < sw_timeout) {
481 swsm = er32(SWSM);
482 if (!(swsm & E1000_SWSM_SMBI))
483 break;
484
485 udelay(50);
486 i++;
487 }
488
489 if (i == sw_timeout) {
490 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
491 hw->dev_spec.e82571.smb_counter++;
492 }
417 /* Get the FW semaphore. */ 493 /* Get the FW semaphore. */
418 for (i = 0; i < timeout; i++) { 494 for (i = 0; i < fw_timeout; i++) {
419 swsm = er32(SWSM); 495 swsm = er32(SWSM);
420 ew32(SWSM, swsm | E1000_SWSM_SWESMBI); 496 ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
421 497
@@ -426,9 +502,9 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
426 udelay(50); 502 udelay(50);
427 } 503 }
428 504
429 if (i == timeout) { 505 if (i == fw_timeout) {
430 /* Release semaphores */ 506 /* Release semaphores */
431 e1000e_put_hw_semaphore(hw); 507 e1000_put_hw_semaphore_82571(hw);
432 hw_dbg(hw, "Driver can't access the NVM\n"); 508 hw_dbg(hw, "Driver can't access the NVM\n");
433 return -E1000_ERR_NVM; 509 return -E1000_ERR_NVM;
434 } 510 }
@@ -447,9 +523,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
447 u32 swsm; 523 u32 swsm;
448 524
449 swsm = er32(SWSM); 525 swsm = er32(SWSM);
450 526 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
451 swsm &= ~E1000_SWSM_SWESMBI;
452
453 ew32(SWSM, swsm); 527 ew32(SWSM, swsm);
454} 528}
455 529
@@ -1585,6 +1659,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
1585static struct e1000_mac_operations e82571_mac_ops = { 1659static struct e1000_mac_operations e82571_mac_ops = {
1586 /* .check_mng_mode: mac type dependent */ 1660 /* .check_mng_mode: mac type dependent */
1587 /* .check_for_link: media type dependent */ 1661 /* .check_for_link: media type dependent */
1662 .id_led_init = e1000e_id_led_init,
1588 .cleanup_led = e1000e_cleanup_led_generic, 1663 .cleanup_led = e1000e_cleanup_led_generic,
1589 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, 1664 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
1590 .get_bus_info = e1000e_get_bus_info_pcie, 1665 .get_bus_info = e1000e_get_bus_info_pcie,
@@ -1596,6 +1671,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
1596 .init_hw = e1000_init_hw_82571, 1671 .init_hw = e1000_init_hw_82571,
1597 .setup_link = e1000_setup_link_82571, 1672 .setup_link = e1000_setup_link_82571,
1598 /* .setup_physical_interface: media type dependent */ 1673 /* .setup_physical_interface: media type dependent */
1674 .setup_led = e1000e_setup_led_generic,
1599}; 1675};
1600 1676
1601static struct e1000_phy_operations e82_phy_ops_igp = { 1677static struct e1000_phy_operations e82_phy_ops_igp = {
@@ -1672,6 +1748,7 @@ struct e1000_info e1000_82571_info = {
1672 | FLAG_TARC_SPEED_MODE_BIT /* errata */ 1748 | FLAG_TARC_SPEED_MODE_BIT /* errata */
1673 | FLAG_APME_CHECK_PORT_B, 1749 | FLAG_APME_CHECK_PORT_B,
1674 .pba = 38, 1750 .pba = 38,
1751 .max_hw_frame_size = DEFAULT_JUMBO,
1675 .get_variants = e1000_get_variants_82571, 1752 .get_variants = e1000_get_variants_82571,
1676 .mac_ops = &e82571_mac_ops, 1753 .mac_ops = &e82571_mac_ops,
1677 .phy_ops = &e82_phy_ops_igp, 1754 .phy_ops = &e82_phy_ops_igp,
@@ -1688,6 +1765,7 @@ struct e1000_info e1000_82572_info = {
1688 | FLAG_HAS_CTRLEXT_ON_LOAD 1765 | FLAG_HAS_CTRLEXT_ON_LOAD
1689 | FLAG_TARC_SPEED_MODE_BIT, /* errata */ 1766 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
1690 .pba = 38, 1767 .pba = 38,
1768 .max_hw_frame_size = DEFAULT_JUMBO,
1691 .get_variants = e1000_get_variants_82571, 1769 .get_variants = e1000_get_variants_82571,
1692 .mac_ops = &e82571_mac_ops, 1770 .mac_ops = &e82571_mac_ops,
1693 .phy_ops = &e82_phy_ops_igp, 1771 .phy_ops = &e82_phy_ops_igp,
@@ -1706,6 +1784,7 @@ struct e1000_info e1000_82573_info = {
1706 | FLAG_HAS_ERT 1784 | FLAG_HAS_ERT
1707 | FLAG_HAS_SWSM_ON_LOAD, 1785 | FLAG_HAS_SWSM_ON_LOAD,
1708 .pba = 20, 1786 .pba = 20,
1787 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1709 .get_variants = e1000_get_variants_82571, 1788 .get_variants = e1000_get_variants_82571,
1710 .mac_ops = &e82571_mac_ops, 1789 .mac_ops = &e82571_mac_ops,
1711 .phy_ops = &e82_phy_ops_m88, 1790 .phy_ops = &e82_phy_ops_m88,
@@ -1724,6 +1803,7 @@ struct e1000_info e1000_82574_info = {
1724 | FLAG_HAS_AMT 1803 | FLAG_HAS_AMT
1725 | FLAG_HAS_CTRLEXT_ON_LOAD, 1804 | FLAG_HAS_CTRLEXT_ON_LOAD,
1726 .pba = 20, 1805 .pba = 20,
1806 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1727 .get_variants = e1000_get_variants_82571, 1807 .get_variants = e1000_get_variants_82571,
1728 .mac_ops = &e82571_mac_ops, 1808 .mac_ops = &e82571_mac_ops,
1729 .phy_ops = &e82_phy_ops_bm, 1809 .phy_ops = &e82_phy_ops_bm,
@@ -1740,6 +1820,7 @@ struct e1000_info e1000_82583_info = {
1740 | FLAG_HAS_AMT 1820 | FLAG_HAS_AMT
1741 | FLAG_HAS_CTRLEXT_ON_LOAD, 1821 | FLAG_HAS_CTRLEXT_ON_LOAD,
1742 .pba = 20, 1822 .pba = 20,
1823 .max_hw_frame_size = DEFAULT_JUMBO,
1743 .get_variants = e1000_get_variants_82571, 1824 .get_variants = e1000_get_variants_82571,
1744 .mac_ops = &e82571_mac_ops, 1825 .mac_ops = &e82571_mac_ops,
1745 .phy_ops = &e82_phy_ops_bm, 1826 .phy_ops = &e82_phy_ops_bm,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 243aa499fe90..8890c97e1120 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -56,6 +56,7 @@
56/* Wake Up Control */ 56/* Wake Up Control */
57#define E1000_WUC_APME 0x00000001 /* APM Enable */ 57#define E1000_WUC_APME 0x00000001 /* APM Enable */
58#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ 58#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
59#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
59 60
60/* Wake Up Filter Control */ 61/* Wake Up Filter Control */
61#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ 62#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -65,6 +66,13 @@
65#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ 66#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
66#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 67#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
67 68
69/* Wake Up Status */
70#define E1000_WUS_LNKC E1000_WUFC_LNKC
71#define E1000_WUS_MAG E1000_WUFC_MAG
72#define E1000_WUS_EX E1000_WUFC_EX
73#define E1000_WUS_MC E1000_WUFC_MC
74#define E1000_WUS_BC E1000_WUFC_BC
75
68/* Extended Device Control */ 76/* Extended Device Control */
69#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */ 77#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
70#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 78#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
@@ -77,6 +85,7 @@
77#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 85#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
78#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 86#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
79#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 87#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
88#define E1000_CTRL_EXT_PHYPDEN 0x00100000
80 89
81/* Receive Descriptor bit definitions */ 90/* Receive Descriptor bit definitions */
82#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 91#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
@@ -140,6 +149,7 @@
140#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ 149#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
141#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ 150#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
142#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ 151#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
152#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
143#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ 153#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
144/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ 154/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
145#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ 155#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
@@ -153,6 +163,7 @@
153#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ 163#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
154#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ 164#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
155#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ 165#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
166#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
156#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 167#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
157#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 168#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
158 169
@@ -255,11 +266,16 @@
255#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX 266#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
256 267
257/* LED Control */ 268/* LED Control */
269#define E1000_PHY_LED0_MODE_MASK 0x00000007
270#define E1000_PHY_LED0_IVRT 0x00000008
271#define E1000_PHY_LED0_MASK 0x0000001F
272
258#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F 273#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
259#define E1000_LEDCTL_LED0_MODE_SHIFT 0 274#define E1000_LEDCTL_LED0_MODE_SHIFT 0
260#define E1000_LEDCTL_LED0_IVRT 0x00000040 275#define E1000_LEDCTL_LED0_IVRT 0x00000040
261#define E1000_LEDCTL_LED0_BLINK 0x00000080 276#define E1000_LEDCTL_LED0_BLINK 0x00000080
262 277
278#define E1000_LEDCTL_MODE_LINK_UP 0x2
263#define E1000_LEDCTL_MODE_LED_ON 0xE 279#define E1000_LEDCTL_MODE_LED_ON 0xE
264#define E1000_LEDCTL_MODE_LED_OFF 0xF 280#define E1000_LEDCTL_MODE_LED_OFF 0xF
265 281
@@ -360,6 +376,8 @@
360#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 376#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
361#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ 377#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
362 378
379#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
380
363/* Interrupt Cause Read */ 381/* Interrupt Cause Read */
364#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ 382#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
365#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ 383#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
@@ -469,6 +487,8 @@
469#define AUTO_READ_DONE_TIMEOUT 10 487#define AUTO_READ_DONE_TIMEOUT 10
470 488
471/* Flow Control */ 489/* Flow Control */
490#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
491#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
472#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 492#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
473 493
474/* Transmit Configuration Word */ 494/* Transmit Configuration Word */
@@ -674,6 +694,8 @@
674#define IFE_C_E_PHY_ID 0x02A80310 694#define IFE_C_E_PHY_ID 0x02A80310
675#define BME1000_E_PHY_ID 0x01410CB0 695#define BME1000_E_PHY_ID 0x01410CB0
676#define BME1000_E_PHY_ID_R2 0x01410CB1 696#define BME1000_E_PHY_ID_R2 0x01410CB1
697#define I82577_E_PHY_ID 0x01540050
698#define I82578_E_PHY_ID 0x004DD040
677 699
678/* M88E1000 Specific Registers */ 700/* M88E1000 Specific Registers */
679#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 701#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -727,6 +749,9 @@
727#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 749#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
728#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 750#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
729 751
752#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
753#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
754
730/* BME1000 PHY Specific Control Register */ 755/* BME1000 PHY Specific Control Register */
731#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ 756#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
732 757
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index f37360aa12a8..d6e491bc58c9 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -96,6 +96,51 @@ struct e1000_info;
96/* Number of packet split data buffers (not including the header buffer) */ 96/* Number of packet split data buffers (not including the header buffer) */
97#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) 97#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
98 98
99#define DEFAULT_JUMBO 9234
100
101/* BM/HV Specific Registers */
102#define BM_PORT_CTRL_PAGE 769
103
104#define PHY_UPPER_SHIFT 21
105#define BM_PHY_REG(page, reg) \
106 (((reg) & MAX_PHY_REG_ADDRESS) |\
107 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
108 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
109
110/* PHY Wakeup Registers and defines */
111#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
112#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
113#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
114#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
115#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
116#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
117#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
118#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
119#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
120
121#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
122#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
123#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
124#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
125#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
126#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
127#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
128
129#define HV_SCC_UPPER PHY_REG(778, 16) /* Single Collision Count */
130#define HV_SCC_LOWER PHY_REG(778, 17)
131#define HV_ECOL_UPPER PHY_REG(778, 18) /* Excessive Collision Count */
132#define HV_ECOL_LOWER PHY_REG(778, 19)
133#define HV_MCC_UPPER PHY_REG(778, 20) /* Multiple Collision Count */
134#define HV_MCC_LOWER PHY_REG(778, 21)
135#define HV_LATECOL_UPPER PHY_REG(778, 23) /* Late Collision Count */
136#define HV_LATECOL_LOWER PHY_REG(778, 24)
137#define HV_COLC_UPPER PHY_REG(778, 25) /* Collision Count */
138#define HV_COLC_LOWER PHY_REG(778, 26)
139#define HV_DC_UPPER PHY_REG(778, 27) /* Defer Count */
140#define HV_DC_LOWER PHY_REG(778, 28)
141#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
142#define HV_TNCRS_LOWER PHY_REG(778, 30)
143
99enum e1000_boards { 144enum e1000_boards {
100 board_82571, 145 board_82571,
101 board_82572, 146 board_82572,
@@ -106,6 +151,7 @@ enum e1000_boards {
106 board_ich8lan, 151 board_ich8lan,
107 board_ich9lan, 152 board_ich9lan,
108 board_ich10lan, 153 board_ich10lan,
154 board_pchlan,
109}; 155};
110 156
111struct e1000_queue_stats { 157struct e1000_queue_stats {
@@ -293,6 +339,7 @@ struct e1000_adapter {
293 u32 eeprom_wol; 339 u32 eeprom_wol;
294 u32 wol; 340 u32 wol;
295 u32 pba; 341 u32 pba;
342 u32 max_hw_frame_size;
296 343
297 bool fc_autoneg; 344 bool fc_autoneg;
298 345
@@ -302,6 +349,7 @@ struct e1000_adapter {
302 unsigned int flags2; 349 unsigned int flags2;
303 struct work_struct downshift_task; 350 struct work_struct downshift_task;
304 struct work_struct update_phy_task; 351 struct work_struct update_phy_task;
352 struct work_struct led_blink_task;
305}; 353};
306 354
307struct e1000_info { 355struct e1000_info {
@@ -309,6 +357,7 @@ struct e1000_info {
309 unsigned int flags; 357 unsigned int flags;
310 unsigned int flags2; 358 unsigned int flags2;
311 u32 pba; 359 u32 pba;
360 u32 max_hw_frame_size;
312 s32 (*get_variants)(struct e1000_adapter *); 361 s32 (*get_variants)(struct e1000_adapter *);
313 struct e1000_mac_operations *mac_ops; 362 struct e1000_mac_operations *mac_ops;
314 struct e1000_phy_operations *phy_ops; 363 struct e1000_phy_operations *phy_ops;
@@ -351,6 +400,7 @@ struct e1000_info {
351 400
352/* CRC Stripping defines */ 401/* CRC Stripping defines */
353#define FLAG2_CRC_STRIPPING (1 << 0) 402#define FLAG2_CRC_STRIPPING (1 << 0)
403#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
354 404
355#define E1000_RX_DESC_PS(R, i) \ 405#define E1000_RX_DESC_PS(R, i) \
356 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 406 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -404,6 +454,7 @@ extern struct e1000_info e1000_82583_info;
404extern struct e1000_info e1000_ich8_info; 454extern struct e1000_info e1000_ich8_info;
405extern struct e1000_info e1000_ich9_info; 455extern struct e1000_info e1000_ich9_info;
406extern struct e1000_info e1000_ich10_info; 456extern struct e1000_info e1000_ich10_info;
457extern struct e1000_info e1000_pch_info;
407extern struct e1000_info e1000_es2_info; 458extern struct e1000_info e1000_es2_info;
408 459
409extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num); 460extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@ -425,6 +476,7 @@ extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
425extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); 476extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
426extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); 477extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
427extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); 478extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
479extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
428extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); 480extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
429extern s32 e1000e_led_on_generic(struct e1000_hw *hw); 481extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
430extern s32 e1000e_led_off_generic(struct e1000_hw *hw); 482extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
@@ -493,6 +545,15 @@ extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
493extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); 545extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
494extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); 546extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
495extern s32 e1000e_check_downshift(struct e1000_hw *hw); 547extern s32 e1000e_check_downshift(struct e1000_hw *hw);
548extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
549extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
550extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
551extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
552extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
553extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
554extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
555extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
556extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
496 557
497static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) 558static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
498{ 559{
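The BM_PHY_REG() macro added to e1000.h above packs a PHY page number and a register offset into one value: the low 5 bits carry the in-page register, the page sits above PHY_PAGE_SHIFT, and register bits beyond 31 are parked at PHY_UPPER_SHIFT. The sketch below restates the constants (0x1F mask, shifts of 5 and 21, and the BM wakeup page number 800) so it compiles on its own; treat those literal values as assumptions drawn from this diff's conventions rather than authoritative.

#include <stdio.h>

/* Standalone restatement of BM_PHY_REG(): low 5 bits = in-page register,
 * page above PHY_PAGE_SHIFT, register bits above 31 parked at
 * PHY_UPPER_SHIFT.  Constants are assumed, for illustration only. */
#define MAX_PHY_REG_ADDRESS 0x1F
#define PHY_PAGE_SHIFT      5
#define PHY_UPPER_SHIFT     21

#define BM_PHY_REG(page, reg) \
        (((reg) & MAX_PHY_REG_ADDRESS) | \
         (((page) & 0xFFFF) << PHY_PAGE_SHIFT) | \
         (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))

int main(void)
{
        /* page 800 is assumed to be the BM wakeup-control page */
        printf("page 800, reg  1 -> 0x%08X\n", (unsigned)BM_PHY_REG(800, 1));
        /* a register number above 31 spills into the upper bits */
        printf("page 800, reg 37 -> 0x%08X\n", (unsigned)BM_PHY_REG(800, 37));
        return 0;
}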
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 8964838c686b..ae5d73689353 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1366,6 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
1366} 1366}
1367 1367
1368static struct e1000_mac_operations es2_mac_ops = { 1368static struct e1000_mac_operations es2_mac_ops = {
1369 .id_led_init = e1000e_id_led_init,
1369 .check_mng_mode = e1000e_check_mng_mode_generic, 1370 .check_mng_mode = e1000e_check_mng_mode_generic,
1370 /* check_for_link dependent on media type */ 1371 /* check_for_link dependent on media type */
1371 .cleanup_led = e1000e_cleanup_led_generic, 1372 .cleanup_led = e1000e_cleanup_led_generic,
@@ -1379,6 +1380,7 @@ static struct e1000_mac_operations es2_mac_ops = {
1379 .init_hw = e1000_init_hw_80003es2lan, 1380 .init_hw = e1000_init_hw_80003es2lan,
1380 .setup_link = e1000e_setup_link, 1381 .setup_link = e1000e_setup_link,
1381 /* setup_physical_interface dependent on media type */ 1382 /* setup_physical_interface dependent on media type */
1383 .setup_led = e1000e_setup_led_generic,
1382}; 1384};
1383 1385
1384static struct e1000_phy_operations es2_phy_ops = { 1386static struct e1000_phy_operations es2_phy_ops = {
@@ -1422,6 +1424,7 @@ struct e1000_info e1000_es2_info = {
1422 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ 1424 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
1423 | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, 1425 | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
1424 .pba = 38, 1426 .pba = 38,
1427 .max_hw_frame_size = DEFAULT_JUMBO,
1425 .get_variants = e1000_get_variants_80003es2lan, 1428 .get_variants = e1000_get_variants_80003es2lan,
1426 .mac_ops = &es2_mac_ops, 1429 .mac_ops = &es2_mac_ops,
1427 .phy_ops = &es2_phy_ops, 1430 .phy_ops = &es2_phy_ops,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 4d25ede88369..1bf4d2a5d34f 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -167,6 +167,15 @@ static int e1000_get_settings(struct net_device *netdev,
167 167
168 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || 168 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
169 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; 169 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
170
171 /* MDI-X => 2; MDI =>1; Invalid =>0 */
172 if ((hw->phy.media_type == e1000_media_type_copper) &&
173 !hw->mac.get_link_status)
174 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
175 ETH_TP_MDI;
176 else
177 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
178
170 return 0; 179 return 0;
171} 180}
172 181
@@ -776,6 +785,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
776 u32 after; 785 u32 after;
777 u32 i; 786 u32 i;
778 u32 toggle; 787 u32 toggle;
788 u32 mask;
779 789
780 /* 790 /*
781 * The status register is Read Only, so a write should fail. 791 * The status register is Read Only, so a write should fail.
@@ -788,17 +798,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
788 case e1000_80003es2lan: 798 case e1000_80003es2lan:
789 toggle = 0x7FFFF3FF; 799 toggle = 0x7FFFF3FF;
790 break; 800 break;
791 case e1000_82573: 801 default:
792 case e1000_82574:
793 case e1000_82583:
794 case e1000_ich8lan:
795 case e1000_ich9lan:
796 case e1000_ich10lan:
797 toggle = 0x7FFFF033; 802 toggle = 0x7FFFF033;
798 break; 803 break;
799 default:
800 toggle = 0xFFFFF833;
801 break;
802 } 804 }
803 805
804 before = er32(STATUS); 806 before = er32(STATUS);
@@ -844,11 +846,18 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
844 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); 846 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
845 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 847 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
846 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); 848 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
849 mask = 0x8003FFFF;
850 switch (mac->type) {
851 case e1000_ich10lan:
852 case e1000_pchlan:
853 mask |= (1 << 18);
854 break;
855 default:
856 break;
857 }
847 for (i = 0; i < mac->rar_entry_count; i++) 858 for (i = 0; i < mac->rar_entry_count; i++)
848 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), 859 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
849 ((mac->type == e1000_ich10lan) ? 860 mask, 0xFFFFFFFF);
850 0x8007FFFF : 0x8003FFFF),
851 0xFFFFFFFF);
852 861
853 for (i = 0; i < mac->mta_reg_count; i++) 862 for (i = 0; i < mac->mta_reg_count; i++)
854 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); 863 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -1786,15 +1795,22 @@ static int e1000_set_wol(struct net_device *netdev,
1786/* bit defines for adapter->led_status */ 1795/* bit defines for adapter->led_status */
1787#define E1000_LED_ON 0 1796#define E1000_LED_ON 0
1788 1797
1789static void e1000_led_blink_callback(unsigned long data) 1798static void e1000e_led_blink_task(struct work_struct *work)
1790{ 1799{
1791 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 1800 struct e1000_adapter *adapter = container_of(work,
1801 struct e1000_adapter, led_blink_task);
1792 1802
1793 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) 1803 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
1794 adapter->hw.mac.ops.led_off(&adapter->hw); 1804 adapter->hw.mac.ops.led_off(&adapter->hw);
1795 else 1805 else
1796 adapter->hw.mac.ops.led_on(&adapter->hw); 1806 adapter->hw.mac.ops.led_on(&adapter->hw);
1807}
1808
1809static void e1000_led_blink_callback(unsigned long data)
1810{
1811 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1797 1812
1813 schedule_work(&adapter->led_blink_task);
1798 mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); 1814 mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
1799} 1815}
1800 1816
@@ -1807,7 +1823,9 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1807 data = INT_MAX; 1823 data = INT_MAX;
1808 1824
1809 if ((hw->phy.type == e1000_phy_ife) || 1825 if ((hw->phy.type == e1000_phy_ife) ||
1826 (hw->mac.type == e1000_pchlan) ||
1810 (hw->mac.type == e1000_82574)) { 1827 (hw->mac.type == e1000_82574)) {
1828 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
1811 if (!adapter->blink_timer.function) { 1829 if (!adapter->blink_timer.function) {
1812 init_timer(&adapter->blink_timer); 1830 init_timer(&adapter->blink_timer);
1813 adapter->blink_timer.function = 1831 adapter->blink_timer.function =
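The ethtool change above splits LED blinking in two: the timer callback now only schedules led_blink_task, and the actual led_on()/led_off() calls run from the work item, where sleeping (as PHY-backed LED control on 82574/PCH may require) is allowed. The userspace sketch below mimics that split; work_pending, blink_timer_callback() and led_blink_work() are illustrative names, not kernel API.

#include <stdio.h>
#include <unistd.h>

/* Userspace sketch: the timer-context callback only schedules work,
 * and the toggle, which may sleep, runs later where blocking is OK. */
static int work_pending;
static int led_on;

static void blink_timer_callback(void)
{
        work_pending = 1;               /* schedule_work() stand-in */
}

static void led_blink_work(void)
{
        led_on = !led_on;
        usleep(1000);                   /* sleeping is fine in this context */
        printf("LED %s\n", led_on ? "on" : "off");
}

int main(void)
{
        int tick;

        for (tick = 0; tick < 4; tick++) {
                blink_timer_callback(); /* "atomic" context: must not sleep */
                if (work_pending) {     /* deferred work runs later */
                        work_pending = 0;
                        led_blink_work();
                }
        }
        return 0;
}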
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 6cdb703be951..163c1c0cfee7 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -193,7 +193,11 @@ enum e1e_registers {
193 E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */ 193 E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
194 E1000_RFCTL = 0x05008, /* Receive Filter Control */ 194 E1000_RFCTL = 0x05008, /* Receive Filter Control */
195 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ 195 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
196 E1000_RA = 0x05400, /* Receive Address - RW Array */ 196 E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
197#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8))
198#define E1000_RA (E1000_RAL(0))
199 E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
200#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
197 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ 201 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
198 E1000_WUC = 0x05800, /* Wakeup Control - RW */ 202 E1000_WUC = 0x05800, /* Wakeup Control - RW */
199 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ 203 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
@@ -210,6 +214,7 @@ enum e1e_registers {
210 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ 214 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
211 E1000_SWSM = 0x05B50, /* SW Semaphore */ 215 E1000_SWSM = 0x05B50, /* SW Semaphore */
212 E1000_FWSM = 0x05B54, /* FW Semaphore */ 216 E1000_FWSM = 0x05B54, /* FW Semaphore */
217 E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
213 E1000_HICR = 0x08F00, /* Host Interface Control */ 218 E1000_HICR = 0x08F00, /* Host Interface Control */
214}; 219};
215 220
@@ -368,6 +373,10 @@ enum e1e_registers {
368#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE 373#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
369#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE 374#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
370#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF 375#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
376#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
377#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
378#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
379#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
371 380
372#define E1000_REVISION_4 4 381#define E1000_REVISION_4 4
373 382
@@ -383,6 +392,7 @@ enum e1000_mac_type {
383 e1000_ich8lan, 392 e1000_ich8lan,
384 e1000_ich9lan, 393 e1000_ich9lan,
385 e1000_ich10lan, 394 e1000_ich10lan,
395 e1000_pchlan,
386}; 396};
387 397
388enum e1000_media_type { 398enum e1000_media_type {
@@ -417,6 +427,8 @@ enum e1000_phy_type {
417 e1000_phy_igp_3, 427 e1000_phy_igp_3,
418 e1000_phy_ife, 428 e1000_phy_ife,
419 e1000_phy_bm, 429 e1000_phy_bm,
430 e1000_phy_82578,
431 e1000_phy_82577,
420}; 432};
421 433
422enum e1000_bus_width { 434enum e1000_bus_width {
@@ -720,6 +732,7 @@ struct e1000_host_mng_command_info {
720 732
721/* Function pointers and static data for the MAC. */ 733/* Function pointers and static data for the MAC. */
722struct e1000_mac_operations { 734struct e1000_mac_operations {
735 s32 (*id_led_init)(struct e1000_hw *);
723 bool (*check_mng_mode)(struct e1000_hw *); 736 bool (*check_mng_mode)(struct e1000_hw *);
724 s32 (*check_for_link)(struct e1000_hw *); 737 s32 (*check_for_link)(struct e1000_hw *);
725 s32 (*cleanup_led)(struct e1000_hw *); 738 s32 (*cleanup_led)(struct e1000_hw *);
@@ -733,11 +746,13 @@ struct e1000_mac_operations {
733 s32 (*init_hw)(struct e1000_hw *); 746 s32 (*init_hw)(struct e1000_hw *);
734 s32 (*setup_link)(struct e1000_hw *); 747 s32 (*setup_link)(struct e1000_hw *);
735 s32 (*setup_physical_interface)(struct e1000_hw *); 748 s32 (*setup_physical_interface)(struct e1000_hw *);
749 s32 (*setup_led)(struct e1000_hw *);
736}; 750};
737 751
738/* Function pointers for the PHY. */ 752/* Function pointers for the PHY. */
739struct e1000_phy_operations { 753struct e1000_phy_operations {
740 s32 (*acquire_phy)(struct e1000_hw *); 754 s32 (*acquire_phy)(struct e1000_hw *);
755 s32 (*check_polarity)(struct e1000_hw *);
741 s32 (*check_reset_block)(struct e1000_hw *); 756 s32 (*check_reset_block)(struct e1000_hw *);
742 s32 (*commit_phy)(struct e1000_hw *); 757 s32 (*commit_phy)(struct e1000_hw *);
743 s32 (*force_speed_duplex)(struct e1000_hw *); 758 s32 (*force_speed_duplex)(struct e1000_hw *);
@@ -869,6 +884,7 @@ struct e1000_fc_info {
869struct e1000_dev_spec_82571 { 884struct e1000_dev_spec_82571 {
870 bool laa_is_present; 885 bool laa_is_present;
871 bool alt_mac_addr_is_present; 886 bool alt_mac_addr_is_present;
887 u32 smb_counter;
872}; 888};
873 889
874struct e1000_shadow_ram { 890struct e1000_shadow_ram {
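The hw.h hunk above turns the flat E1000_RA register into indexed E1000_RAL(n)/E1000_RAH(n) accessors with an 8-byte stride per receive-address entry. A tiny sketch of the resulting offsets, using the base values from the diff:

#include <stdio.h>

/* Sketch of the RAL/RAH accessors added above: receive-address entry n
 * occupies an 8-byte slot, low dword then high dword.  Base offsets are
 * taken from the hunk; the loop is only for illustration. */
#define E1000_RAL_BASE 0x05400
#define E1000_RAH_BASE 0x05404
#define E1000_RAL(n) (E1000_RAL_BASE + ((n) * 8))
#define E1000_RAH(n) (E1000_RAH_BASE + ((n) * 8))

int main(void)
{
        int n;

        for (n = 0; n < 3; n++)
                printf("RAR[%d]: RAL=0x%05X RAH=0x%05X\n",
                       n, (unsigned)E1000_RAL(n), (unsigned)E1000_RAH(n));
        return 0;
}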
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 6d1aab6316ba..9e23f50fb9cd 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -48,6 +48,10 @@
48 * 82567LF-3 Gigabit Network Connection 48 * 82567LF-3 Gigabit Network Connection
49 * 82567LM-3 Gigabit Network Connection 49 * 82567LM-3 Gigabit Network Connection
50 * 82567LM-4 Gigabit Network Connection 50 * 82567LM-4 Gigabit Network Connection
51 * 82577LM Gigabit Network Connection
52 * 82577LC Gigabit Network Connection
53 * 82578DM Gigabit Network Connection
54 * 82578DC Gigabit Network Connection
51 */ 55 */
52 56
53#include <linux/netdevice.h> 57#include <linux/netdevice.h>
@@ -116,6 +120,8 @@
116#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 120#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
117#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 121#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
118 122
123#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
124
119/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ 125/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
120/* Offset 04h HSFSTS */ 126/* Offset 04h HSFSTS */
121union ich8_hws_flash_status { 127union ich8_hws_flash_status {
@@ -186,6 +192,14 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
186static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); 192static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
187static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); 193static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
188static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); 194static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
195static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
196static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
197static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
198static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
199static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
200static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
201static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
202static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
189 203
190static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 204static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
191{ 205{
@@ -213,6 +227,41 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
213#define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) 227#define ew32flash(reg,val) __ew32flash(hw, (reg), (val))
214 228
215/** 229/**
230 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
231 * @hw: pointer to the HW structure
232 *
233 * Initialize family-specific PHY parameters and function pointers.
234 **/
235static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
236{
237 struct e1000_phy_info *phy = &hw->phy;
238 s32 ret_val = 0;
239
240 phy->addr = 1;
241 phy->reset_delay_us = 100;
242
243 phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
244 phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
245 phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
246 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
247
248 phy->id = e1000_phy_unknown;
249 e1000e_get_phy_id(hw);
250 phy->type = e1000e_get_phy_type_from_id(phy->id);
251
252 if (phy->type == e1000_phy_82577) {
253 phy->ops.check_polarity = e1000_check_polarity_82577;
254 phy->ops.force_speed_duplex =
255 e1000_phy_force_speed_duplex_82577;
256 phy->ops.get_cable_length = e1000_get_cable_length_82577;
257 phy->ops.get_phy_info = e1000_get_phy_info_82577;
258 phy->ops.commit_phy = e1000e_phy_sw_reset;
259 }
260
261 return ret_val;
262}
263
264/**
216 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers 265 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
217 * @hw: pointer to the HW structure 266 * @hw: pointer to the HW structure
218 * 267 *
@@ -273,6 +322,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
273 break; 322 break;
274 } 323 }
275 324
325 phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
326
276 return 0; 327 return 0;
277} 328}
278 329
@@ -358,6 +409,36 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
358 /* Set if manageability features are enabled. */ 409 /* Set if manageability features are enabled. */
359 mac->arc_subsystem_valid = 1; 410 mac->arc_subsystem_valid = 1;
360 411
412 /* LED operations */
413 switch (mac->type) {
414 case e1000_ich8lan:
415 case e1000_ich9lan:
416 case e1000_ich10lan:
417 /* ID LED init */
418 mac->ops.id_led_init = e1000e_id_led_init;
419 /* setup LED */
420 mac->ops.setup_led = e1000e_setup_led_generic;
421 /* cleanup LED */
422 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
423 /* turn on/off LED */
424 mac->ops.led_on = e1000_led_on_ich8lan;
425 mac->ops.led_off = e1000_led_off_ich8lan;
426 break;
427 case e1000_pchlan:
428 /* ID LED init */
429 mac->ops.id_led_init = e1000_id_led_init_pchlan;
430 /* setup LED */
431 mac->ops.setup_led = e1000_setup_led_pchlan;
432 /* cleanup LED */
433 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
434 /* turn on/off LED */
435 mac->ops.led_on = e1000_led_on_pchlan;
436 mac->ops.led_off = e1000_led_off_pchlan;
437 break;
438 default:
439 break;
440 }
441
361 /* Enable PCS Lock-loss workaround for ICH8 */ 442 /* Enable PCS Lock-loss workaround for ICH8 */
362 if (mac->type == e1000_ich8lan) 443 if (mac->type == e1000_ich8lan)
363 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1); 444 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1);
@@ -378,10 +459,18 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
378 if (rc) 459 if (rc)
379 return rc; 460 return rc;
380 461
381 rc = e1000_init_phy_params_ich8lan(hw); 462 if (hw->mac.type == e1000_pchlan)
463 rc = e1000_init_phy_params_pchlan(hw);
464 else
465 rc = e1000_init_phy_params_ich8lan(hw);
382 if (rc) 466 if (rc)
383 return rc; 467 return rc;
384 468
469 if (adapter->hw.phy.type == e1000_phy_ife) {
470 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
471 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
472 }
473
385 if ((adapter->hw.mac.type == e1000_ich8lan) && 474 if ((adapter->hw.mac.type == e1000_ich8lan) &&
386 (adapter->hw.phy.type == e1000_phy_igp_3)) 475 (adapter->hw.phy.type == e1000_phy_igp_3))
387 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; 476 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
@@ -410,12 +499,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
410 499
411 while (timeout) { 500 while (timeout) {
412 extcnf_ctrl = er32(EXTCNF_CTRL); 501 extcnf_ctrl = er32(EXTCNF_CTRL);
413 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
414 ew32(EXTCNF_CTRL, extcnf_ctrl);
415 502
416 extcnf_ctrl = er32(EXTCNF_CTRL); 503 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) {
417 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) 504 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
418 break; 505 ew32(EXTCNF_CTRL, extcnf_ctrl);
506
507 extcnf_ctrl = er32(EXTCNF_CTRL);
508 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
509 break;
510 }
419 mdelay(1); 511 mdelay(1);
420 timeout--; 512 timeout--;
421 } 513 }
@@ -555,6 +647,53 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
555} 647}
556 648
557/** 649/**
650 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
651 * done after every PHY reset.
652 **/
653static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
654{
655 s32 ret_val = 0;
656
657 if (hw->mac.type != e1000_pchlan)
658 return ret_val;
659
660 if (((hw->phy.type == e1000_phy_82577) &&
661 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
662 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
663 /* Disable generation of early preamble */
664 ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
665 if (ret_val)
666 return ret_val;
667
668 /* Preamble tuning for SSC */
669 ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204);
670 if (ret_val)
671 return ret_val;
672 }
673
674 if (hw->phy.type == e1000_phy_82578) {
675 /*
676 * Return registers to default by doing a soft reset then
677 * writing 0x3140 to the control register.
678 */
679 if (hw->phy.revision < 2) {
680 e1000e_phy_sw_reset(hw);
681 ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
682 }
683 }
684
685 /* Select page 0 */
686 ret_val = hw->phy.ops.acquire_phy(hw);
687 if (ret_val)
688 return ret_val;
689 hw->phy.addr = 1;
690 e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
691 hw->phy.ops.release_phy(hw);
692
693 return ret_val;
694}
695
696/**
558 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset 697 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
559 * @hw: pointer to the HW structure 698 * @hw: pointer to the HW structure
560 * 699 *
@@ -575,6 +714,12 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
575 if (ret_val) 714 if (ret_val)
576 return ret_val; 715 return ret_val;
577 716
717 if (hw->mac.type == e1000_pchlan) {
718 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
719 if (ret_val)
720 return ret_val;
721 }
722
578 /* 723 /*
579 * Initialize the PHY from the NVM on ICH platforms. This 724 * Initialize the PHY from the NVM on ICH platforms. This
580 * is needed due to an issue where the NVM configuration is 725 * is needed due to an issue where the NVM configuration is
@@ -701,7 +846,7 @@ static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
701 phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE)); 846 phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE));
702 847
703 if (phy->polarity_correction) { 848 if (phy->polarity_correction) {
704 ret_val = e1000_check_polarity_ife_ich8lan(hw); 849 ret_val = phy->ops.check_polarity(hw);
705 if (ret_val) 850 if (ret_val)
706 return ret_val; 851 return ret_val;
707 } else { 852 } else {
@@ -741,6 +886,8 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
741 break; 886 break;
742 case e1000_phy_igp_3: 887 case e1000_phy_igp_3:
743 case e1000_phy_bm: 888 case e1000_phy_bm:
889 case e1000_phy_82578:
890 case e1000_phy_82577:
744 return e1000e_get_phy_info_igp(hw); 891 return e1000e_get_phy_info_igp(hw);
745 break; 892 break;
746 default: 893 default:
@@ -1852,6 +1999,79 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
1852} 1999}
1853 2000
1854/** 2001/**
2002 * e1000_id_led_init_pchlan - store LED configurations
2003 * @hw: pointer to the HW structure
2004 *
 2005 * PCH does not control LEDs via the LEDCTL register; rather, it uses
2006 * the PHY LED configuration register.
2007 *
2008 * PCH also does not have an "always on" or "always off" mode which
2009 * complicates the ID feature. Instead of using the "on" mode to indicate
2010 * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()),
2011 * use "link_up" mode. The LEDs will still ID on request if there is no
2012 * link based on logic in e1000_led_[on|off]_pchlan().
2013 **/
2014static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
2015{
2016 struct e1000_mac_info *mac = &hw->mac;
2017 s32 ret_val;
2018 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
2019 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
2020 u16 data, i, temp, shift;
2021
2022 /* Get default ID LED modes */
2023 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
2024 if (ret_val)
2025 goto out;
2026
2027 mac->ledctl_default = er32(LEDCTL);
2028 mac->ledctl_mode1 = mac->ledctl_default;
2029 mac->ledctl_mode2 = mac->ledctl_default;
2030
2031 for (i = 0; i < 4; i++) {
2032 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
2033 shift = (i * 5);
2034 switch (temp) {
2035 case ID_LED_ON1_DEF2:
2036 case ID_LED_ON1_ON2:
2037 case ID_LED_ON1_OFF2:
2038 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
2039 mac->ledctl_mode1 |= (ledctl_on << shift);
2040 break;
2041 case ID_LED_OFF1_DEF2:
2042 case ID_LED_OFF1_ON2:
2043 case ID_LED_OFF1_OFF2:
2044 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
2045 mac->ledctl_mode1 |= (ledctl_off << shift);
2046 break;
2047 default:
2048 /* Do nothing */
2049 break;
2050 }
2051 switch (temp) {
2052 case ID_LED_DEF1_ON2:
2053 case ID_LED_ON1_ON2:
2054 case ID_LED_OFF1_ON2:
2055 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
2056 mac->ledctl_mode2 |= (ledctl_on << shift);
2057 break;
2058 case ID_LED_DEF1_OFF2:
2059 case ID_LED_ON1_OFF2:
2060 case ID_LED_OFF1_OFF2:
2061 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
2062 mac->ledctl_mode2 |= (ledctl_off << shift);
2063 break;
2064 default:
2065 /* Do nothing */
2066 break;
2067 }
2068 }
2069
2070out:
2071 return ret_val;
2072}
2073
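
[Editor's note] For reference, a small standalone sketch of the field math in e1000_id_led_init_pchlan() above: the NVM default word carries a 4-bit mode nibble per LED, while the PHY LED configuration packs 5 bits per LED, so each nibble is decoded and re-placed at shift i*5. The constants and sample word below are assumptions for illustration only:

/* Sketch of the per-LED field math used above: four 4-bit mode nibbles in the
 * NVM default word map onto 5-bit fields in the PHY LED register.  The mask,
 * field width and sample word are illustrative, not the driver's defines. */
#include <stdio.h>

#define NIBBLE_MASK    0xF           /* stands in for E1000_LEDCTL_LED0_MODE_MASK */
#define LED_FIELD_BITS 5             /* 5-bit field per LED in the PHY register */

int main(void)
{
	unsigned short data = 0x4321;    /* hypothetical NVM ID-LED default word */
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int mode  = (data >> (i << 2)) & NIBBLE_MASK;  /* 4-bit source */
		unsigned int shift = i * LED_FIELD_BITS;                /* 5-bit destination */

		printf("LED%d: nvm mode 0x%x -> field at bits [%u..%u]\n",
		       i, mode, shift, shift + LED_FIELD_BITS - 1);
	}
	return 0;
}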
2074/**
1855 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width 2075 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
1856 * @hw: pointer to the HW structure 2076 * @hw: pointer to the HW structure
1857 * 2077 *
@@ -1960,6 +2180,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1960 kab |= E1000_KABGTXD_BGSQLBIAS; 2180 kab |= E1000_KABGTXD_BGSQLBIAS;
1961 ew32(KABGTXD, kab); 2181 ew32(KABGTXD, kab);
1962 2182
2183 if (hw->mac.type == e1000_pchlan)
2184 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2185
1963 return ret_val; 2186 return ret_val;
1964} 2187}
1965 2188
@@ -1985,7 +2208,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
1985 e1000_initialize_hw_bits_ich8lan(hw); 2208 e1000_initialize_hw_bits_ich8lan(hw);
1986 2209
1987 /* Initialize identification LED */ 2210 /* Initialize identification LED */
1988 ret_val = e1000e_id_led_init(hw); 2211 ret_val = mac->ops.id_led_init(hw);
1989 if (ret_val) { 2212 if (ret_val) {
1990 hw_dbg(hw, "Error initializing identification LED\n"); 2213 hw_dbg(hw, "Error initializing identification LED\n");
1991 return ret_val; 2214 return ret_val;
@@ -2031,6 +2254,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
2031 ew32(CTRL_EXT, ctrl_ext); 2254 ew32(CTRL_EXT, ctrl_ext);
2032 2255
2033 /* 2256 /*
 2257 * The 82578 Rx buffer will stall if wakeup is enabled in the host and
2258 * the ME. Reading the BM_WUC register will clear the host wakeup bit.
2259 * Reset the phy after disabling host wakeup to reset the Rx buffer.
2260 */
2261 if (hw->phy.type == e1000_phy_82578) {
2262 e1e_rphy(hw, BM_WUC, &i);
2263 e1000e_phy_hw_reset_generic(hw);
2264 }
2265
2266 /*
2034 * Clear all of the statistics registers (clear on read). It is 2267 * Clear all of the statistics registers (clear on read). It is
2035 * important that we do this after we have tried to establish link 2268 * important that we do this after we have tried to establish link
2036 * because the symbol error count will increment wildly if there 2269 * because the symbol error count will increment wildly if there
@@ -2054,6 +2287,9 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
2054 /* Extended Device Control */ 2287 /* Extended Device Control */
2055 reg = er32(CTRL_EXT); 2288 reg = er32(CTRL_EXT);
2056 reg |= (1 << 22); 2289 reg |= (1 << 22);
2290 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
2291 if (hw->mac.type >= e1000_pchlan)
2292 reg |= E1000_CTRL_EXT_PHYPDEN;
2057 ew32(CTRL_EXT, reg); 2293 ew32(CTRL_EXT, reg);
2058 2294
2059 /* Transmit Descriptor Control 0 */ 2295 /* Transmit Descriptor Control 0 */
@@ -2112,8 +2348,13 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
2112 * the default flow control setting, so we explicitly 2348 * the default flow control setting, so we explicitly
2113 * set it to full. 2349 * set it to full.
2114 */ 2350 */
2115 if (hw->fc.requested_mode == e1000_fc_default) 2351 if (hw->fc.requested_mode == e1000_fc_default) {
2116 hw->fc.requested_mode = e1000_fc_full; 2352 /* Workaround h/w hang when Tx flow control enabled */
2353 if (hw->mac.type == e1000_pchlan)
2354 hw->fc.requested_mode = e1000_fc_rx_pause;
2355 else
2356 hw->fc.requested_mode = e1000_fc_full;
2357 }
2117 2358
2118 /* 2359 /*
2119 * Save off the requested flow control mode for use later. Depending 2360 * Save off the requested flow control mode for use later. Depending
@@ -2130,6 +2371,14 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
2130 return ret_val; 2371 return ret_val;
2131 2372
2132 ew32(FCTTV, hw->fc.pause_time); 2373 ew32(FCTTV, hw->fc.pause_time);
2374 if ((hw->phy.type == e1000_phy_82578) ||
2375 (hw->phy.type == e1000_phy_82577)) {
2376 ret_val = hw->phy.ops.write_phy_reg(hw,
2377 PHY_REG(BM_PORT_CTRL_PAGE, 27),
2378 hw->fc.pause_time);
2379 if (ret_val)
2380 return ret_val;
2381 }
2133 2382
2134 return e1000e_set_fc_watermarks(hw); 2383 return e1000e_set_fc_watermarks(hw);
2135} 2384}
@@ -2169,18 +2418,26 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
2169 if (ret_val) 2418 if (ret_val)
2170 return ret_val; 2419 return ret_val;
2171 2420
2172 if (hw->phy.type == e1000_phy_igp_3) { 2421 switch (hw->phy.type) {
2422 case e1000_phy_igp_3:
2173 ret_val = e1000e_copper_link_setup_igp(hw); 2423 ret_val = e1000e_copper_link_setup_igp(hw);
2174 if (ret_val) 2424 if (ret_val)
2175 return ret_val; 2425 return ret_val;
2176 } else if (hw->phy.type == e1000_phy_bm) { 2426 break;
2427 case e1000_phy_bm:
2428 case e1000_phy_82578:
2177 ret_val = e1000e_copper_link_setup_m88(hw); 2429 ret_val = e1000e_copper_link_setup_m88(hw);
2178 if (ret_val) 2430 if (ret_val)
2179 return ret_val; 2431 return ret_val;
2180 } 2432 break;
2181 2433 case e1000_phy_82577:
2182 if (hw->phy.type == e1000_phy_ife) { 2434 ret_val = e1000_copper_link_setup_82577(hw);
2183 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data); 2435 if (ret_val)
2436 return ret_val;
2437 break;
2438 case e1000_phy_ife:
2439 ret_val = hw->phy.ops.read_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
2440 &reg_data);
2184 if (ret_val) 2441 if (ret_val)
2185 return ret_val; 2442 return ret_val;
2186 2443
@@ -2198,9 +2455,13 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
2198 reg_data |= IFE_PMC_AUTO_MDIX; 2455 reg_data |= IFE_PMC_AUTO_MDIX;
2199 break; 2456 break;
2200 } 2457 }
2201 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); 2458 ret_val = hw->phy.ops.write_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
2459 reg_data);
2202 if (ret_val) 2460 if (ret_val)
2203 return ret_val; 2461 return ret_val;
2462 break;
2463 default:
2464 break;
2204 } 2465 }
2205 return e1000e_setup_copper_link(hw); 2466 return e1000e_setup_copper_link(hw);
2206} 2467}
@@ -2417,18 +2678,26 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
2417 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation 2678 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
2418 * to a lower speed. 2679 * to a lower speed.
2419 * 2680 *
2420 * Should only be called for ICH9 and ICH10 devices. 2681 * Should only be called for applicable parts.
2421 **/ 2682 **/
2422void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 2683void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
2423{ 2684{
2424 u32 phy_ctrl; 2685 u32 phy_ctrl;
2425 2686
2426 if ((hw->mac.type == e1000_ich10lan) || 2687 switch (hw->mac.type) {
2427 (hw->mac.type == e1000_ich9lan)) { 2688 case e1000_ich9lan:
2689 case e1000_ich10lan:
2690 case e1000_pchlan:
2428 phy_ctrl = er32(PHY_CTRL); 2691 phy_ctrl = er32(PHY_CTRL);
2429 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | 2692 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
2430 E1000_PHY_CTRL_GBE_DISABLE; 2693 E1000_PHY_CTRL_GBE_DISABLE;
2431 ew32(PHY_CTRL, phy_ctrl); 2694 ew32(PHY_CTRL, phy_ctrl);
2695
2696 /* Workaround SWFLAG unexpectedly set during S0->Sx */
2697 if (hw->mac.type == e1000_pchlan)
2698 udelay(500);
2699 default:
2700 break;
2432 } 2701 }
2433 2702
2434 return; 2703 return;
@@ -2482,13 +2751,99 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
2482} 2751}
2483 2752
2484/** 2753/**
2754 * e1000_setup_led_pchlan - Configures SW controllable LED
2755 * @hw: pointer to the HW structure
2756 *
2757 * This prepares the SW controllable LED for use.
2758 **/
2759static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
2760{
2761 return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG,
2762 (u16)hw->mac.ledctl_mode1);
2763}
2764
2765/**
2766 * e1000_cleanup_led_pchlan - Restore the default LED operation
2767 * @hw: pointer to the HW structure
2768 *
2769 * Return the LED back to the default configuration.
2770 **/
2771static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
2772{
2773 return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG,
2774 (u16)hw->mac.ledctl_default);
2775}
2776
2777/**
2778 * e1000_led_on_pchlan - Turn LEDs on
2779 * @hw: pointer to the HW structure
2780 *
2781 * Turn on the LEDs.
2782 **/
2783static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
2784{
2785 u16 data = (u16)hw->mac.ledctl_mode2;
2786 u32 i, led;
2787
2788 /*
2789 * If no link, then turn LED on by setting the invert bit
 2790 * for each LED whose mode is "link_up" in ledctl_mode2.
2791 */
2792 if (!(er32(STATUS) & E1000_STATUS_LU)) {
2793 for (i = 0; i < 3; i++) {
2794 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
2795 if ((led & E1000_PHY_LED0_MODE_MASK) !=
2796 E1000_LEDCTL_MODE_LINK_UP)
2797 continue;
2798 if (led & E1000_PHY_LED0_IVRT)
2799 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
2800 else
2801 data |= (E1000_PHY_LED0_IVRT << (i * 5));
2802 }
2803 }
2804
2805 return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data);
2806}
2807
2808/**
2809 * e1000_led_off_pchlan - Turn LEDs off
2810 * @hw: pointer to the HW structure
2811 *
2812 * Turn off the LEDs.
2813 **/
2814static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
2815{
2816 u16 data = (u16)hw->mac.ledctl_mode1;
2817 u32 i, led;
2818
2819 /*
2820 * If no link, then turn LED off by clearing the invert bit
 2821 * for each LED whose mode is "link_up" in ledctl_mode1.
2822 */
2823 if (!(er32(STATUS) & E1000_STATUS_LU)) {
2824 for (i = 0; i < 3; i++) {
2825 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
2826 if ((led & E1000_PHY_LED0_MODE_MASK) !=
2827 E1000_LEDCTL_MODE_LINK_UP)
2828 continue;
2829 if (led & E1000_PHY_LED0_IVRT)
2830 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
2831 else
2832 data |= (E1000_PHY_LED0_IVRT << (i * 5));
2833 }
2834 }
2835
2836 return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data);
2837}
2838
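
[Editor's note] A compilable sketch of the no-link ID behaviour implemented by e1000_led_on_pchlan()/e1000_led_off_pchlan() above: every 5-bit LED field whose mode is "link up" gets its invert bit toggled, so the LED can still identify the port when there is no link. The bit encodings here are stand-ins, not the driver's defines:

/* Toggle the invert bit of every "link up" LED field, as the functions above
 * do when there is no link.  All constants are assumed example values. */
#include <stdio.h>

#define LED_MASK      0x1F           /* stands in for E1000_PHY_LED0_MASK */
#define LED_MODE_MASK 0x0F           /* stands in for E1000_PHY_LED0_MODE_MASK */
#define LED_MODE_LINK 0x06           /* assumed "link up" mode encoding */
#define LED_IVRT      0x10           /* stands in for E1000_PHY_LED0_IVRT */

int main(void)
{
	unsigned short data = (LED_MODE_LINK << 0) | (LED_MODE_LINK << 5) | (0x2 << 10);
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int led = (data >> (i * 5)) & LED_MASK;

		if ((led & LED_MODE_MASK) != LED_MODE_LINK)
			continue;                         /* leave non-link LEDs alone */
		data ^= LED_IVRT << (i * 5);              /* toggle the invert bit */
	}
	printf("patched LED config: 0x%04x\n", (unsigned)data);
	return 0;
}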
2839/**
2485 * e1000_get_cfg_done_ich8lan - Read config done bit 2840 * e1000_get_cfg_done_ich8lan - Read config done bit
2486 * @hw: pointer to the HW structure 2841 * @hw: pointer to the HW structure
2487 * 2842 *
2488 * Read the management control register for the config done bit for 2843 * Read the management control register for the config done bit for
2489 * completion status. NOTE: silicon which is EEPROM-less will fail trying 2844 * completion status. NOTE: silicon which is EEPROM-less will fail trying
2490 * to read the config done bit, so an error is *ONLY* logged and returns 2845 * to read the config done bit, so an error is *ONLY* logged and returns
2491 * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon 2846 * 0. If we were to return with error, EEPROM-less silicon
2492 * would not be able to be reset or change link. 2847 * would not be able to be reset or change link.
2493 **/ 2848 **/
2494static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) 2849static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
@@ -2498,7 +2853,8 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
2498 e1000e_get_cfg_done(hw); 2853 e1000e_get_cfg_done(hw);
2499 2854
2500 /* If EEPROM is not marked present, init the IGP 3 PHY manually */ 2855 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
2501 if (hw->mac.type != e1000_ich10lan) { 2856 if ((hw->mac.type != e1000_ich10lan) &&
2857 (hw->mac.type != e1000_pchlan)) {
2502 if (((er32(EECD) & E1000_EECD_PRES) == 0) && 2858 if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
2503 (hw->phy.type == e1000_phy_igp_3)) { 2859 (hw->phy.type == e1000_phy_igp_3)) {
2504 e1000e_phy_init_script_igp3(hw); 2860 e1000e_phy_init_script_igp3(hw);
@@ -2524,6 +2880,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
2524static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) 2880static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
2525{ 2881{
2526 u32 temp; 2882 u32 temp;
2883 u16 phy_data;
2527 2884
2528 e1000e_clear_hw_cntrs_base(hw); 2885 e1000e_clear_hw_cntrs_base(hw);
2529 2886
@@ -2541,22 +2898,42 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
2541 temp = er32(IAC); 2898 temp = er32(IAC);
2542 temp = er32(ICRXOC); 2899 temp = er32(ICRXOC);
2543 2900
2901 /* Clear PHY statistics registers */
2902 if ((hw->phy.type == e1000_phy_82578) ||
2903 (hw->phy.type == e1000_phy_82577)) {
2904 hw->phy.ops.read_phy_reg(hw, HV_SCC_UPPER, &phy_data);
2905 hw->phy.ops.read_phy_reg(hw, HV_SCC_LOWER, &phy_data);
2906 hw->phy.ops.read_phy_reg(hw, HV_ECOL_UPPER, &phy_data);
2907 hw->phy.ops.read_phy_reg(hw, HV_ECOL_LOWER, &phy_data);
2908 hw->phy.ops.read_phy_reg(hw, HV_MCC_UPPER, &phy_data);
2909 hw->phy.ops.read_phy_reg(hw, HV_MCC_LOWER, &phy_data);
2910 hw->phy.ops.read_phy_reg(hw, HV_LATECOL_UPPER, &phy_data);
2911 hw->phy.ops.read_phy_reg(hw, HV_LATECOL_LOWER, &phy_data);
2912 hw->phy.ops.read_phy_reg(hw, HV_COLC_UPPER, &phy_data);
2913 hw->phy.ops.read_phy_reg(hw, HV_COLC_LOWER, &phy_data);
2914 hw->phy.ops.read_phy_reg(hw, HV_DC_UPPER, &phy_data);
2915 hw->phy.ops.read_phy_reg(hw, HV_DC_LOWER, &phy_data);
2916 hw->phy.ops.read_phy_reg(hw, HV_TNCRS_UPPER, &phy_data);
2917 hw->phy.ops.read_phy_reg(hw, HV_TNCRS_LOWER, &phy_data);
2918 }
2544} 2919}
2545 2920
2546static struct e1000_mac_operations ich8_mac_ops = { 2921static struct e1000_mac_operations ich8_mac_ops = {
2922 .id_led_init = e1000e_id_led_init,
2547 .check_mng_mode = e1000_check_mng_mode_ich8lan, 2923 .check_mng_mode = e1000_check_mng_mode_ich8lan,
2548 .check_for_link = e1000e_check_for_copper_link, 2924 .check_for_link = e1000e_check_for_copper_link,
2549 .cleanup_led = e1000_cleanup_led_ich8lan, 2925 /* cleanup_led dependent on mac type */
2550 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, 2926 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
2551 .get_bus_info = e1000_get_bus_info_ich8lan, 2927 .get_bus_info = e1000_get_bus_info_ich8lan,
2552 .get_link_up_info = e1000_get_link_up_info_ich8lan, 2928 .get_link_up_info = e1000_get_link_up_info_ich8lan,
2553 .led_on = e1000_led_on_ich8lan, 2929 /* led_on dependent on mac type */
2554 .led_off = e1000_led_off_ich8lan, 2930 /* led_off dependent on mac type */
2555 .update_mc_addr_list = e1000e_update_mc_addr_list_generic, 2931 .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
2556 .reset_hw = e1000_reset_hw_ich8lan, 2932 .reset_hw = e1000_reset_hw_ich8lan,
2557 .init_hw = e1000_init_hw_ich8lan, 2933 .init_hw = e1000_init_hw_ich8lan,
2558 .setup_link = e1000_setup_link_ich8lan, 2934 .setup_link = e1000_setup_link_ich8lan,
2559 .setup_physical_interface= e1000_setup_copper_link_ich8lan, 2935 .setup_physical_interface= e1000_setup_copper_link_ich8lan,
2936 /* id_led_init dependent on mac type */
2560}; 2937};
2561 2938
2562static struct e1000_phy_operations ich8_phy_ops = { 2939static struct e1000_phy_operations ich8_phy_ops = {
@@ -2595,6 +2972,7 @@ struct e1000_info e1000_ich8_info = {
2595 | FLAG_HAS_FLASH 2972 | FLAG_HAS_FLASH
2596 | FLAG_APME_IN_WUC, 2973 | FLAG_APME_IN_WUC,
2597 .pba = 8, 2974 .pba = 8,
2975 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
2598 .get_variants = e1000_get_variants_ich8lan, 2976 .get_variants = e1000_get_variants_ich8lan,
2599 .mac_ops = &ich8_mac_ops, 2977 .mac_ops = &ich8_mac_ops,
2600 .phy_ops = &ich8_phy_ops, 2978 .phy_ops = &ich8_phy_ops,
@@ -2613,6 +2991,7 @@ struct e1000_info e1000_ich9_info = {
2613 | FLAG_HAS_FLASH 2991 | FLAG_HAS_FLASH
2614 | FLAG_APME_IN_WUC, 2992 | FLAG_APME_IN_WUC,
2615 .pba = 10, 2993 .pba = 10,
2994 .max_hw_frame_size = DEFAULT_JUMBO,
2616 .get_variants = e1000_get_variants_ich8lan, 2995 .get_variants = e1000_get_variants_ich8lan,
2617 .mac_ops = &ich8_mac_ops, 2996 .mac_ops = &ich8_mac_ops,
2618 .phy_ops = &ich8_phy_ops, 2997 .phy_ops = &ich8_phy_ops,
@@ -2631,6 +3010,25 @@ struct e1000_info e1000_ich10_info = {
2631 | FLAG_HAS_FLASH 3010 | FLAG_HAS_FLASH
2632 | FLAG_APME_IN_WUC, 3011 | FLAG_APME_IN_WUC,
2633 .pba = 10, 3012 .pba = 10,
3013 .max_hw_frame_size = DEFAULT_JUMBO,
3014 .get_variants = e1000_get_variants_ich8lan,
3015 .mac_ops = &ich8_mac_ops,
3016 .phy_ops = &ich8_phy_ops,
3017 .nvm_ops = &ich8_nvm_ops,
3018};
3019
3020struct e1000_info e1000_pch_info = {
3021 .mac = e1000_pchlan,
3022 .flags = FLAG_IS_ICH
3023 | FLAG_HAS_WOL
3024 | FLAG_RX_CSUM_ENABLED
3025 | FLAG_HAS_CTRLEXT_ON_LOAD
3026 | FLAG_HAS_AMT
3027 | FLAG_HAS_FLASH
3028 | FLAG_HAS_JUMBO_FRAMES
3029 | FLAG_APME_IN_WUC,
3030 .pba = 26,
3031 .max_hw_frame_size = 4096,
2634 .get_variants = e1000_get_variants_ich8lan, 3032 .get_variants = e1000_get_variants_ich8lan,
2635 .mac_ops = &ich8_mac_ops, 3033 .mac_ops = &ich8_mac_ops,
2636 .phy_ops = &ich8_phy_ops, 3034 .phy_ops = &ich8_phy_ops,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 18a4f5902f3b..be6d9e990374 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -378,6 +378,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
378 378
379 mac->get_link_status = 0; 379 mac->get_link_status = 0;
380 380
381 if (hw->phy.type == e1000_phy_82578) {
382 ret_val = e1000_link_stall_workaround_hv(hw);
383 if (ret_val)
384 return ret_val;
385 }
386
381 /* 387 /*
382 * Check if there was DownShift, must be checked 388 * Check if there was DownShift, must be checked
383 * immediately after link-up 389 * immediately after link-up
@@ -1406,6 +1412,38 @@ s32 e1000e_id_led_init(struct e1000_hw *hw)
1406} 1412}
1407 1413
1408/** 1414/**
1415 * e1000e_setup_led_generic - Configures SW controllable LED
1416 * @hw: pointer to the HW structure
1417 *
1418 * This prepares the SW controllable LED for use and saves the current state
1419 * of the LED so it can be later restored.
1420 **/
1421s32 e1000e_setup_led_generic(struct e1000_hw *hw)
1422{
1423 u32 ledctl;
1424
1425 if (hw->mac.ops.setup_led != e1000e_setup_led_generic) {
1426 return -E1000_ERR_CONFIG;
1427 }
1428
1429 if (hw->phy.media_type == e1000_media_type_fiber) {
1430 ledctl = er32(LEDCTL);
1431 hw->mac.ledctl_default = ledctl;
1432 /* Turn off LED0 */
1433 ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
1434 E1000_LEDCTL_LED0_BLINK |
1435 E1000_LEDCTL_LED0_MODE_MASK);
1436 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
1437 E1000_LEDCTL_LED0_MODE_SHIFT);
1438 ew32(LEDCTL, ledctl);
1439 } else if (hw->phy.media_type == e1000_media_type_copper) {
1440 ew32(LEDCTL, hw->mac.ledctl_mode1);
1441 }
1442
1443 return 0;
1444}
1445
1446/**
1409 * e1000e_cleanup_led_generic - Set LED config to default operation 1447 * e1000e_cleanup_led_generic - Set LED config to default operation
1410 * @hw: pointer to the HW structure 1448 * @hw: pointer to the HW structure
1411 * 1449 *
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ccaaee0951cf..677f60490f67 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -48,7 +48,7 @@
48 48
49#include "e1000.h" 49#include "e1000.h"
50 50
51#define DRV_VERSION "0.3.3.4-k4" 51#define DRV_VERSION "1.0.2-k2"
52char e1000e_driver_name[] = "e1000e"; 52char e1000e_driver_name[] = "e1000e";
53const char e1000e_driver_version[] = DRV_VERSION; 53const char e1000e_driver_version[] = DRV_VERSION;
54 54
@@ -62,6 +62,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
62 [board_ich8lan] = &e1000_ich8_info, 62 [board_ich8lan] = &e1000_ich8_info,
63 [board_ich9lan] = &e1000_ich9_info, 63 [board_ich9lan] = &e1000_ich9_info,
64 [board_ich10lan] = &e1000_ich10_info, 64 [board_ich10lan] = &e1000_ich10_info,
65 [board_pchlan] = &e1000_pch_info,
65}; 66};
66 67
67#ifdef DEBUG 68#ifdef DEBUG
@@ -2255,8 +2256,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2255 ew32(TARC(1), tarc); 2256 ew32(TARC(1), tarc);
2256 } 2257 }
2257 2258
2258 e1000e_config_collision_dist(hw);
2259
2260 /* Setup Transmit Descriptor Settings for eop descriptor */ 2259 /* Setup Transmit Descriptor Settings for eop descriptor */
2261 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; 2260 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2262 2261
@@ -2269,6 +2268,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2269 2268
2270 ew32(TCTL, tctl); 2269 ew32(TCTL, tctl);
2271 2270
2271 e1000e_config_collision_dist(hw);
2272
2272 adapter->tx_queue_len = adapter->netdev->tx_queue_len; 2273 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
2273} 2274}
2274 2275
@@ -2308,6 +2309,23 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2308 if (adapter->flags2 & FLAG2_CRC_STRIPPING) 2309 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2309 rctl |= E1000_RCTL_SECRC; 2310 rctl |= E1000_RCTL_SECRC;
2310 2311
2312 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2313 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2314 u16 phy_data;
2315
2316 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2317 phy_data &= 0xfff8;
2318 phy_data |= (1 << 2);
2319 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2320
2321 e1e_rphy(hw, 22, &phy_data);
2322 phy_data &= 0x0fff;
2323 phy_data |= (1 << 14);
2324 e1e_wphy(hw, 0x10, 0x2823);
2325 e1e_wphy(hw, 0x11, 0x0003);
2326 e1e_wphy(hw, 22, phy_data);
2327 }
2328
2311 /* Setup buffer sizes */ 2329 /* Setup buffer sizes */
2312 rctl &= ~E1000_RCTL_SZ_4096; 2330 rctl &= ~E1000_RCTL_SZ_4096;
2313 rctl |= E1000_RCTL_BSEX; 2331 rctl |= E1000_RCTL_BSEX;
@@ -2751,23 +2769,25 @@ void e1000e_reset(struct e1000_adapter *adapter)
2751 /* 2769 /*
2752 * flow control settings 2770 * flow control settings
2753 * 2771 *
 2754 * The high water mark must be low enough to fit one full frame 2772 * The high water mark must be low enough to fit two full frames
2755 * (or the size used for early receive) above it in the Rx FIFO. 2773 * (or the size used for early receive) above it in the Rx FIFO.
2756 * Set it to the lower of: 2774 * Set it to the lower of:
2757 * - 90% of the Rx FIFO size, and 2775 * - 90% of the Rx FIFO size, and
2758 * - the full Rx FIFO size minus the early receive size (for parts 2776 * - the full Rx FIFO size minus the early receive size (for parts
2759 * with ERT support assuming ERT set to E1000_ERT_2048), or 2777 * with ERT support assuming ERT set to E1000_ERT_2048), or
2760 * - the full Rx FIFO size minus one full frame 2778 * - the full Rx FIFO size minus two full frames
2761 */ 2779 */
2762 if (adapter->flags & FLAG_HAS_ERT) 2780 if ((adapter->flags & FLAG_HAS_ERT) &&
2781 (adapter->netdev->mtu > ETH_DATA_LEN))
2763 hwm = min(((pba << 10) * 9 / 10), 2782 hwm = min(((pba << 10) * 9 / 10),
2764 ((pba << 10) - (E1000_ERT_2048 << 3))); 2783 ((pba << 10) - (E1000_ERT_2048 << 3)));
2765 else 2784 else
2766 hwm = min(((pba << 10) * 9 / 10), 2785 hwm = min(((pba << 10) * 9 / 10),
2767 ((pba << 10) - adapter->max_frame_size)); 2786 ((pba << 10) - (2 * adapter->max_frame_size)));
2768 2787
2769 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ 2788 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
2770 fc->low_water = fc->high_water - 8; 2789 fc->low_water = (fc->high_water - (2 * adapter->max_frame_size));
2790 fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */
2771 2791
2772 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 2792 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
2773 fc->pause_time = 0xFFFF; 2793 fc->pause_time = 0xFFFF;
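
[Editor's note] A worked example of the revised watermark math above, using an assumed 26 KB Rx allocation (the PCH .pba value added by this patch) and a standard 1522-byte maximum frame; the 8-byte-granularity masks (E1000_FCRTH_RTH/E1000_FCRTL_RTL) are approximated with ~0x7, and the numbers are purely illustrative:

/* High/low flow-control watermark computation, mirroring the logic above. */
#include <stdio.h>

int main(void)
{
	unsigned int pba = 26;                         /* KB assumed carved out for Rx */
	unsigned int max_frame = 1522;                 /* 1500 MTU + header + FCS */
	unsigned int fifo = pba << 10;                 /* bytes */
	unsigned int hwm, high, low;

	hwm = fifo * 9 / 10;                           /* 90% of the Rx FIFO */
	if (fifo - 2 * max_frame < hwm)                /* leave room for two full frames */
		hwm = fifo - 2 * max_frame;

	high = hwm & ~0x7u;                            /* 8-byte granularity */
	low  = (high - 2 * max_frame) & ~0x7u;         /* two frames below high water */

	printf("high_water=%u low_water=%u\n", high, low);
	return 0;
}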
@@ -2787,6 +2807,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
2787 e1000_get_hw_control(adapter); 2807 e1000_get_hw_control(adapter);
2788 2808
2789 ew32(WUC, 0); 2809 ew32(WUC, 0);
2810 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
2811 e1e_wphy(&adapter->hw, BM_WUC, 0);
2790 2812
2791 if (mac->ops.init_hw(hw)) 2813 if (mac->ops.init_hw(hw))
2792 e_err("Hardware Error\n"); 2814 e_err("Hardware Error\n");
@@ -2799,7 +2821,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
2799 e1000e_reset_adaptive(hw); 2821 e1000e_reset_adaptive(hw);
2800 e1000_get_phy_info(hw); 2822 e1000_get_phy_info(hw);
2801 2823
2802 if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) { 2824 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
2825 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
2803 u16 phy_data = 0; 2826 u16 phy_data = 0;
2804 /* 2827 /*
2805 * speed up time to link by disabling smart power down, ignore 2828 * speed up time to link by disabling smart power down, ignore
@@ -3266,6 +3289,7 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3266{ 3289{
3267 struct e1000_hw *hw = &adapter->hw; 3290 struct e1000_hw *hw = &adapter->hw;
3268 struct pci_dev *pdev = adapter->pdev; 3291 struct pci_dev *pdev = adapter->pdev;
3292 u16 phy_data;
3269 3293
3270 /* 3294 /*
3271 * Prevent stats update while adapter is being reset, or if the pci 3295 * Prevent stats update while adapter is being reset, or if the pci
@@ -3285,11 +3309,34 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3285 adapter->stats.roc += er32(ROC); 3309 adapter->stats.roc += er32(ROC);
3286 3310
3287 adapter->stats.mpc += er32(MPC); 3311 adapter->stats.mpc += er32(MPC);
3288 adapter->stats.scc += er32(SCC); 3312 if ((hw->phy.type == e1000_phy_82578) ||
3289 adapter->stats.ecol += er32(ECOL); 3313 (hw->phy.type == e1000_phy_82577)) {
3290 adapter->stats.mcc += er32(MCC); 3314 e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
3291 adapter->stats.latecol += er32(LATECOL); 3315 e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
3292 adapter->stats.dc += er32(DC); 3316 adapter->stats.scc += phy_data;
3317
3318 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
3319 e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
3320 adapter->stats.ecol += phy_data;
3321
3322 e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
3323 e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
3324 adapter->stats.mcc += phy_data;
3325
3326 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
3327 e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
3328 adapter->stats.latecol += phy_data;
3329
3330 e1e_rphy(hw, HV_DC_UPPER, &phy_data);
3331 e1e_rphy(hw, HV_DC_LOWER, &phy_data);
3332 adapter->stats.dc += phy_data;
3333 } else {
3334 adapter->stats.scc += er32(SCC);
3335 adapter->stats.ecol += er32(ECOL);
3336 adapter->stats.mcc += er32(MCC);
3337 adapter->stats.latecol += er32(LATECOL);
3338 adapter->stats.dc += er32(DC);
3339 }
3293 adapter->stats.xonrxc += er32(XONRXC); 3340 adapter->stats.xonrxc += er32(XONRXC);
3294 adapter->stats.xontxc += er32(XONTXC); 3341 adapter->stats.xontxc += er32(XONTXC);
3295 adapter->stats.xoffrxc += er32(XOFFRXC); 3342 adapter->stats.xoffrxc += er32(XOFFRXC);
@@ -3307,13 +3354,28 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3307 3354
3308 hw->mac.tx_packet_delta = er32(TPT); 3355 hw->mac.tx_packet_delta = er32(TPT);
3309 adapter->stats.tpt += hw->mac.tx_packet_delta; 3356 adapter->stats.tpt += hw->mac.tx_packet_delta;
3310 hw->mac.collision_delta = er32(COLC); 3357 if ((hw->phy.type == e1000_phy_82578) ||
3358 (hw->phy.type == e1000_phy_82577)) {
3359 e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
3360 e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
3361 hw->mac.collision_delta = phy_data;
3362 } else {
3363 hw->mac.collision_delta = er32(COLC);
3364 }
3311 adapter->stats.colc += hw->mac.collision_delta; 3365 adapter->stats.colc += hw->mac.collision_delta;
3312 3366
3313 adapter->stats.algnerrc += er32(ALGNERRC); 3367 adapter->stats.algnerrc += er32(ALGNERRC);
3314 adapter->stats.rxerrc += er32(RXERRC); 3368 adapter->stats.rxerrc += er32(RXERRC);
3315 if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583)) 3369 if ((hw->phy.type == e1000_phy_82578) ||
3316 adapter->stats.tncrs += er32(TNCRS); 3370 (hw->phy.type == e1000_phy_82577)) {
3371 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
3372 e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
3373 adapter->stats.tncrs += phy_data;
3374 } else {
3375 if ((hw->mac.type != e1000_82574) &&
3376 (hw->mac.type != e1000_82583))
3377 adapter->stats.tncrs += er32(TNCRS);
3378 }
3317 adapter->stats.cexterr += er32(CEXTERR); 3379 adapter->stats.cexterr += er32(CEXTERR);
3318 adapter->stats.tsctc += er32(TSCTC); 3380 adapter->stats.tsctc += er32(TSCTC);
3319 adapter->stats.tsctfc += er32(TSCTFC); 3381 adapter->stats.tsctfc += er32(TSCTFC);
@@ -3854,7 +3916,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3854 buffer_info->length = size; 3916 buffer_info->length = size;
3855 buffer_info->time_stamp = jiffies; 3917 buffer_info->time_stamp = jiffies;
3856 buffer_info->next_to_watch = i; 3918 buffer_info->next_to_watch = i;
3857 buffer_info->dma = map[0] + offset; 3919 buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
3858 count++; 3920 count++;
3859 3921
3860 len -= size; 3922 len -= size;
@@ -3885,7 +3947,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3885 buffer_info->length = size; 3947 buffer_info->length = size;
3886 buffer_info->time_stamp = jiffies; 3948 buffer_info->time_stamp = jiffies;
3887 buffer_info->next_to_watch = i; 3949 buffer_info->next_to_watch = i;
3888 buffer_info->dma = map[f + 1] + offset; 3950 buffer_info->dma = map[f] + offset;
3889 3951
3890 len -= size; 3952 len -= size;
3891 offset += size; 3953 offset += size;
@@ -4149,7 +4211,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4149 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); 4211 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
4150 if (count) { 4212 if (count) {
4151 e1000_tx_queue(adapter, tx_flags, count); 4213 e1000_tx_queue(adapter, tx_flags, count);
4152 netdev->trans_start = jiffies;
4153 /* Make sure there is space in the ring for the next send. */ 4214 /* Make sure there is space in the ring for the next send. */
4154 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); 4215 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
4155 4216
@@ -4210,27 +4271,17 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4210 struct e1000_adapter *adapter = netdev_priv(netdev); 4271 struct e1000_adapter *adapter = netdev_priv(netdev);
4211 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 4272 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4212 4273
4213 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || 4274 /* Jumbo frame support */
4214 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 4275 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
4215 e_err("Invalid MTU setting\n"); 4276 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
4277 e_err("Jumbo Frames not supported.\n");
4216 return -EINVAL; 4278 return -EINVAL;
4217 } 4279 }
4218 4280
4219 /* Jumbo frame size limits */ 4281 /* Supported frame sizes */
4220 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { 4282 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
4221 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 4283 (max_frame > adapter->max_hw_frame_size)) {
4222 e_err("Jumbo Frames not supported.\n"); 4284 e_err("Unsupported MTU setting\n");
4223 return -EINVAL;
4224 }
4225 if (adapter->hw.phy.type == e1000_phy_ife) {
4226 e_err("Jumbo Frames not supported.\n");
4227 return -EINVAL;
4228 }
4229 }
4230
4231#define MAX_STD_JUMBO_FRAME_SIZE 9234
4232 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
4233 e_err("MTU > 9216 not supported.\n");
4234 return -EINVAL; 4285 return -EINVAL;
4235 } 4286 }
4236 4287
@@ -4350,6 +4401,81 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4350 } 4401 }
4351} 4402}
4352 4403
4404static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
4405{
4406 struct e1000_hw *hw = &adapter->hw;
4407 u32 i, mac_reg;
4408 u16 phy_reg;
4409 int retval = 0;
4410
4411 /* copy MAC RARs to PHY RARs */
4412 for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4413 mac_reg = er32(RAL(i));
4414 e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
4415 e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
4416 mac_reg = er32(RAH(i));
4417 e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
4418 e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
4419 }
4420
4421 /* copy MAC MTA to PHY MTA */
4422 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4423 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4424 e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
4425 e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
4426 }
4427
4428 /* configure PHY Rx Control register */
4429 e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
4430 mac_reg = er32(RCTL);
4431 if (mac_reg & E1000_RCTL_UPE)
4432 phy_reg |= BM_RCTL_UPE;
4433 if (mac_reg & E1000_RCTL_MPE)
4434 phy_reg |= BM_RCTL_MPE;
4435 phy_reg &= ~(BM_RCTL_MO_MASK);
4436 if (mac_reg & E1000_RCTL_MO_3)
4437 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4438 << BM_RCTL_MO_SHIFT);
4439 if (mac_reg & E1000_RCTL_BAM)
4440 phy_reg |= BM_RCTL_BAM;
4441 if (mac_reg & E1000_RCTL_PMCF)
4442 phy_reg |= BM_RCTL_PMCF;
4443 mac_reg = er32(CTRL);
4444 if (mac_reg & E1000_CTRL_RFCE)
4445 phy_reg |= BM_RCTL_RFCE;
4446 e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);
4447
4448 /* enable PHY wakeup in MAC register */
4449 ew32(WUFC, wufc);
4450 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4451
4452 /* configure and enable PHY wakeup in PHY registers */
4453 e1e_wphy(&adapter->hw, BM_WUFC, wufc);
4454 e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4455
4456 /* activate PHY wakeup */
4457 retval = hw->phy.ops.acquire_phy(hw);
4458 if (retval) {
4459 e_err("Could not acquire PHY\n");
4460 return retval;
4461 }
4462 e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4463 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4464 retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
4465 if (retval) {
4466 e_err("Could not read PHY page 769\n");
4467 goto out;
4468 }
4469 phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4470 retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
4471 if (retval)
4472 e_err("Could not set PHY Host Wakeup bit\n");
4473out:
4474 hw->phy.ops.release_phy(hw);
4475
4476 return retval;
4477}
4478
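
[Editor's note] e1000_init_phy_wakeup() above mirrors 32-bit MAC receive-address and multicast-table registers into pairs of 16-bit PHY registers. A trivial sketch of that split, with a made-up register value and no hardware access:

/* Split a 32-bit MAC register value into the two 16-bit halves that the
 * driver writes to the PHY (illustrative value only). */
#include <stdio.h>

int main(void)
{
	unsigned int mac_reg = 0x8000a1b2;             /* pretend RAH contents */
	unsigned short lo = (unsigned short)(mac_reg & 0xFFFF);
	unsigned short hi = (unsigned short)((mac_reg >> 16) & 0xFFFF);

	/* In the driver these become two e1e_wphy() calls per 32-bit register. */
	printf("low half -> 0x%04x, high half -> 0x%04x\n", lo, hi);
	return 0;
}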
4353static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 4479static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4354{ 4480{
4355 struct net_device *netdev = pci_get_drvdata(pdev); 4481 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4392,8 +4518,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4392 #define E1000_CTRL_ADVD3WUC 0x00100000 4518 #define E1000_CTRL_ADVD3WUC 0x00100000
4393 /* phy power management enable */ 4519 /* phy power management enable */
4394 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 4520 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4395 ctrl |= E1000_CTRL_ADVD3WUC | 4521 ctrl |= E1000_CTRL_ADVD3WUC;
4396 E1000_CTRL_EN_PHY_PWR_MGMT; 4522 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
4523 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
4397 ew32(CTRL, ctrl); 4524 ew32(CTRL, ctrl);
4398 4525
4399 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 4526 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
@@ -4411,8 +4538,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4411 /* Allow time for pending master requests to run */ 4538 /* Allow time for pending master requests to run */
4412 e1000e_disable_pcie_master(&adapter->hw); 4539 e1000e_disable_pcie_master(&adapter->hw);
4413 4540
4414 ew32(WUC, E1000_WUC_PME_EN); 4541 if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
4415 ew32(WUFC, wufc); 4542 !(hw->mac.ops.check_mng_mode(hw))) {
4543 /* enable wakeup by the PHY */
4544 retval = e1000_init_phy_wakeup(adapter, wufc);
4545 if (retval)
4546 return retval;
4547 } else {
4548 /* enable wakeup by the MAC */
4549 ew32(WUFC, wufc);
4550 ew32(WUC, E1000_WUC_PME_EN);
4551 }
4416 } else { 4552 } else {
4417 ew32(WUC, 0); 4553 ew32(WUC, 0);
4418 ew32(WUFC, 0); 4554 ew32(WUFC, 0);
@@ -4555,8 +4691,37 @@ static int e1000_resume(struct pci_dev *pdev)
4555 } 4691 }
4556 4692
4557 e1000e_power_up_phy(adapter); 4693 e1000e_power_up_phy(adapter);
4694
4695 /* report the system wakeup cause from S3/S4 */
4696 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
4697 u16 phy_data;
4698
4699 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
4700 if (phy_data) {
4701 e_info("PHY Wakeup cause - %s\n",
4702 phy_data & E1000_WUS_EX ? "Unicast Packet" :
4703 phy_data & E1000_WUS_MC ? "Multicast Packet" :
4704 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
4705 phy_data & E1000_WUS_MAG ? "Magic Packet" :
 4706 phy_data & E1000_WUS_LNKC ? "Link Status"
 4707 " Change" : "other");
4708 }
4709 e1e_wphy(&adapter->hw, BM_WUS, ~0);
4710 } else {
4711 u32 wus = er32(WUS);
4712 if (wus) {
4713 e_info("MAC Wakeup cause - %s\n",
4714 wus & E1000_WUS_EX ? "Unicast Packet" :
4715 wus & E1000_WUS_MC ? "Multicast Packet" :
4716 wus & E1000_WUS_BC ? "Broadcast Packet" :
4717 wus & E1000_WUS_MAG ? "Magic Packet" :
4718 wus & E1000_WUS_LNKC ? "Link Status Change" :
4719 "other");
4720 }
4721 ew32(WUS, ~0);
4722 }
4723
4558 e1000e_reset(adapter); 4724 e1000e_reset(adapter);
4559 ew32(WUS, ~0);
4560 4725
4561 e1000_init_manageability(adapter); 4726 e1000_init_manageability(adapter);
4562 4727
@@ -4846,6 +5011,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4846 adapter->flags2 = ei->flags2; 5011 adapter->flags2 = ei->flags2;
4847 adapter->hw.adapter = adapter; 5012 adapter->hw.adapter = adapter;
4848 adapter->hw.mac.type = ei->mac; 5013 adapter->hw.mac.type = ei->mac;
5014 adapter->max_hw_frame_size = ei->max_hw_frame_size;
4849 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 5015 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
4850 5016
4851 mmio_start = pci_resource_start(pdev, 0); 5017 mmio_start = pci_resource_start(pdev, 0);
@@ -5001,6 +5167,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5001 /* APME bit in EEPROM is mapped to WUC.APME */ 5167 /* APME bit in EEPROM is mapped to WUC.APME */
5002 eeprom_data = er32(WUC); 5168 eeprom_data = er32(WUC);
5003 eeprom_apme_mask = E1000_WUC_APME; 5169 eeprom_apme_mask = E1000_WUC_APME;
5170 if (eeprom_data & E1000_WUC_PHY_WAKE)
5171 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
5004 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 5172 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
5005 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 5173 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
5006 (adapter->hw.bus.func == 1)) 5174 (adapter->hw.bus.func == 1))
@@ -5202,6 +5370,11 @@ static struct pci_device_id e1000_pci_tbl[] = {
5202 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, 5370 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
5203 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, 5371 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
5204 5372
5373 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
5374 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
5375 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
5376 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
5377
5205 { } /* terminate list */ 5378 { } /* terminate list */
5206}; 5379};
5207MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 5380MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index e909f96698e8..1342e0b1815c 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -427,6 +427,8 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
427 e1000_validate_option(&crc_stripping, &opt, adapter); 427 e1000_validate_option(&crc_stripping, &opt, adapter);
428 if (crc_stripping == OPTION_ENABLED) 428 if (crc_stripping == OPTION_ENABLED)
429 adapter->flags2 |= FLAG2_CRC_STRIPPING; 429 adapter->flags2 |= FLAG2_CRC_STRIPPING;
430 } else {
431 adapter->flags2 |= FLAG2_CRC_STRIPPING;
430 } 432 }
431 } 433 }
432 { /* Kumeran Lock Loss Workaround */ 434 { /* Kumeran Lock Loss Workaround */
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index dc4a9cba6a73..e23459cf3d0e 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -37,6 +37,9 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw);
37static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); 37static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
38static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, 38static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
39 u16 *data, bool read); 39 u16 *data, bool read);
40static u32 e1000_get_phy_addr_for_hv_page(u32 page);
41static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
42 u16 *data, bool read);
40 43
41/* Cable length tables */ 44/* Cable length tables */
42static const u16 e1000_m88_cable_length_table[] = 45static const u16 e1000_m88_cable_length_table[] =
@@ -54,6 +57,55 @@ static const u16 e1000_igp_2_cable_length_table[] =
54#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ 57#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
55 ARRAY_SIZE(e1000_igp_2_cable_length_table) 58 ARRAY_SIZE(e1000_igp_2_cable_length_table)
56 59
60#define BM_PHY_REG_PAGE(offset) \
61 ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
62#define BM_PHY_REG_NUM(offset) \
63 ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
64 (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
65 ~MAX_PHY_REG_ADDRESS)))
66
67#define HV_INTC_FC_PAGE_START 768
68#define I82578_ADDR_REG 29
69#define I82577_ADDR_REG 16
70#define I82577_CFG_REG 22
71#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
72#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
73#define I82577_CTRL_REG 23
74#define I82577_CTRL_DOWNSHIFT_MASK (7 << 10)
75
76/* 82577 specific PHY registers */
77#define I82577_PHY_CTRL_2 18
78#define I82577_PHY_STATUS_2 26
79#define I82577_PHY_DIAG_STATUS 31
80
81/* I82577 PHY Status 2 */
82#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
83#define I82577_PHY_STATUS2_MDIX 0x0800
84#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
85#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
86
87/* I82577 PHY Control 2 */
88#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
89#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
90
91/* I82577 PHY Diagnostics Status */
92#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
93#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
94
95/* BM PHY Copper Specific Control 1 */
96#define BM_CS_CTRL1 16
97
98/* BM PHY Copper Specific Status */
99#define BM_CS_STATUS 17
100#define BM_CS_STATUS_LINK_UP 0x0400
101#define BM_CS_STATUS_RESOLVED 0x0800
102#define BM_CS_STATUS_SPEED_MASK 0xC000
103#define BM_CS_STATUS_SPEED_1000 0x8000
104
105#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
106#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
107#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
108
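
[Editor's note] The BM_PHY_REG_PAGE/BM_PHY_REG_NUM macros above undo the page/register packing used by PHY_REG() addresses elsewhere in the patch (for example PHY_REG(769, 25)). A self-contained sketch under assumed values for the 5-bit register field and page shift; the PHY_UPPER_SHIFT handling for register numbers above 31 is left out:

/* Pack and unpack a page/register pair the way the macros above do.  The
 * shift and mask values are assumptions for the example, not driver defines. */
#include <stdio.h>

#define EX_PAGE_SHIFT 5                     /* assumed PHY_PAGE_SHIFT */
#define EX_REG_MASK   0x1F                  /* assumed MAX_PHY_REG_ADDRESS */

#define EX_PHY_REG(page, reg)  (((page) << EX_PAGE_SHIFT) | ((reg) & EX_REG_MASK))
#define EX_REG_PAGE(offset)    ((offset) >> EX_PAGE_SHIFT)
#define EX_REG_NUM(offset)     ((offset) & EX_REG_MASK)

int main(void)
{
	unsigned int offset = EX_PHY_REG(769, 25);    /* cf. PHY_REG(769, 25) in this patch */

	printf("offset 0x%x -> page %u, reg %u\n",
	       offset, EX_REG_PAGE(offset), EX_REG_NUM(offset));
	return 0;
}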
57/** 109/**
58 * e1000e_check_reset_block_generic - Check if PHY reset is blocked 110 * e1000e_check_reset_block_generic - Check if PHY reset is blocked
59 * @hw: pointer to the HW structure 111 * @hw: pointer to the HW structure
@@ -82,23 +134,48 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
82s32 e1000e_get_phy_id(struct e1000_hw *hw) 134s32 e1000e_get_phy_id(struct e1000_hw *hw)
83{ 135{
84 struct e1000_phy_info *phy = &hw->phy; 136 struct e1000_phy_info *phy = &hw->phy;
85 s32 ret_val; 137 s32 ret_val = 0;
86 u16 phy_id; 138 u16 phy_id;
139 u16 retry_count = 0;
87 140
88 ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); 141 if (!(phy->ops.read_phy_reg))
89 if (ret_val) 142 goto out;
90 return ret_val;
91 143
92 phy->id = (u32)(phy_id << 16); 144 while (retry_count < 2) {
93 udelay(20); 145 ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
94 ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); 146 if (ret_val)
95 if (ret_val) 147 goto out;
96 return ret_val;
97 148
98 phy->id |= (u32)(phy_id & PHY_REVISION_MASK); 149 phy->id = (u32)(phy_id << 16);
99 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); 150 udelay(20);
151 ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
152 if (ret_val)
153 goto out;
100 154
101 return 0; 155 phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
156 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
157
158 if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
159 goto out;
160
161 /*
162 * If the PHY ID is still unknown, we may have an 82577i
163 * without link. We will try again after setting Slow
164 * MDIC mode. No harm in trying again in this case since
165 * the PHY ID is unknown at this point anyway
166 */
167 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
168 if (ret_val)
169 goto out;
170
171 retry_count++;
172 }
173out:
174 /* Revert to MDIO fast mode, if applicable */
175 if (retry_count)
176 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
177
178 return ret_val;
102} 179}
103 180
104/** 181/**
@@ -410,6 +487,43 @@ s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
410} 487}
411 488
412/** 489/**
490 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
491 * @hw: pointer to the HW structure
492 *
493 * Sets up Carrier-sense on Transmit and downshift values.
494 **/
495s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
496{
497 struct e1000_phy_info *phy = &hw->phy;
498 s32 ret_val;
499 u16 phy_data;
500
501 /* Enable CRS on TX. This must be set for half-duplex operation. */
502 ret_val = phy->ops.read_phy_reg(hw, I82577_CFG_REG, &phy_data);
503 if (ret_val)
504 goto out;
505
506 phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
507
508 /* Enable downshift */
509 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
510
511 ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data);
512 if (ret_val)
513 goto out;
514
515 /* Set number of link attempts before downshift */
516 ret_val = phy->ops.read_phy_reg(hw, I82577_CTRL_REG, &phy_data);
517 if (ret_val)
518 goto out;
519 phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK;
520 ret_val = phy->ops.write_phy_reg(hw, I82577_CTRL_REG, phy_data);
521
522out:
523 return ret_val;
524}
525
526/**
413 * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link 527 * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link
414 * @hw: pointer to the HW structure 528 * @hw: pointer to the HW structure
415 * 529 *
@@ -427,8 +541,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
427 if (ret_val) 541 if (ret_val)
428 return ret_val; 542 return ret_val;
429 543
430 /* For newer PHYs this bit is downshift enable */ 544 /* For BM PHY this bit is downshift enable */
431 if (phy->type == e1000_phy_m88) 545 if (phy->type != e1000_phy_bm)
432 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 546 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
433 547
434 /* 548 /*
@@ -520,10 +634,27 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
520 634
521 /* Commit the changes. */ 635 /* Commit the changes. */
522 ret_val = e1000e_commit_phy(hw); 636 ret_val = e1000e_commit_phy(hw);
523 if (ret_val) 637 if (ret_val) {
524 hw_dbg(hw, "Error committing the PHY changes\n"); 638 hw_dbg(hw, "Error committing the PHY changes\n");
639 return ret_val;
640 }
525 641
526 return ret_val; 642 if (phy->type == e1000_phy_82578) {
643 ret_val = phy->ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
644 &phy_data);
645 if (ret_val)
646 return ret_val;
647
648 /* 82578 PHY - set the downshift count to 1x. */
649 phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
650 phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
651 ret_val = phy->ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
652 phy_data);
653 if (ret_val)
654 return ret_val;
655 }
656
657 return 0;
527} 658}
528 659
529/** 660/**
@@ -1251,6 +1382,8 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
1251 switch (phy->type) { 1382 switch (phy->type) {
1252 case e1000_phy_m88: 1383 case e1000_phy_m88:
1253 case e1000_phy_gg82563: 1384 case e1000_phy_gg82563:
1385 case e1000_phy_82578:
1386 case e1000_phy_82577:
1254 offset = M88E1000_PHY_SPEC_STATUS; 1387 offset = M88E1000_PHY_SPEC_STATUS;
1255 mask = M88E1000_PSSR_DOWNSHIFT; 1388 mask = M88E1000_PSSR_DOWNSHIFT;
1256 break; 1389 break;
@@ -1886,6 +2019,12 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
1886 case BME1000_E_PHY_ID_R2: 2019 case BME1000_E_PHY_ID_R2:
1887 phy_type = e1000_phy_bm; 2020 phy_type = e1000_phy_bm;
1888 break; 2021 break;
2022 case I82578_E_PHY_ID:
2023 phy_type = e1000_phy_82578;
2024 break;
2025 case I82577_E_PHY_ID:
2026 phy_type = e1000_phy_82577;
2027 break;
1889 default: 2028 default:
1890 phy_type = e1000_phy_unknown; 2029 phy_type = e1000_phy_unknown;
1891 break; 2030 break;
@@ -2181,11 +2320,16 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2181 u16 *data, bool read) 2320 u16 *data, bool read)
2182{ 2321{
2183 s32 ret_val; 2322 s32 ret_val;
2184 u16 reg = ((u16)offset) & PHY_REG_MASK; 2323 u16 reg = BM_PHY_REG_NUM(offset);
2185 u16 phy_reg = 0; 2324 u16 phy_reg = 0;
2186 u8 phy_acquired = 1; 2325 u8 phy_acquired = 1;
2187 2326
2188 2327
2328 /* Gig must be disabled for MDIO accesses to page 800 */
2329 if ((hw->mac.type == e1000_pchlan) &&
2330 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
2331 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
2332
2189 ret_val = hw->phy.ops.acquire_phy(hw); 2333 ret_val = hw->phy.ops.acquire_phy(hw);
2190 if (ret_val) { 2334 if (ret_val) {
2191 phy_acquired = 0; 2335 phy_acquired = 0;
@@ -2289,3 +2433,524 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
2289 2433
2290 return 0; 2434 return 0;
2291} 2435}
2436
2437s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
2438{
2439 s32 ret_val = 0;
2440 u16 data = 0;
2441
2442 ret_val = hw->phy.ops.acquire_phy(hw);
2443 if (ret_val)
2444 return ret_val;
2445
2446 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
2447 hw->phy.addr = 1;
2448 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2449 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
2450 if (ret_val) {
2451 hw->phy.ops.release_phy(hw);
2452 return ret_val;
2453 }
2454 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
2455 (0x2180 | (slow << 10)));
2456
2457 /* dummy read when reverting to fast mode - throw away result */
2458 if (!slow)
2459 e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
2460
2461 hw->phy.ops.release_phy(hw);
2462
2463 return ret_val;
2464}
2465
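
[Editor's note] As a quick sanity check of the constants in e1000_set_mdio_slow_mode_hv() above (its own comment documents 0x2180 as fast mode and 0x2580 as slow mode for page-769 register 16), setting bit 10 converts one into the other:

/* Verify that 0x2180 with bit 10 set equals 0x2580, matching the comment. */
#include <stdio.h>

int main(void)
{
	unsigned int base = 0x2180;                   /* fast-mode value */
	unsigned int slow = base | (1u << 10);        /* slow-mode value, 0x2580 */

	printf("fast=0x%04x slow=0x%04x\n", base, slow);
	return 0;
}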
2466/**
2467 * e1000_read_phy_reg_hv - Read HV PHY register
2468 * @hw: pointer to the HW structure
2469 * @offset: register offset to be read
2470 * @data: pointer to the read data
2471 *
2472 * Acquires semaphore, if necessary, then reads the PHY register at offset
 2473 * and stores the retrieved information in data. Releases any acquired
2474 * semaphore before exiting.
2475 **/
2476s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2477{
2478 s32 ret_val;
2479 u16 page = BM_PHY_REG_PAGE(offset);
2480 u16 reg = BM_PHY_REG_NUM(offset);
2481 bool in_slow_mode = false;
2482
2483 /* Workaround failure in MDIO access while cable is disconnected */
2484 if ((hw->phy.type == e1000_phy_82577) &&
2485 !(er32(STATUS) & E1000_STATUS_LU)) {
2486 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
2487 if (ret_val)
2488 goto out;
2489
2490 in_slow_mode = true;
2491 }
2492
2493 /* Page 800 works differently than the rest so it has its own func */
2494 if (page == BM_WUC_PAGE) {
2495 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
2496 data, true);
2497 goto out;
2498 }
2499
2500 if (page > 0 && page < HV_INTC_FC_PAGE_START) {
2501 ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
2502 data, true);
2503 goto out;
2504 }
2505
2506 ret_val = hw->phy.ops.acquire_phy(hw);
2507 if (ret_val)
2508 goto out;
2509
2510 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2511
2512 if (page == HV_INTC_FC_PAGE_START)
2513 page = 0;
2514
2515 if (reg > MAX_PHY_MULTI_PAGE_REG) {
2516 if ((hw->phy.type != e1000_phy_82578) ||
2517 ((reg != I82578_ADDR_REG) &&
2518 (reg != I82578_ADDR_REG + 1))) {
2519 u32 phy_addr = hw->phy.addr;
2520
2521 hw->phy.addr = 1;
2522
2523 /* Page is shifted left, PHY expects (page x 32) */
2524 ret_val = e1000e_write_phy_reg_mdic(hw,
2525 IGP01E1000_PHY_PAGE_SELECT,
2526 (page << IGP_PAGE_SHIFT));
2527 if (ret_val) {
2528 hw->phy.ops.release_phy(hw);
2529 goto out;
2530 }
2531 hw->phy.addr = phy_addr;
2532 }
2533 }
2534
2535 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2536 data);
2537 hw->phy.ops.release_phy(hw);
2538
2539out:
2540 /* Revert to MDIO fast mode, if applicable */
2541 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2542 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2543
2544 return ret_val;
2545}
2546
2547/**
2548 * e1000_write_phy_reg_hv - Write HV PHY register
2549 * @hw: pointer to the HW structure
2550 * @offset: register offset to write to
2551 * @data: data to write at register offset
2552 *
2553 * Acquires semaphore, if necessary, then writes the data to PHY register
 2554 * at the offset. Releases any acquired semaphores before exiting.
2555 **/
2556s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2557{
2558 s32 ret_val;
2559 u16 page = BM_PHY_REG_PAGE(offset);
2560 u16 reg = BM_PHY_REG_NUM(offset);
2561 bool in_slow_mode = false;
2562
2563 /* Workaround failure in MDIO access while cable is disconnected */
2564 if ((hw->phy.type == e1000_phy_82577) &&
2565 !(er32(STATUS) & E1000_STATUS_LU)) {
2566 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
2567 if (ret_val)
2568 goto out;
2569
2570 in_slow_mode = true;
2571 }
2572
2573 /* Page 800 works differently than the rest so it has its own func */
2574 if (page == BM_WUC_PAGE) {
2575 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
2576 &data, false);
2577 goto out;
2578 }
2579
2580 if (page > 0 && page < HV_INTC_FC_PAGE_START) {
2581 ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
2582 &data, false);
2583 goto out;
2584 }
2585
2586 ret_val = hw->phy.ops.acquire_phy(hw);
2587 if (ret_val)
2588 goto out;
2589
2590 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2591
2592 if (page == HV_INTC_FC_PAGE_START)
2593 page = 0;
2594
2595 /*
2596 * Workaround MDIO accesses being disabled after entering IEEE Power
2597 * Down (whenever bit 11 of the PHY Control register is set)
2598 */
2599 if ((hw->phy.type == e1000_phy_82578) &&
2600 (hw->phy.revision >= 1) &&
2601 (hw->phy.addr == 2) &&
2602 ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
2603 (data & (1 << 11))) {
2604 u16 data2 = 0x7EFF;
2605 hw->phy.ops.release_phy(hw);
2606 ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
2607 &data2, false);
2608 if (ret_val)
2609 goto out;
2610
2611 ret_val = hw->phy.ops.acquire_phy(hw);
2612 if (ret_val)
2613 goto out;
2614 }
2615
2616 if (reg > MAX_PHY_MULTI_PAGE_REG) {
2617 if ((hw->phy.type != e1000_phy_82578) ||
2618 ((reg != I82578_ADDR_REG) &&
2619 (reg != I82578_ADDR_REG + 1))) {
2620 u32 phy_addr = hw->phy.addr;
2621
2622 hw->phy.addr = 1;
2623
2624 /* Page is shifted left, PHY expects (page x 32) */
2625 ret_val = e1000e_write_phy_reg_mdic(hw,
2626 IGP01E1000_PHY_PAGE_SELECT,
2627 (page << IGP_PAGE_SHIFT));
2628 if (ret_val) {
2629 hw->phy.ops.release_phy(hw);
2630 goto out;
2631 }
2632 hw->phy.addr = phy_addr;
2633 }
2634 }
2635
2636 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2637 data);
2638 hw->phy.ops.release_phy(hw);
2639
2640out:
2641 /* Revert to MDIO fast mode, if applicable */
2642 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2643 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2644
2645 return ret_val;
2646}
2647
2648/**
2649 * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
2650 * @page: page to be accessed
2651 **/
2652static u32 e1000_get_phy_addr_for_hv_page(u32 page)
2653{
2654 u32 phy_addr = 2;
2655
2656 if (page >= HV_INTC_FC_PAGE_START)
2657 phy_addr = 1;
2658
2659 return phy_addr;
2660}
2661
2662/**
2663 * e1000_access_phy_debug_regs_hv - Access HV PHY vendor specific high registers
2664 * @hw: pointer to the HW structure
2665 * @offset: register offset to be read or written
2666 * @data: pointer to the data to be read or written
2667 * @read: determines if the operation is a read or a write
2668 *
2669 * Acquires the semaphore, if necessary, then reads or writes the PHY register
2670 * at offset, storing any retrieved information in data. Releases any acquired
2671 * semaphores before exiting. Note that the procedure to access these regs
2672 * uses the address port and data port to read/write.
2673 **/
2674static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2675 u16 *data, bool read)
2676{
2677 s32 ret_val;
2678 u32 addr_reg = 0;
2679 u32 data_reg = 0;
2680 u8 phy_acquired = 1;
2681
2682 /* This takes care of the difference between the desktop and mobile PHYs */
2683 addr_reg = (hw->phy.type == e1000_phy_82578) ?
2684 I82578_ADDR_REG : I82577_ADDR_REG;
2685 data_reg = addr_reg + 1;
2686
2687 ret_val = hw->phy.ops.acquire_phy(hw);
2688 if (ret_val) {
2689 hw_dbg(hw, "Could not acquire PHY\n");
2690 phy_acquired = 0;
2691 goto out;
2692 }
2693
2694 /* All operations in this function are phy address 2 */
2695 hw->phy.addr = 2;
2696
2697 /* masking with 0x3F to remove the page from offset */
2698 ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
2699 if (ret_val) {
2700 hw_dbg(hw, "Could not write the HV address register\n");
2701 goto out;
2702 }
2703
2704 /* Read or write the data value next */
2705 if (read)
2706 ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
2707 else
2708 ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
2709
2710 if (ret_val) {
2711 hw_dbg(hw, "Could not access the HV data register\n");
2712 goto out;
2713 }
2714
2715out:
2716 if (phy_acquired == 1)
2717 hw->phy.ops.release_phy(hw);
2718 return ret_val;
2719}
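
A minimal sketch of how a caller inside this file might wrap the address-port/data-port helper above for a write; the wrapper name is hypothetical, and the 0x3F mask mirrors the one the helper itself applies:

static s32 e1000_write_hv_debug_reg_sketch(struct e1000_hw *hw, u16 index,
					   u16 value)
{
	/* only the 6-bit register index matters; page bits are masked off */
	return e1000_access_phy_debug_regs_hv(hw, index & 0x3F, &value, false);
}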
2720
2721/**
2722 * e1000_link_stall_workaround_hv - Si workaround
2723 * @hw: pointer to the HW structure
2724 *
2725 * This function works around a Si bug where the link partner can get
2726 * a link up indication before the PHY does. If small packets are sent
2727 * by the link partner, they can be placed in the packet buffer without
2728 * being properly accounted for by the PHY, which stalls the buffer and
2729 * prevents further packets from being received. The workaround is to clear the
2730 * packet buffer after the PHY detects link up.
2731 **/
2732s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
2733{
2734 s32 ret_val = 0;
2735 u16 data;
2736
2737 if (hw->phy.type != e1000_phy_82578)
2738 goto out;
2739
2740 /* check if link is up and at 1Gbps */
2741 ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data);
2742 if (ret_val)
2743 goto out;
2744
2745 data &= BM_CS_STATUS_LINK_UP |
2746 BM_CS_STATUS_RESOLVED |
2747 BM_CS_STATUS_SPEED_MASK;
2748
2749 if (data != (BM_CS_STATUS_LINK_UP |
2750 BM_CS_STATUS_RESOLVED |
2751 BM_CS_STATUS_SPEED_1000))
2752 goto out;
2753
2754 mdelay(200);
2755
2756 /* flush the packets in the fifo buffer */
2757 ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL,
2758 HV_MUX_DATA_CTRL_GEN_TO_MAC |
2759 HV_MUX_DATA_CTRL_FORCE_SPEED);
2760 if (ret_val)
2761 goto out;
2762
2763 ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL,
2764 HV_MUX_DATA_CTRL_GEN_TO_MAC);
2765
2766out:
2767 return ret_val;
2768}
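
As a hedged illustration, a caller in the link-up path might invoke the workaround above like this (the wrapper is hypothetical; the real MAC-side hook lives elsewhere in the driver):

static void e1000_after_link_up_sketch(struct e1000_hw *hw)
{
	/* no-op unless the attached PHY is an 82578 */
	if (e1000_link_stall_workaround_hv(hw))
		hw_dbg(hw, "82578 packet buffer flush failed\n");
}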
2769
2770/**
2771 * e1000_check_polarity_82577 - Checks the polarity.
2772 * @hw: pointer to the HW structure
2773 *
2774 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2775 *
2776 * Polarity is determined based on the PHY specific status register.
2777 **/
2778s32 e1000_check_polarity_82577(struct e1000_hw *hw)
2779{
2780 struct e1000_phy_info *phy = &hw->phy;
2781 s32 ret_val;
2782 u16 data;
2783
2784 ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data);
2785
2786 if (!ret_val)
2787 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
2788 ? e1000_rev_polarity_reversed
2789 : e1000_rev_polarity_normal;
2790
2791 return ret_val;
2792}
2793
2794/**
2795 * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
2796 * @hw: pointer to the HW structure
2797 *
2798 * Calls the PHY setup function to force speed and duplex. Clears the
2799 * auto-crossover to force MDI manually. Waits for link and returns
2800 * success if link comes up; otherwise returns -E1000_ERR_PHY (-2).
2801 **/
2802s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
2803{
2804 struct e1000_phy_info *phy = &hw->phy;
2805 s32 ret_val;
2806 u16 phy_data;
2807 bool link;
2808
2809 ret_val = phy->ops.read_phy_reg(hw, PHY_CONTROL, &phy_data);
2810 if (ret_val)
2811 goto out;
2812
2813 e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
2814
2815 ret_val = phy->ops.write_phy_reg(hw, PHY_CONTROL, phy_data);
2816 if (ret_val)
2817 goto out;
2818
2819 /*
2820 * Clear Auto-Crossover to force MDI manually. 82577 requires MDI
2821 * forced whenever speed and duplex are forced.
2822 */
2823 ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_CTRL_2, &phy_data);
2824 if (ret_val)
2825 goto out;
2826
2827 phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
2828 phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
2829
2830 ret_val = phy->ops.write_phy_reg(hw, I82577_PHY_CTRL_2, phy_data);
2831 if (ret_val)
2832 goto out;
2833
2834 hw_dbg(hw, "I82577_PHY_CTRL_2: %X\n", phy_data);
2835
2836 udelay(1);
2837
2838 if (phy->autoneg_wait_to_complete) {
2839 hw_dbg(hw, "Waiting for forced speed/duplex link on 82577 phy\n");
2840
2841 ret_val = e1000e_phy_has_link_generic(hw,
2842 PHY_FORCE_LIMIT,
2843 100000,
2844 &link);
2845 if (ret_val)
2846 goto out;
2847
2848 if (!link)
2849 hw_dbg(hw, "Link taking longer than expected.\n");
2850
2851 /* Try once more */
2852 ret_val = e1000e_phy_has_link_generic(hw,
2853 PHY_FORCE_LIMIT,
2854 100000,
2855 &link);
2856 if (ret_val)
2857 goto out;
2858 }
2859
2860out:
2861 return ret_val;
2862}
2863
2864/**
2865 * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
2866 * @hw: pointer to the HW structure
2867 *
2868 * Read PHY status to determine if link is up. If link is up, then
2869 * set/determine 10base-T extended distance and polarity correction. Read
2870 * PHY port status to determine MDI/MDIx and speed. Based on the speed,
2871 * determine the cable length and the local and remote receiver status.
2872 **/
2873s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
2874{
2875 struct e1000_phy_info *phy = &hw->phy;
2876 s32 ret_val;
2877 u16 data;
2878 bool link;
2879
2880 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
2881 if (ret_val)
2882 goto out;
2883
2884 if (!link) {
2885 hw_dbg(hw, "Phy info is only valid if link is up\n");
2886 ret_val = -E1000_ERR_CONFIG;
2887 goto out;
2888 }
2889
2890 phy->polarity_correction = true;
2891
2892 ret_val = e1000_check_polarity_82577(hw);
2893 if (ret_val)
2894 goto out;
2895
2896 ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data);
2897 if (ret_val)
2898 goto out;
2899
2900 phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
2901
2902 if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
2903 I82577_PHY_STATUS2_SPEED_1000MBPS) {
2904 ret_val = hw->phy.ops.get_cable_length(hw);
2905 if (ret_val)
2906 goto out;
2907
2908 ret_val = phy->ops.read_phy_reg(hw, PHY_1000T_STATUS, &data);
2909 if (ret_val)
2910 goto out;
2911
2912 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
2913 ? e1000_1000t_rx_status_ok
2914 : e1000_1000t_rx_status_not_ok;
2915
2916 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
2917 ? e1000_1000t_rx_status_ok
2918 : e1000_1000t_rx_status_not_ok;
2919 } else {
2920 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
2921 phy->local_rx = e1000_1000t_rx_status_undefined;
2922 phy->remote_rx = e1000_1000t_rx_status_undefined;
2923 }
2924
2925out:
2926 return ret_val;
2927}
2928
2929/**
2930 * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
2931 * @hw: pointer to the HW structure
2932 *
2933 * Reads the diagnostic status register and verifies the result is valid before
2934 * placing it in the phy_cable_length field.
2935 **/
2936s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
2937{
2938 struct e1000_phy_info *phy = &hw->phy;
2939 s32 ret_val;
2940 u16 phy_data, length;
2941
2942 ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
2943 if (ret_val)
2944 goto out;
2945
2946 length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
2947 I82577_DSTATUS_CABLE_LENGTH_SHIFT;
2948
2949 if (length == E1000_CABLE_LENGTH_UNDEFINED)
2950 ret_val = -E1000_ERR_PHY;
2951
2952 phy->cable_length = length;
2953
2954out:
2955 return ret_val;
2956}
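
A short usage sketch, assuming it sits alongside the helpers in this file: the function above reports its result through the shared phy state rather than a return value, so a hypothetical caller would read hw->phy.cable_length afterwards:

static void e1000_report_cable_length_sketch(struct e1000_hw *hw)
{
	if (e1000_get_cable_length_82577(hw))
		return;	/* undefined length or MDIO error */

	hw_dbg(hw, "estimated cable length: %d\n", hw->phy.cable_length);
}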
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 9080f07da8fe..8005b602f776 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -661,8 +661,6 @@ static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
661 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1) 661 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
662 netif_stop_queue(netdev); 662 netif_stop_queue(netdev);
663 663
664 netdev->trans_start = jiffies;
665
666 spin_unlock_irqrestore(&enic->wq_lock[0], flags); 664 spin_unlock_irqrestore(&enic->wq_lock[0], flags);
667 665
668 return NETDEV_TX_OK; 666 return NETDEV_TX_OK;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index d0b1d9f17a5d..b60a3041b64c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -77,27 +77,31 @@
77 * Hardware access: 77 * Hardware access:
78 */ 78 */
79 79
80#define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */ 80#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
81#define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */ 81#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
82#define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */ 82#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
83#define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */ 83#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
84#define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */ 84#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
85#define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */ 85#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and striping */
86#define DEV_HAS_MSI 0x000040 /* device supports MSI */ 86#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
87#define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */ 87#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
88#define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */ 88#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
89#define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */ 89#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
90#define DEV_HAS_STATISTICS_V2 0x000600 /* device supports hw statistics version 2 */ 90#define DEV_HAS_STATISTICS_V2 0x0000600 /* device supports hw statistics version 2 */
91#define DEV_HAS_STATISTICS_V3 0x000e00 /* device supports hw statistics version 3 */ 91#define DEV_HAS_STATISTICS_V3 0x0000e00 /* device supports hw statistics version 3 */
92#define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */ 92#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
93#define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */ 93#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
94#define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */ 94#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
95#define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */ 95#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
96#define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */ 96#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
97#define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */ 97#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
98#define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */ 98#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
99#define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */ 99#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
100#define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */ 100#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
101#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
102#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
103#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
104#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */
101 105
102enum { 106enum {
103 NvRegIrqStatus = 0x000, 107 NvRegIrqStatus = 0x000,
@@ -898,6 +902,12 @@ enum {
898}; 902};
899static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED; 903static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
900 904
905/*
906 * Power down phy when interface is down (persists through reboot;
907 * older Linux and other OSes may not power it up again)
908 */
909static int phy_power_down = 0;
910
901static inline struct fe_priv *get_nvpriv(struct net_device *dev) 911static inline struct fe_priv *get_nvpriv(struct net_device *dev)
902{ 912{
903 return netdev_priv(dev); 913 return netdev_priv(dev);
@@ -1265,14 +1275,7 @@ static int phy_init(struct net_device *dev)
1265 } 1275 }
1266 } 1276 }
1267 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1277 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1268 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 || 1278 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1269 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
1270 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
1271 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
1272 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
1273 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
1274 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
1275 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
1276 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1279 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1277 phy_reserved |= PHY_REALTEK_INIT7; 1280 phy_reserved |= PHY_REALTEK_INIT7;
1278 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) { 1281 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
@@ -1463,14 +1466,7 @@ static int phy_init(struct net_device *dev)
1463 } 1466 }
1464 } 1467 }
1465 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1468 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1466 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 || 1469 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1467 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
1468 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
1469 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
1470 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
1471 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
1472 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
1473 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
1474 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1470 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1475 phy_reserved |= PHY_REALTEK_INIT7; 1471 phy_reserved |= PHY_REALTEK_INIT7;
1476 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) { 1472 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
@@ -1503,7 +1499,10 @@ static int phy_init(struct net_device *dev)
1503 1499
1504 /* restart auto negotiation, power down phy */ 1500 /* restart auto negotiation, power down phy */
1505 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1501 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1506 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN); 1502 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1503 if (phy_power_down) {
1504 mii_control |= BMCR_PDOWN;
1505 }
1507 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1506 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1508 return PHY_ERROR; 1507 return PHY_ERROR;
1509 } 1508 }
@@ -5534,7 +5533,7 @@ static int nv_close(struct net_device *dev)
5534 5533
5535 nv_drain_rxtx(dev); 5534 nv_drain_rxtx(dev);
5536 5535
5537 if (np->wolenabled) { 5536 if (np->wolenabled || !phy_power_down) {
5538 nv_txrx_gate(dev, false); 5537 nv_txrx_gate(dev, false);
5539 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5538 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5540 nv_start_rx(dev); 5539 nv_start_rx(dev);
@@ -5835,8 +5834,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5835 /* take phy and nic out of low power mode */ 5834 /* take phy and nic out of low power mode */
5836 powerstate = readl(base + NvRegPowerState2); 5835 powerstate = readl(base + NvRegPowerState2);
5837 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; 5836 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5838 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 5837 if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5839 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
5840 pci_dev->revision >= 0xA3) 5838 pci_dev->revision >= 0xA3)
5841 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; 5839 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5842 writel(powerstate, base + NvRegPowerState2); 5840 writel(powerstate, base + NvRegPowerState2);
@@ -5892,14 +5890,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5892 /* Limit the number of tx's outstanding for hw bug */ 5890 /* Limit the number of tx's outstanding for hw bug */
5893 if (id->driver_data & DEV_NEED_TX_LIMIT) { 5891 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5894 np->tx_limit = 1; 5892 np->tx_limit = 1;
5895 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 || 5893 if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
5896 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
5897 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
5898 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
5899 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
5900 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
5901 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
5902 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
5903 pci_dev->revision >= 0xA2) 5894 pci_dev->revision >= 0xA2)
5904 np->tx_limit = 0; 5895 np->tx_limit = 0;
5905 } 5896 }
@@ -6149,7 +6140,8 @@ static int nv_resume(struct pci_dev *pdev)
6149 for (i = 0;i <= np->register_size/sizeof(u32); i++) 6140 for (i = 0;i <= np->register_size/sizeof(u32); i++)
6150 writel(np->saved_config_space[i], base+i*sizeof(u32)); 6141 writel(np->saved_config_space[i], base+i*sizeof(u32));
6151 6142
6152 pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE); 6143 if (np->driver_data & DEV_NEED_MSI_FIX)
6144 pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
6153 6145
6154 /* restore phy state, including autoneg */ 6146 /* restore phy state, including autoneg */
6155 phy_init(dev); 6147 phy_init(dev);
@@ -6198,160 +6190,164 @@ static void nv_shutdown(struct pci_dev *pdev)
6198 6190
6199static struct pci_device_id pci_tbl[] = { 6191static struct pci_device_id pci_tbl[] = {
6200 { /* nForce Ethernet Controller */ 6192 { /* nForce Ethernet Controller */
6201 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), 6193 PCI_DEVICE(0x10DE, 0x01C3),
6202 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6194 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6203 }, 6195 },
6204 { /* nForce2 Ethernet Controller */ 6196 { /* nForce2 Ethernet Controller */
6205 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2), 6197 PCI_DEVICE(0x10DE, 0x0066),
6206 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6198 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6207 }, 6199 },
6208 { /* nForce3 Ethernet Controller */ 6200 { /* nForce3 Ethernet Controller */
6209 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3), 6201 PCI_DEVICE(0x10DE, 0x00D6),
6210 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6202 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6211 }, 6203 },
6212 { /* nForce3 Ethernet Controller */ 6204 { /* nForce3 Ethernet Controller */
6213 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), 6205 PCI_DEVICE(0x10DE, 0x0086),
6214 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6206 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6215 }, 6207 },
6216 { /* nForce3 Ethernet Controller */ 6208 { /* nForce3 Ethernet Controller */
6217 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), 6209 PCI_DEVICE(0x10DE, 0x008C),
6218 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6210 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6219 }, 6211 },
6220 { /* nForce3 Ethernet Controller */ 6212 { /* nForce3 Ethernet Controller */
6221 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), 6213 PCI_DEVICE(0x10DE, 0x00E6),
6222 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6214 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6223 }, 6215 },
6224 { /* nForce3 Ethernet Controller */ 6216 { /* nForce3 Ethernet Controller */
6225 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7), 6217 PCI_DEVICE(0x10DE, 0x00DF),
6226 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6218 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6227 }, 6219 },
6228 { /* CK804 Ethernet Controller */ 6220 { /* CK804 Ethernet Controller */
6229 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 6221 PCI_DEVICE(0x10DE, 0x0056),
6230 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6222 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6231 }, 6223 },
6232 { /* CK804 Ethernet Controller */ 6224 { /* CK804 Ethernet Controller */
6233 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 6225 PCI_DEVICE(0x10DE, 0x0057),
6234 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6226 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6235 }, 6227 },
6236 { /* MCP04 Ethernet Controller */ 6228 { /* MCP04 Ethernet Controller */
6237 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 6229 PCI_DEVICE(0x10DE, 0x0037),
6238 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6230 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6239 }, 6231 },
6240 { /* MCP04 Ethernet Controller */ 6232 { /* MCP04 Ethernet Controller */
6241 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), 6233 PCI_DEVICE(0x10DE, 0x0038),
6242 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6234 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6243 }, 6235 },
6244 { /* MCP51 Ethernet Controller */ 6236 { /* MCP51 Ethernet Controller */
6245 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 6237 PCI_DEVICE(0x10DE, 0x0268),
6246 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, 6238 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6247 }, 6239 },
6248 { /* MCP51 Ethernet Controller */ 6240 { /* MCP51 Ethernet Controller */
6249 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), 6241 PCI_DEVICE(0x10DE, 0x0269),
6250 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, 6242 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6251 }, 6243 },
6252 { /* MCP55 Ethernet Controller */ 6244 { /* MCP55 Ethernet Controller */
6253 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 6245 PCI_DEVICE(0x10DE, 0x0372),
6254 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT, 6246 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6255 }, 6247 },
6256 { /* MCP55 Ethernet Controller */ 6248 { /* MCP55 Ethernet Controller */
6257 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 6249 PCI_DEVICE(0x10DE, 0x0373),
6258 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT, 6250 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6259 }, 6251 },
6260 { /* MCP61 Ethernet Controller */ 6252 { /* MCP61 Ethernet Controller */
6261 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 6253 PCI_DEVICE(0x10DE, 0x03E5),
6262 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 6254 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6263 }, 6255 },
6264 { /* MCP61 Ethernet Controller */ 6256 { /* MCP61 Ethernet Controller */
6265 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 6257 PCI_DEVICE(0x10DE, 0x03E6),
6266 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 6258 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6267 }, 6259 },
6268 { /* MCP61 Ethernet Controller */ 6260 { /* MCP61 Ethernet Controller */
6269 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 6261 PCI_DEVICE(0x10DE, 0x03EE),
6270 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 6262 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6271 }, 6263 },
6272 { /* MCP61 Ethernet Controller */ 6264 { /* MCP61 Ethernet Controller */
6273 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 6265 PCI_DEVICE(0x10DE, 0x03EF),
6274 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 6266 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6275 }, 6267 },
6276 { /* MCP65 Ethernet Controller */ 6268 { /* MCP65 Ethernet Controller */
6277 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 6269 PCI_DEVICE(0x10DE, 0x0450),
6278 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6270 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6279 }, 6271 },
6280 { /* MCP65 Ethernet Controller */ 6272 { /* MCP65 Ethernet Controller */
6281 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 6273 PCI_DEVICE(0x10DE, 0x0451),
6282 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6274 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6283 }, 6275 },
6284 { /* MCP65 Ethernet Controller */ 6276 { /* MCP65 Ethernet Controller */
6285 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 6277 PCI_DEVICE(0x10DE, 0x0452),
6286 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6278 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6287 }, 6279 },
6288 { /* MCP65 Ethernet Controller */ 6280 { /* MCP65 Ethernet Controller */
6289 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 6281 PCI_DEVICE(0x10DE, 0x0453),
6290 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6282 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6291 }, 6283 },
6292 { /* MCP67 Ethernet Controller */ 6284 { /* MCP67 Ethernet Controller */
6293 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 6285 PCI_DEVICE(0x10DE, 0x054C),
6294 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE, 6286 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6295 }, 6287 },
6296 { /* MCP67 Ethernet Controller */ 6288 { /* MCP67 Ethernet Controller */
6297 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 6289 PCI_DEVICE(0x10DE, 0x054D),
6298 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE, 6290 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6299 }, 6291 },
6300 { /* MCP67 Ethernet Controller */ 6292 { /* MCP67 Ethernet Controller */
6301 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 6293 PCI_DEVICE(0x10DE, 0x054E),
6302 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE, 6294 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6303 }, 6295 },
6304 { /* MCP67 Ethernet Controller */ 6296 { /* MCP67 Ethernet Controller */
6305 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 6297 PCI_DEVICE(0x10DE, 0x054F),
6306 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE, 6298 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6307 }, 6299 },
6308 { /* MCP73 Ethernet Controller */ 6300 { /* MCP73 Ethernet Controller */
6309 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28), 6301 PCI_DEVICE(0x10DE, 0x07DC),
6310 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE, 6302 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6311 }, 6303 },
6312 { /* MCP73 Ethernet Controller */ 6304 { /* MCP73 Ethernet Controller */
6313 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29), 6305 PCI_DEVICE(0x10DE, 0x07DD),
6314 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE, 6306 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6315 }, 6307 },
6316 { /* MCP73 Ethernet Controller */ 6308 { /* MCP73 Ethernet Controller */
6317 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30), 6309 PCI_DEVICE(0x10DE, 0x07DE),
6318 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE, 6310 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6319 }, 6311 },
6320 { /* MCP73 Ethernet Controller */ 6312 { /* MCP73 Ethernet Controller */
6321 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), 6313 PCI_DEVICE(0x10DE, 0x07DF),
6322 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE, 6314 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6323 }, 6315 },
6324 { /* MCP77 Ethernet Controller */ 6316 { /* MCP77 Ethernet Controller */
6325 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 6317 PCI_DEVICE(0x10DE, 0x0760),
6326 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6318 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6327 }, 6319 },
6328 { /* MCP77 Ethernet Controller */ 6320 { /* MCP77 Ethernet Controller */
6329 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 6321 PCI_DEVICE(0x10DE, 0x0761),
6330 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6322 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6331 }, 6323 },
6332 { /* MCP77 Ethernet Controller */ 6324 { /* MCP77 Ethernet Controller */
6333 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 6325 PCI_DEVICE(0x10DE, 0x0762),
6334 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6326 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6335 }, 6327 },
6336 { /* MCP77 Ethernet Controller */ 6328 { /* MCP77 Ethernet Controller */
6337 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 6329 PCI_DEVICE(0x10DE, 0x0763),
6338 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6330 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6339 }, 6331 },
6340 { /* MCP79 Ethernet Controller */ 6332 { /* MCP79 Ethernet Controller */
6341 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 6333 PCI_DEVICE(0x10DE, 0x0AB0),
6342 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6334 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6343 }, 6335 },
6344 { /* MCP79 Ethernet Controller */ 6336 { /* MCP79 Ethernet Controller */
6345 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 6337 PCI_DEVICE(0x10DE, 0x0AB1),
6346 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6338 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6347 }, 6339 },
6348 { /* MCP79 Ethernet Controller */ 6340 { /* MCP79 Ethernet Controller */
6349 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 6341 PCI_DEVICE(0x10DE, 0x0AB2),
6350 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6342 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6351 }, 6343 },
6352 { /* MCP79 Ethernet Controller */ 6344 { /* MCP79 Ethernet Controller */
6353 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 6345 PCI_DEVICE(0x10DE, 0x0AB3),
6354 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6346 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6347 },
6348 { /* MCP89 Ethernet Controller */
6349 PCI_DEVICE(0x10DE, 0x0D7D),
6350 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6355 }, 6351 },
6356 {0,}, 6352 {0,},
6357}; 6353};
@@ -6390,6 +6386,8 @@ module_param(dma_64bit, int, 0);
6390MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); 6386MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6391module_param(phy_cross, int, 0); 6387module_param(phy_cross, int, 0);
6392MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); 6388MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6389module_param(phy_power_down, int, 0);
6390MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
6393 6391
6394MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 6392MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6395MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 6393MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
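
A brief sketch of the pattern the forcedeth changes above converge on: per-device quirks are now keyed off driver_data feature flags rather than long PCI device-ID lists. The helper name is hypothetical; np->driver_data and the flag are the ones introduced in the diff:

static bool nv_needs_phy_init_fix_sketch(struct fe_priv *np)
{
	/* one flag test replaces an eight-entry device-ID comparison */
	return (np->driver_data & DEV_NEED_PHY_INIT_FIX) != 0;
}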
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d12e0e0336f4..3af581303ca2 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -301,13 +301,17 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
301 of_device_is_compatible(np, "ucc_geth_phy")) { 301 of_device_is_compatible(np, "ucc_geth_phy")) {
302#ifdef CONFIG_UCC_GETH 302#ifdef CONFIG_UCC_GETH
303 u32 id; 303 u32 id;
304 static u32 mii_mng_master;
304 305
305 tbipa = &regs->utbipar; 306 tbipa = &regs->utbipar;
306 307
307 if ((err = get_ucc_id_for_range(addr, addr + size, &id))) 308 if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
308 goto err_free_irqs; 309 goto err_free_irqs;
309 310
310 ucc_set_qe_mux_mii_mng(id - 1); 311 if (!mii_mng_master) {
312 mii_mng_master = id;
313 ucc_set_qe_mux_mii_mng(id - 1);
314 }
311#else 315#else
312 err = -ENODEV; 316 err = -ENODEV;
313 goto err_free_irqs; 317 goto err_free_irqs;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 91317bc11154..2cd94338b5d3 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -259,7 +259,7 @@ extern const char gfar_driver_version[];
259(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ 259(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
260 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ 260 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
261 | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \ 261 | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \
262 | IEVENT_MAG) 262 | IEVENT_MAG | IEVENT_BABR)
263 263
264#define IMASK_INIT_CLEAR 0x00000000 264#define IMASK_INIT_CLEAR 0x00000000
265#define IMASK_BABR 0x80000000 265#define IMASK_BABR 0x80000000
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 310ee035067c..26151fa35df5 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1163,7 +1163,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
1163 hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); 1163 hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
1164 1164
1165 /* Trigger an immediate transmit demand. */ 1165 /* Trigger an immediate transmit demand. */
1166 dev->trans_start = jiffies; 1166 dev->trans_start = jiffies; /* prevent tx timeout */
1167 hmp->stats.tx_errors++; 1167 hmp->stats.tx_errors++;
1168 1168
1169 /* Restart the chip's Tx/Rx processes . */ 1169 /* Restart the chip's Tx/Rx processes . */
@@ -1364,7 +1364,6 @@ static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
1364 hmp->tx_full = 1; 1364 hmp->tx_full = 1;
1365 netif_stop_queue(dev); 1365 netif_stop_queue(dev);
1366 } 1366 }
1367 dev->trans_start = jiffies;
1368 1367
1369 if (hamachi_debug > 4) { 1368 if (hamachi_debug > 4) {
1370 printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n", 1369 printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n",
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 8e93750d5120..ea17319624aa 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3139,8 +3139,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3139 /* set time_stamp *before* dma to help avoid a possible race */ 3139 /* set time_stamp *before* dma to help avoid a possible race */
3140 buffer_info->time_stamp = jiffies; 3140 buffer_info->time_stamp = jiffies;
3141 buffer_info->next_to_watch = i; 3141 buffer_info->next_to_watch = i;
3142 buffer_info->dma = map[count]; 3142 buffer_info->dma = skb_shinfo(skb)->dma_head;
3143 count++;
3144 3143
3145 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 3144 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3146 struct skb_frag_struct *frag; 3145 struct skb_frag_struct *frag;
@@ -3164,7 +3163,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3164 tx_ring->buffer_info[i].skb = skb; 3163 tx_ring->buffer_info[i].skb = skb;
3165 tx_ring->buffer_info[first].next_to_watch = i; 3164 tx_ring->buffer_info[first].next_to_watch = i;
3166 3165
3167 return count; 3166 return count + 1;
3168} 3167}
3169 3168
3170static inline void igb_tx_queue_adv(struct igb_adapter *adapter, 3169static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
@@ -3344,7 +3343,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
3344 if (count) { 3343 if (count) {
3345 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count, 3344 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
3346 skb->len, hdr_len); 3345 skb->len, hdr_len);
3347 netdev->trans_start = jiffies;
3348 /* Make sure there is space in the ring for the next send. */ 3346 /* Make sure there is space in the ring for the next send. */
3349 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4); 3347 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3350 } else { 3348 } else {
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 44a8eef03a74..22aadb7884fa 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2119,8 +2119,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2119 /* set time_stamp *before* dma to help avoid a possible race */ 2119 /* set time_stamp *before* dma to help avoid a possible race */
2120 buffer_info->time_stamp = jiffies; 2120 buffer_info->time_stamp = jiffies;
2121 buffer_info->next_to_watch = i; 2121 buffer_info->next_to_watch = i;
2122 buffer_info->dma = map[count]; 2122 buffer_info->dma = skb_shinfo(skb)->dma_head;
2123 count++;
2124 2123
2125 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 2124 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2126 struct skb_frag_struct *frag; 2125 struct skb_frag_struct *frag;
@@ -2144,7 +2143,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2144 tx_ring->buffer_info[i].skb = skb; 2143 tx_ring->buffer_info[i].skb = skb;
2145 tx_ring->buffer_info[first].next_to_watch = i; 2144 tx_ring->buffer_info[first].next_to_watch = i;
2146 2145
2147 return count; 2146 return count + 1;
2148} 2147}
2149 2148
2150static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, 2149static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
@@ -2270,7 +2269,6 @@ static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2270 if (count) { 2269 if (count) {
2271 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, 2270 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2272 skb->len, hdr_len); 2271 skb->len, hdr_len);
2273 netdev->trans_start = jiffies;
2274 /* Make sure there is space in the ring for the next send. */ 2272 /* Make sure there is space in the ring for the next send. */
2275 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); 2273 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2276 } else { 2274 } else {
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 006ba23110db..394b2b17075e 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1859,6 +1859,42 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1859 IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__); 1859 IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__);
1860} 1860}
1861 1861
1862#ifdef CONFIG_PM
1863/* USB suspend, so power off the transmitter/receiver */
1864static int irda_usb_suspend(struct usb_interface *intf, pm_message_t message)
1865{
1866 struct irda_usb_cb *self = usb_get_intfdata(intf);
1867 int i;
1868
1869 netif_device_detach(self->netdev);
1870
1871 if (self->tx_urb != NULL)
1872 usb_kill_urb(self->tx_urb);
1873 if (self->speed_urb != NULL)
1874 usb_kill_urb(self->speed_urb);
1875 for (i = 0; i < self->max_rx_urb; i++) {
1876 if (self->rx_urb[i] != NULL)
1877 usb_kill_urb(self->rx_urb[i]);
1878 }
1879 return 0;
1880}
1881
1882/* Coming out of suspend, so reset hardware */
1883static int irda_usb_resume(struct usb_interface *intf)
1884{
1885 struct irda_usb_cb *self = usb_get_intfdata(intf);
1886 int i;
1887
1888 for (i = 0; i < self->max_rx_urb; i++) {
1889 if (self->rx_urb[i] != NULL)
1890 usb_submit_urb(self->rx_urb[i], GFP_KERNEL);
1891 }
1892
1893 netif_device_attach(self->netdev);
1894 return 0;
1895}
1896#endif
1897
1862/*------------------------------------------------------------------*/ 1898/*------------------------------------------------------------------*/
1863/* 1899/*
1864 * USB device callbacks 1900 * USB device callbacks
@@ -1868,6 +1904,10 @@ static struct usb_driver irda_driver = {
1868 .probe = irda_usb_probe, 1904 .probe = irda_usb_probe,
1869 .disconnect = irda_usb_disconnect, 1905 .disconnect = irda_usb_disconnect,
1870 .id_table = dongles, 1906 .id_table = dongles,
1907#ifdef CONFIG_PM
1908 .suspend = irda_usb_suspend,
1909 .resume = irda_usb_resume,
1910#endif
1871}; 1911};
1872 1912
1873/************************* MODULE CALLBACKS *************************/ 1913/************************* MODULE CALLBACKS *************************/
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 04cb81a739c2..9c897cf86b9f 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1300,7 +1300,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1300 buffer_info->length = size; 1300 buffer_info->length = size;
1301 WARN_ON(buffer_info->dma != 0); 1301 WARN_ON(buffer_info->dma != 0);
1302 buffer_info->time_stamp = jiffies; 1302 buffer_info->time_stamp = jiffies;
1303 buffer_info->dma = map[0] + offset; 1303 buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
1304 pci_map_single(adapter->pdev, 1304 pci_map_single(adapter->pdev,
1305 skb->data + offset, 1305 skb->data + offset,
1306 size, 1306 size,
@@ -1340,7 +1340,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1340 1340
1341 buffer_info->length = size; 1341 buffer_info->length = size;
1342 buffer_info->time_stamp = jiffies; 1342 buffer_info->time_stamp = jiffies;
1343 buffer_info->dma = map[f + 1] + offset; 1343 buffer_info->dma = map[f] + offset;
1344 buffer_info->next_to_watch = 0; 1344 buffer_info->next_to_watch = 0;
1345 1345
1346 len -= size; 1346 len -= size;
@@ -1488,7 +1488,6 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1488 1488
1489 if (count) { 1489 if (count) {
1490 ixgb_tx_queue(adapter, count, vlan_id, tx_flags); 1490 ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
1491 netdev->trans_start = jiffies;
1492 /* Make sure there is space in the ring for the next send. */ 1491 /* Make sure there is space in the ring for the next send. */
1493 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); 1492 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1494 1493
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 05a24055ac2f..cd22323cfd22 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -121,17 +121,18 @@ struct ixgbe_queue_stats {
121 121
122struct ixgbe_ring { 122struct ixgbe_ring {
123 void *desc; /* descriptor ring memory */ 123 void *desc; /* descriptor ring memory */
124 dma_addr_t dma; /* phys. address of descriptor ring */
125 unsigned int size; /* length in bytes */
126 unsigned int count; /* amount of descriptors */
127 unsigned int next_to_use;
128 unsigned int next_to_clean;
129
130 int queue_index; /* needed for multiqueue queue management */
131 union { 124 union {
132 struct ixgbe_tx_buffer *tx_buffer_info; 125 struct ixgbe_tx_buffer *tx_buffer_info;
133 struct ixgbe_rx_buffer *rx_buffer_info; 126 struct ixgbe_rx_buffer *rx_buffer_info;
134 }; 127 };
128 u8 atr_sample_rate;
129 u8 atr_count;
130 u16 count; /* amount of descriptors */
131 u16 rx_buf_len;
132 u16 next_to_use;
133 u16 next_to_clean;
134
135 u8 queue_index; /* needed for multiqueue queue management */
135 136
136 u16 head; 137 u16 head;
137 u16 tail; 138 u16 tail;
@@ -139,23 +140,24 @@ struct ixgbe_ring {
139 unsigned int total_bytes; 140 unsigned int total_bytes;
140 unsigned int total_packets; 141 unsigned int total_packets;
141 142
142 u16 reg_idx; /* holds the special value that gets the hardware register
143 * offset associated with this ring, which is different
144 * for DCB and RSS modes */
145
146#ifdef CONFIG_IXGBE_DCA 143#ifdef CONFIG_IXGBE_DCA
147 /* cpu for tx queue */ 144 /* cpu for tx queue */
148 int cpu; 145 int cpu;
149#endif 146#endif
150 struct ixgbe_queue_stats stats;
151 u64 v_idx; /* maps directly to the index for this ring in the hardware
152 * vector array, can also be used for finding the bit in EICR
153 * and friends that represents the vector for this ring */
154 147
148 u16 work_limit; /* max work per interrupt */
149 u16 reg_idx; /* holds the special value that gets
150 * the hardware register offset
151 * associated with this ring, which is
152 * different for DCB and RSS modes
153 */
155 154
156 u16 work_limit; /* max work per interrupt */ 155 struct ixgbe_queue_stats stats;
157 u16 rx_buf_len; 156 unsigned long reinit_state;
158 u64 rsc_count; /* stat for coalesced packets */ 157 u64 rsc_count; /* stat for coalesced packets */
158
159 unsigned int size; /* length in bytes */
160 dma_addr_t dma; /* phys. address of descriptor ring */
159}; 161};
160 162
161enum ixgbe_ring_f_enum { 163enum ixgbe_ring_f_enum {
@@ -163,6 +165,7 @@ enum ixgbe_ring_f_enum {
163 RING_F_DCB, 165 RING_F_DCB,
164 RING_F_VMDQ, 166 RING_F_VMDQ,
165 RING_F_RSS, 167 RING_F_RSS,
168 RING_F_FDIR,
166#ifdef IXGBE_FCOE 169#ifdef IXGBE_FCOE
167 RING_F_FCOE, 170 RING_F_FCOE,
168#endif /* IXGBE_FCOE */ 171#endif /* IXGBE_FCOE */
@@ -173,6 +176,7 @@ enum ixgbe_ring_f_enum {
173#define IXGBE_MAX_DCB_INDICES 8 176#define IXGBE_MAX_DCB_INDICES 8
174#define IXGBE_MAX_RSS_INDICES 16 177#define IXGBE_MAX_RSS_INDICES 16
175#define IXGBE_MAX_VMDQ_INDICES 16 178#define IXGBE_MAX_VMDQ_INDICES 16
179#define IXGBE_MAX_FDIR_INDICES 64
176#ifdef IXGBE_FCOE 180#ifdef IXGBE_FCOE
177#define IXGBE_MAX_FCOE_INDICES 8 181#define IXGBE_MAX_FCOE_INDICES 8
178#endif /* IXGBE_FCOE */ 182#endif /* IXGBE_FCOE */
@@ -193,6 +197,9 @@ struct ixgbe_ring_feature {
193 */ 197 */
194struct ixgbe_q_vector { 198struct ixgbe_q_vector {
195 struct ixgbe_adapter *adapter; 199 struct ixgbe_adapter *adapter;
200 unsigned int v_idx; /* index of q_vector within array, also used for
201 * finding the bit in EICR and friends that
202 * represents the vector for this ring */
196 struct napi_struct napi; 203 struct napi_struct napi;
197 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ 204 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
198 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ 205 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -201,7 +208,6 @@ struct ixgbe_q_vector {
201 u8 tx_itr; 208 u8 tx_itr;
202 u8 rx_itr; 209 u8 rx_itr;
203 u32 eitr; 210 u32 eitr;
204 u32 v_idx; /* vector index in list */
205}; 211};
206 212
207/* Helper macros to switch between ints/sec and what the register uses. 213/* Helper macros to switch between ints/sec and what the register uses.
@@ -223,6 +229,10 @@ struct ixgbe_q_vector {
223#define IXGBE_TX_CTXTDESC_ADV(R, i) \ 229#define IXGBE_TX_CTXTDESC_ADV(R, i) \
224 (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) 230 (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
225 231
232#define IXGBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
233#define IXGBE_TX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_tx_desc)
234#define IXGBE_RX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_rx_desc)
235
226#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 236#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
227#ifdef IXGBE_FCOE 237#ifdef IXGBE_FCOE
228/* Use 3K as the baby jumbo frame size for FCoE */ 238/* Use 3K as the baby jumbo frame size for FCoE */
@@ -315,10 +325,13 @@ struct ixgbe_adapter {
315#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) 325#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
316#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24) 326#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
317#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) 327#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
318#define IXGBE_FLAG_RSC_CAPABLE (u32)(1 << 26) 328#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26)
319#define IXGBE_FLAG_RSC_ENABLED (u32)(1 << 27) 329#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
320#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) 330#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
321 331
332 u32 flags2;
333#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
334#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
322/* default to trying for four seconds */ 335/* default to trying for four seconds */
323#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) 336#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
324 337
@@ -327,6 +340,10 @@ struct ixgbe_adapter {
327 struct pci_dev *pdev; 340 struct pci_dev *pdev;
328 struct net_device_stats net_stats; 341 struct net_device_stats net_stats;
329 342
343 u32 test_icr;
344 struct ixgbe_ring test_tx_ring;
345 struct ixgbe_ring test_rx_ring;
346
330 /* structs defined in ixgbe_hw.h */ 347 /* structs defined in ixgbe_hw.h */
331 struct ixgbe_hw hw; 348 struct ixgbe_hw hw;
332 u16 msg_enable; 349 u16 msg_enable;
@@ -349,6 +366,10 @@ struct ixgbe_adapter {
349 struct timer_list sfp_timer; 366 struct timer_list sfp_timer;
350 struct work_struct multispeed_fiber_task; 367 struct work_struct multispeed_fiber_task;
351 struct work_struct sfp_config_module_task; 368 struct work_struct sfp_config_module_task;
369 u32 fdir_pballoc;
370 u32 atr_sample_rate;
371 spinlock_t fdir_perfect_lock;
372 struct work_struct fdir_reinit_task;
352#ifdef IXGBE_FCOE 373#ifdef IXGBE_FCOE
353 struct ixgbe_fcoe fcoe; 374 struct ixgbe_fcoe fcoe;
354#endif /* IXGBE_FCOE */ 375#endif /* IXGBE_FCOE */
@@ -361,6 +382,7 @@ enum ixbge_state_t {
361 __IXGBE_TESTING, 382 __IXGBE_TESTING,
362 __IXGBE_RESETTING, 383 __IXGBE_RESETTING,
363 __IXGBE_DOWN, 384 __IXGBE_DOWN,
385 __IXGBE_FDIR_INIT_DONE,
364 __IXGBE_SFP_MODULE_NOT_FOUND 386 __IXGBE_SFP_MODULE_NOT_FOUND
365}; 387};
366 388
@@ -393,7 +415,63 @@ extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
393extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 415extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
394extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 416extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
395extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 417extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
396extern void ixgbe_write_eitr(struct ixgbe_adapter *, int, u32); 418extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
419extern int ethtool_ioctl(struct ifreq *ifr);
420extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
421extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
422extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
423extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
424 struct ixgbe_atr_input *input,
425 u8 queue);
426extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
427 struct ixgbe_atr_input *input,
428 u16 soft_id,
429 u8 queue);
430extern u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key);
431extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
432 u16 vlan_id);
433extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
434 u32 src_addr);
435extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
436 u32 dst_addr);
437extern s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
438 u32 src_addr_1, u32 src_addr_2,
439 u32 src_addr_3, u32 src_addr_4);
440extern s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
441 u32 dst_addr_1, u32 dst_addr_2,
442 u32 dst_addr_3, u32 dst_addr_4);
443extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
444 u16 src_port);
445extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
446 u16 dst_port);
447extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
448 u16 flex_byte);
449extern s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
450 u8 vm_pool);
451extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
452 u8 l4type);
453extern s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
454 u16 *vlan_id);
455extern s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
456 u32 *src_addr);
457extern s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
458 u32 *dst_addr);
459extern s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
460 u32 *src_addr_1, u32 *src_addr_2,
461 u32 *src_addr_3, u32 *src_addr_4);
462extern s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
463 u32 *dst_addr_1, u32 *dst_addr_2,
464 u32 *dst_addr_3, u32 *dst_addr_4);
465extern s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
466 u16 *src_port);
467extern s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
468 u16 *dst_port);
469extern s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
470 u16 *flex_byte);
471extern s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input,
472 u8 *vm_pool);
473extern s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
474 u8 *l4type);
397#ifdef IXGBE_FCOE 475#ifdef IXGBE_FCOE
398extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 476extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
399extern int ixgbe_fso(struct ixgbe_adapter *adapter, 477extern int ixgbe_fso(struct ixgbe_adapter *adapter,
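The block of ixgbe_atr_*_82599 and ixgbe_fdir_*_82599 prototypes added above is the software interface for building a Flow Director/ATR input stream and steering a matching flow to an Rx queue. As a rough sketch of how these helpers compose (the addresses, ports, VLAN id and queue number below are invented for illustration, byte ordering of the arguments is glossed over, and error handling is omitted), a caller might do something like:

/*
 * Illustrative only: build an ATR input for a TCP/IPv4 flow and add a
 * signature filter for it.  The helper names and the ixgbe_atr_input
 * struct come from the prototypes above; all values are hypothetical.
 */
static void example_add_signature_filter(struct ixgbe_hw *hw)
{
	struct ixgbe_atr_input atr_input;
	u8 rx_queue = 3;					/* hypothetical Rx queue */

	memset(&atr_input, 0, sizeof(atr_input));

	ixgbe_atr_set_vlan_id_82599(&atr_input, 100);
	ixgbe_atr_set_src_ipv4_82599(&atr_input, 0xc0a80001);	/* 192.168.0.1 */
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, 0xc0a80002);	/* 192.168.0.2 */
	ixgbe_atr_set_src_port_82599(&atr_input, 1024);
	ixgbe_atr_set_dst_port_82599(&atr_input, 80);
	ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);

	ixgbe_fdir_add_signature_filter_82599(hw, &atr_input, rx_queue);
}
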
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 88e8350aa786..b9923047ce11 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -293,6 +293,17 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
293 u32 rmcs_reg; 293 u32 rmcs_reg;
294 u32 reg; 294 u32 reg;
295 295
296#ifdef CONFIG_DCB
297 if (hw->fc.requested_mode == ixgbe_fc_pfc)
298 goto out;
299
300#endif /* CONFIG_DCB */
301 /* Negotiate the fc mode to use */
302 ret_val = ixgbe_fc_autoneg(hw);
303 if (ret_val)
304 goto out;
305
306 /* Disable any previous flow control settings */
296 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 307 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
297 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); 308 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
298 309
@@ -304,14 +315,20 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
304 * 0: Flow control is completely disabled 315 * 0: Flow control is completely disabled
305 * 1: Rx flow control is enabled (we can receive pause frames, 316 * 1: Rx flow control is enabled (we can receive pause frames,
306 * but not send pause frames). 317 * but not send pause frames).
307 * 2: Tx flow control is enabled (we can send pause frames but 318 * 2: Tx flow control is enabled (we can send pause frames but
308 * we do not support receiving pause frames). 319 * we do not support receiving pause frames).
309 * 3: Both Rx and Tx flow control (symmetric) are enabled. 320 * 3: Both Rx and Tx flow control (symmetric) are enabled.
310 * other: Invalid. 321 * other: Invalid.
322#ifdef CONFIG_DCB
323 * 4: Priority Flow Control is enabled.
324#endif
311 */ 325 */
312 switch (hw->fc.current_mode) { 326 switch (hw->fc.current_mode) {
313 case ixgbe_fc_none: 327 case ixgbe_fc_none:
314 /* Flow control completely disabled by software override. */ 328 /*
329 * Flow control is disabled by software override or autoneg.
330 * The code below will actually disable it in the HW.
331 */
315 break; 332 break;
316 case ixgbe_fc_rx_pause: 333 case ixgbe_fc_rx_pause:
317 /* 334 /*
@@ -336,6 +353,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
336 fctrl_reg |= IXGBE_FCTRL_RFCE; 353 fctrl_reg |= IXGBE_FCTRL_RFCE;
337 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 354 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
338 break; 355 break;
356#ifdef CONFIG_DCB
357 case ixgbe_fc_pfc:
358 goto out;
359 break;
360#endif /* CONFIG_DCB */
339 default: 361 default:
340 hw_dbg(hw, "Flow control param set incorrectly\n"); 362 hw_dbg(hw, "Flow control param set incorrectly\n");
341 ret_val = -IXGBE_ERR_CONFIG; 363 ret_val = -IXGBE_ERR_CONFIG;
@@ -343,7 +365,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
343 break; 365 break;
344 } 366 }
345 367
346 /* Enable 802.3x based flow control settings. */ 368 /* Set 802.3x based flow control settings. */
347 fctrl_reg |= IXGBE_FCTRL_DPF; 369 fctrl_reg |= IXGBE_FCTRL_DPF;
348 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); 370 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
349 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 371 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
@@ -377,79 +399,6 @@ out:
377} 399}
378 400
379/** 401/**
380 * ixgbe_setup_fc_82598 - Configure flow control settings
381 * @hw: pointer to hardware structure
382 * @packetbuf_num: packet buffer number (0-7)
383 *
384 * Configures the flow control settings based on SW configuration. This
385 * function is used for 802.3x flow control configuration only.
386 **/
387static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
388{
389 s32 ret_val = 0;
390 ixgbe_link_speed speed;
391 bool link_up;
392
393 /* Validate the packetbuf configuration */
394 if (packetbuf_num < 0 || packetbuf_num > 7) {
395 hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
396 " 0-7\n", packetbuf_num);
397 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
398 goto out;
399 }
400
401 /*
402 * Validate the water mark configuration. Zero water marks are invalid
403 * because it causes the controller to just blast out fc packets.
404 */
405 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
406 if (hw->fc.requested_mode != ixgbe_fc_none) {
407 hw_dbg(hw, "Invalid water mark configuration\n");
408 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
409 goto out;
410 }
411 }
412
413 /*
414 * Validate the requested mode. Strict IEEE mode does not allow
415 * ixgbe_fc_rx_pause because it will cause testing anomalies.
416 */
417 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
418 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
419 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
420 goto out;
421 }
422
423 /*
424 * 10gig parts do not have a word in the EEPROM to determine the
425 * default flow control setting, so we explicitly set it to full.
426 */
427 if (hw->fc.requested_mode == ixgbe_fc_default)
428 hw->fc.requested_mode = ixgbe_fc_full;
429
430 /*
431 * Save off the requested flow control mode for use later. Depending
432 * on the link partner's capabilities, we may or may not use this mode.
433 */
434
435 hw->fc.current_mode = hw->fc.requested_mode;
436
437 /* Decide whether to use autoneg or not. */
438 hw->mac.ops.check_link(hw, &speed, &link_up, false);
439 if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber &&
440 (speed == IXGBE_LINK_SPEED_1GB_FULL))
441 ret_val = ixgbe_fc_autoneg(hw);
442
443 if (ret_val)
444 goto out;
445
446 ret_val = ixgbe_fc_enable_82598(hw, packetbuf_num);
447
448out:
449 return ret_val;
450}
451
452/**
453 * ixgbe_setup_mac_link_82598 - Configures MAC link settings 402 * ixgbe_setup_mac_link_82598 - Configures MAC link settings
454 * @hw: pointer to hardware structure 403 * @hw: pointer to hardware structure
455 * 404 *
@@ -488,13 +437,6 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
488 } 437 }
489 } 438 }
490 439
491 /*
492 * We want to save off the original Flow Control configuration just in
493 * case we get disconnected and then reconnected into a different hub
494 * or switch with different Flow Control capabilities.
495 */
496 ixgbe_setup_fc_82598(hw, 0);
497
498 /* Add delay to filter out noises during initial link setup */ 440 /* Add delay to filter out noises during initial link setup */
499 msleep(50); 441 msleep(50);
500 442
@@ -581,6 +523,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
581 else 523 else
582 *speed = IXGBE_LINK_SPEED_1GB_FULL; 524 *speed = IXGBE_LINK_SPEED_1GB_FULL;
583 525
526 /* if link is down, zero out the current_mode */
527 if (*link_up == false) {
528 hw->fc.current_mode = ixgbe_fc_none;
529 hw->fc.fc_was_autonegged = false;
530 }
584out: 531out:
585 return 0; 532 return 0;
586} 533}
@@ -1168,7 +1115,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1168 .disable_mc = &ixgbe_disable_mc_generic, 1115 .disable_mc = &ixgbe_disable_mc_generic,
1169 .clear_vfta = &ixgbe_clear_vfta_82598, 1116 .clear_vfta = &ixgbe_clear_vfta_82598,
1170 .set_vfta = &ixgbe_set_vfta_82598, 1117 .set_vfta = &ixgbe_set_vfta_82598,
1171 .setup_fc = &ixgbe_setup_fc_82598, 1118 .fc_enable = &ixgbe_fc_enable_82598,
1172}; 1119};
1173 1120
1174static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1121static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
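The 82598 hunks above fold flow-control negotiation into the new fc_enable hook: ixgbe_setup_fc_82598() disappears from link setup, ixgbe_fc_enable_82598() now calls ixgbe_fc_autoneg() itself, and check_link zeroes hw->fc.current_mode whenever the link is down. A minimal sketch of the resulting call order, assuming a caller that runs once link state is known (the wrapper function name is invented):

static void example_82598_fc_sequence(struct ixgbe_hw *hw)
{
	ixgbe_link_speed speed;
	bool link_up;

	/* check_link now resets current_mode/fc_was_autonegged on link down */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);

	/*
	 * Once link is up, fc_enable (ixgbe_fc_enable_82598) negotiates the
	 * mode internally via ixgbe_fc_autoneg() and then programs FCTRL/RMCS
	 * for packet buffer 0, replacing the removed setup_fc call from
	 * link setup.
	 */
	if (link_up)
		hw->mac.ops.fc_enable(hw, 0);
}
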
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 5d2783081a94..1984cab7d48b 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -71,10 +71,10 @@ s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw);
71s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw); 71s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw);
72s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); 72s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
73s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); 73s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
74s32 ixgbe_start_hw_rev_0_82599(struct ixgbe_hw *hw);
75s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); 74s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
76s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw); 75s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
77u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); 76u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
77static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
78 78
79void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 79void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
80{ 80{
@@ -122,10 +122,9 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
122 IXGBE_WRITE_FLUSH(hw); 122 IXGBE_WRITE_FLUSH(hw);
123 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 123 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
124 } 124 }
125 /* Now restart DSP */ 125 /* Now restart DSP by setting Restart_AN */
126 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000102); 126 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
127 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000b1d); 127 (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
128 IXGBE_WRITE_FLUSH(hw);
129 128
130 /* Release the semaphore */ 129 /* Release the semaphore */
131 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 130 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
@@ -414,9 +413,6 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
414 } 413 }
415 } 414 }
416 415
417 /* Set up flow control */
418 status = ixgbe_setup_fc_generic(hw, 0);
419
420 /* Add delay to filter out noises during initial link setup */ 416 /* Add delay to filter out noises during initial link setup */
421 msleep(50); 417 msleep(50);
422 418
@@ -462,11 +458,31 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
462 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 458 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
463 bool link_up = false; 459 bool link_up = false;
464 bool negotiation; 460 bool negotiation;
461 int i;
465 462
466 /* Mask off requested but non-supported speeds */ 463 /* Mask off requested but non-supported speeds */
467 hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation); 464 hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation);
468 speed &= phy_link_speed; 465 speed &= phy_link_speed;
469 466
467 /* Set autoneg_advertised value based on input link speed */
468 hw->phy.autoneg_advertised = 0;
469
470 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
471 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
472
473 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
474 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
475
476 /*
477 * When the driver changes the link speeds that it can support,
478 * it sets autotry_restart to true to indicate that we need to
479 * initiate a new autotry session with the link partner. To do
480 * so, we set the speed then disable and re-enable the tx laser, to
481 * alert the link partner that it also needs to restart autotry on its
482 * end. This is consistent with true clause 37 autoneg, which also
483 * involves a loss of signal.
484 */
485
470 /* 486 /*
471 * Try each speed one by one, highest priority first. We do this in 487 * Try each speed one by one, highest priority first. We do this in
472 * software because 10gb fiber doesn't support speed autonegotiation. 488 * software because 10gb fiber doesn't support speed autonegotiation.
@@ -475,21 +491,52 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
475 speedcnt++; 491 speedcnt++;
476 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 492 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
477 493
478 /* Set hardware SDP's */ 494 /* If we already have link at this speed, just jump out */
495 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
496
497 if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
498 goto out;
499
500 /* Set the module link speed */
479 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); 501 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
480 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 502 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
481 503
482 ixgbe_setup_mac_link_speed_82599(hw, 504 /* Allow module to change analog characteristics (1G->10G) */
483 IXGBE_LINK_SPEED_10GB_FULL, 505 msleep(40);
484 autoneg,
485 autoneg_wait_to_complete);
486 506
487 msleep(50); 507 status = ixgbe_setup_mac_link_speed_82599(hw,
488 508 IXGBE_LINK_SPEED_10GB_FULL,
489 /* If we have link, just jump out */ 509 autoneg,
490 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 510 autoneg_wait_to_complete);
491 if (link_up) 511 if (status != 0)
492 goto out; 512 goto out;
513
514 /* Flap the tx laser if it has not already been done */
515 if (hw->mac.autotry_restart) {
516 /* Disable tx laser; allow 100us to go dark per spec */
517 esdp_reg |= IXGBE_ESDP_SDP3;
518 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
519 udelay(100);
520
521 /* Enable tx laser; allow 2ms to light up per spec */
522 esdp_reg &= ~IXGBE_ESDP_SDP3;
523 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
524 msleep(2);
525
526 hw->mac.autotry_restart = false;
527 }
528
529 /* The controller may take up to 500ms at 10g to acquire link */
530 for (i = 0; i < 5; i++) {
531 /* Wait for the link partner to also set speed */
532 msleep(100);
533
534 /* If we have link, just jump out */
535 hw->mac.ops.check_link(hw, &phy_link_speed,
536 &link_up, false);
537 if (link_up)
538 goto out;
539 }
493 } 540 }
494 541
495 if (speed & IXGBE_LINK_SPEED_1GB_FULL) { 542 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
@@ -497,16 +544,44 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
497 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) 544 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
498 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 545 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
499 546
500 /* Set hardware SDP's */ 547 /* If we already have link at this speed, just jump out */
548 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
549
550 if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
551 goto out;
552
553 /* Set the module link speed */
501 esdp_reg &= ~IXGBE_ESDP_SDP5; 554 esdp_reg &= ~IXGBE_ESDP_SDP5;
502 esdp_reg |= IXGBE_ESDP_SDP5_DIR; 555 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
503 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 556 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
504 557
505 ixgbe_setup_mac_link_speed_82599( 558 /* Allow module to change analog characteristics (10G->1G) */
506 hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg, 559 msleep(40);
507 autoneg_wait_to_complete);
508 560
509 msleep(50); 561 status = ixgbe_setup_mac_link_speed_82599(hw,
562 IXGBE_LINK_SPEED_1GB_FULL,
563 autoneg,
564 autoneg_wait_to_complete);
565 if (status != 0)
566 goto out;
567
568 /* Flap the tx laser if it has not already been done */
569 if (hw->mac.autotry_restart) {
570 /* Disable tx laser; allow 100us to go dark per spec */
571 esdp_reg |= IXGBE_ESDP_SDP3;
572 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
573 udelay(100);
574
575 /* Enable tx laser; allow 2ms to light up per spec */
576 esdp_reg &= ~IXGBE_ESDP_SDP3;
577 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
578 msleep(2);
579
580 hw->mac.autotry_restart = false;
581 }
582
583 /* Wait for the link partner to also set speed */
584 msleep(100);
510 585
511 /* If we have link, just jump out */ 586 /* If we have link, just jump out */
512 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 587 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
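Both speed attempts in the hunks above repeat the same tx-laser flap: set SDP3 to darken the laser, wait 100us, clear it again and allow 2ms for the laser to light up, then clear autotry_restart, so the link partner sees a loss of signal and restarts its own autotry session (per the clause 37 comment earlier in this file's diff). Purely as an illustration of that repeated pattern, and not as code from this patch, the sequence could be captured in a helper like this (name invented):

static void example_flap_tx_laser(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	if (!hw->mac.autotry_restart)
		return;

	/* Disable tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	udelay(100);

	/* Enable tx laser; allow 2ms to light up per spec */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	msleep(2);

	hw->mac.autotry_restart = false;
}
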
@@ -572,6 +647,11 @@ s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
572 else 647 else
573 *speed = IXGBE_LINK_SPEED_100_FULL; 648 *speed = IXGBE_LINK_SPEED_100_FULL;
574 649
650 /* if link is down, zero out the current_mode */
651 if (*link_up == false) {
652 hw->fc.current_mode = ixgbe_fc_none;
653 hw->fc.fc_was_autonegged = false;
654 }
575 655
576 return 0; 656 return 0;
577} 657}
@@ -592,6 +672,7 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
592 s32 status = 0; 672 s32 status = 0;
593 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 673 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
594 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 674 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
675 u32 start_autoc = autoc;
595 u32 orig_autoc = 0; 676 u32 orig_autoc = 0;
596 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 677 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
597 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 678 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
@@ -604,6 +685,11 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
604 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); 685 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
605 speed &= link_capabilities; 686 speed &= link_capabilities;
606 687
688 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
689 status = IXGBE_ERR_LINK_SETUP;
690 goto out;
691 }
692
607 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ 693 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
608 if (hw->mac.orig_link_settings_stored) 694 if (hw->mac.orig_link_settings_stored)
609 orig_autoc = hw->mac.orig_autoc; 695 orig_autoc = hw->mac.orig_autoc;
@@ -611,11 +697,9 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
611 orig_autoc = autoc; 697 orig_autoc = autoc;
612 698
613 699
614 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 700 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
615 status = IXGBE_ERR_LINK_SETUP; 701 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
616 } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 702 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
617 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
618 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
619 /* Set KX4/KX/KR support according to speed requested */ 703 /* Set KX4/KX/KR support according to speed requested */
620 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); 704 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
621 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 705 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
@@ -647,7 +731,7 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
647 } 731 }
648 } 732 }
649 733
650 if (status == 0) { 734 if (autoc != start_autoc) {
651 /* Restart link */ 735 /* Restart link */
652 autoc |= IXGBE_AUTOC_AN_RESTART; 736 autoc |= IXGBE_AUTOC_AN_RESTART;
653 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 737 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
@@ -674,13 +758,11 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
674 } 758 }
675 } 759 }
676 760
677 /* Set up flow control */
678 status = ixgbe_setup_fc_generic(hw, 0);
679
680 /* Add delay to filter out noises during initial link setup */ 761 /* Add delay to filter out noises during initial link setup */
681 msleep(50); 762 msleep(50);
682 } 763 }
683 764
765out:
684 return status; 766 return status;
685} 767}
686 768
@@ -1083,6 +1165,931 @@ s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
1083} 1165}
1084 1166
1085/** 1167/**
1168 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1169 * @hw: pointer to hardware structure
1170 **/
1171s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1172{
1173 int i;
1174 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1175 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1176
1177 /*
                 1178	 * Before starting the reinitialization process,
1179 * FDIRCMD.CMD must be zero.
1180 */
1181 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1182 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1183 IXGBE_FDIRCMD_CMD_MASK))
1184 break;
1185 udelay(10);
1186 }
1187 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
                 1188		hw_dbg(hw, "Flow Director previous command isn't complete, "
                 1189		       "aborting table re-initialization.\n");
1190 return IXGBE_ERR_FDIR_REINIT_FAILED;
1191 }
1192
1193 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1194 IXGBE_WRITE_FLUSH(hw);
1195 /*
                 1196	 * The 82599 adapter's Flow Director init flow cannot be restarted, so
                 1197	 * work around the 82599 silicon errata by performing the following steps
1198 * before re-writing the FDIRCTRL control register with the same value.
1199 * - write 1 to bit 8 of FDIRCMD register &
1200 * - write 0 to bit 8 of FDIRCMD register
1201 */
1202 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1203 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1204 IXGBE_FDIRCMD_CLEARHT));
1205 IXGBE_WRITE_FLUSH(hw);
1206 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1207 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1208 ~IXGBE_FDIRCMD_CLEARHT));
1209 IXGBE_WRITE_FLUSH(hw);
1210 /*
1211 * Clear FDIR Hash register to clear any leftover hashes
1212 * waiting to be programmed.
1213 */
1214 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1215 IXGBE_WRITE_FLUSH(hw);
1216
1217 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1218 IXGBE_WRITE_FLUSH(hw);
1219
1220 /* Poll init-done after we write FDIRCTRL register */
1221 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1222 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1223 IXGBE_FDIRCTRL_INIT_DONE)
1224 break;
1225 udelay(10);
1226 }
1227 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1228 hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
1229 return IXGBE_ERR_FDIR_REINIT_FAILED;
1230 }
1231
1232 /* Clear FDIR statistics registers (read to clear) */
1233 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1234 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1235 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1236 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1237 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1238
1239 return 0;
1240}
1241
1242/**
1243 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1244 * @hw: pointer to hardware structure
1245 * @pballoc: which mode to allocate filters with
1246 **/
1247s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1248{
1249 u32 fdirctrl = 0;
1250 u32 pbsize;
1251 int i;
1252
1253 /*
1254 * Before enabling Flow Director, the Rx Packet Buffer size
1255 * must be reduced. The new value is the current size minus
1256 * flow director memory usage size.
1257 */
1258 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1259 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1260 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1261
1262 /*
1263 * The defaults in the HW for RX PB 1-7 are not zero and so should be
                 1264	 * initialized to zero for non-DCB mode, otherwise the actual total RX PB
1265 * would be bigger than programmed and filter space would run into
1266 * the PB 0 region.
1267 */
1268 for (i = 1; i < 8; i++)
1269 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1270
1271 /* Send interrupt when 64 filters are left */
1272 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1273
1274 /* Set the maximum length per hash bucket to 0xA filters */
1275 fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1276
1277 switch (pballoc) {
1278 case IXGBE_FDIR_PBALLOC_64K:
1279 /* 8k - 1 signature filters */
1280 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1281 break;
1282 case IXGBE_FDIR_PBALLOC_128K:
1283 /* 16k - 1 signature filters */
1284 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1285 break;
1286 case IXGBE_FDIR_PBALLOC_256K:
1287 /* 32k - 1 signature filters */
1288 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1289 break;
1290 default:
1291 /* bad value */
1292 return IXGBE_ERR_CONFIG;
1293 };
1294
1295 /* Move the flexible bytes to use the ethertype - shift 6 words */
1296 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1297
1298 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1299
1300 /* Prime the keys for hashing */
1301 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1302 htonl(IXGBE_ATR_BUCKET_HASH_KEY));
1303 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1304 htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
1305
1306 /*
1307 * Poll init-done after we write the register. Estimated times:
1308 * 10G: PBALLOC = 11b, timing is 60us
1309 * 1G: PBALLOC = 11b, timing is 600us
1310 * 100M: PBALLOC = 11b, timing is 6ms
1311 *
                 1312	 * Multiply these timings by 4 if under full Rx load
1313 *
1314 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1315 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1316 * this might not finish in our poll time, but we can live with that
1317 * for now.
1318 */
1319 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1320 IXGBE_WRITE_FLUSH(hw);
1321 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1322 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1323 IXGBE_FDIRCTRL_INIT_DONE)
1324 break;
1325 msleep(1);
1326 }
1327 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1328 hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
1329
1330 return 0;
1331}
1332
1333/**
1334 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1335 * @hw: pointer to hardware structure
1336 * @pballoc: which mode to allocate filters with
1337 **/
1338s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1339{
1340 u32 fdirctrl = 0;
1341 u32 pbsize;
1342 int i;
1343
1344 /*
1345 * Before enabling Flow Director, the Rx Packet Buffer size
1346 * must be reduced. The new value is the current size minus
1347 * flow director memory usage size.
1348 */
1349 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1350 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1351 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1352
1353 /*
1354 * The defaults in the HW for RX PB 1-7 are not zero and so should be
                 1355	 * initialized to zero for non-DCB mode, otherwise the actual total RX PB
1356 * would be bigger than programmed and filter space would run into
1357 * the PB 0 region.
1358 */
1359 for (i = 1; i < 8; i++)
1360 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1361
1362 /* Send interrupt when 64 filters are left */
1363 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1364
1365 switch (pballoc) {
1366 case IXGBE_FDIR_PBALLOC_64K:
1367 /* 2k - 1 perfect filters */
1368 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1369 break;
1370 case IXGBE_FDIR_PBALLOC_128K:
1371 /* 4k - 1 perfect filters */
1372 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1373 break;
1374 case IXGBE_FDIR_PBALLOC_256K:
1375 /* 8k - 1 perfect filters */
1376 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1377 break;
1378 default:
1379 /* bad value */
1380 return IXGBE_ERR_CONFIG;
1381 };
1382
1383 /* Turn perfect match filtering on */
1384 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1385 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1386
1387 /* Move the flexible bytes to use the ethertype - shift 6 words */
1388 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1389
1390 /* Prime the keys for hashing */
1391 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
1392 htonl(IXGBE_ATR_BUCKET_HASH_KEY));
1393 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1394 htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
1395
1396 /*
1397 * Poll init-done after we write the register. Estimated times:
1398 * 10G: PBALLOC = 11b, timing is 60us
1399 * 1G: PBALLOC = 11b, timing is 600us
1400 * 100M: PBALLOC = 11b, timing is 6ms
1401 *
                 1402	 * Multiply these timings by 4 if under full Rx load
1403 *
1404 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1405 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1406 * this might not finish in our poll time, but we can live with that
1407 * for now.
1408 */
1409
1410 /* Set the maximum length per hash bucket to 0xA filters */
1411 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1412
1413 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1414 IXGBE_WRITE_FLUSH(hw);
1415 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1416 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1417 IXGBE_FDIRCTRL_INIT_DONE)
1418 break;
1419 msleep(1);
1420 }
1421 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1422 hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
1423
1424 return 0;
1425}
1426
1427
1428/**
1429 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
1430 * @stream: input bitstream to compute the hash on
1431 * @key: 32-bit hash key
1432 **/
1433u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
1434{
1435 /*
1436 * The algorithm is as follows:
1437 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1438 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1439 * and A[n] x B[n] is bitwise AND between same length strings
1440 *
1441 * K[n] is 16 bits, defined as:
1442 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1443 * for n modulo 32 < 15, K[n] =
1444 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1445 *
1446 * S[n] is 16 bits, defined as:
1447 * for n >= 15, S[n] = S[n:n - 15]
1448 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1449 *
1450 * To simplify for programming, the algorithm is implemented
1451 * in software this way:
1452 *
1453 * Key[31:0], Stream[335:0]
1454 *
                 1455	 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0]}, i.e. the key concatenated 11 times
1456 * int_key[350:0] = tmp_key[351:1]
1457 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1458 *
1459 * hash[15:0] = 0;
1460 * for (i = 0; i < 351; i++) {
1461 * if (int_key[i])
1462 * hash ^= int_stream[(i + 15):i];
1463 * }
1464 */
1465
1466 union {
1467 u64 fill[6];
1468 u32 key[11];
1469 u8 key_stream[44];
1470 } tmp_key;
1471
1472 u8 *stream = (u8 *)atr_input;
1473 u8 int_key[44]; /* upper-most bit unused */
1474 u8 hash_str[46]; /* upper-most 2 bits unused */
1475 u16 hash_result = 0;
1476 int i, j, k, h;
1477
1478 /*
1479 * Initialize the fill member to prevent warnings
1480 * on some compilers
1481 */
1482 tmp_key.fill[0] = 0;
1483
1484 /* First load the temporary key stream */
1485 for (i = 0; i < 6; i++) {
1486 u64 fillkey = ((u64)key << 32) | key;
1487 tmp_key.fill[i] = fillkey;
1488 }
1489
1490 /*
1491 * Set the interim key for the hashing. Bit 352 is unused, so we must
1492 * shift and compensate when building the key.
1493 */
1494
1495 int_key[0] = tmp_key.key_stream[0] >> 1;
1496 for (i = 1, j = 0; i < 44; i++) {
1497 unsigned int this_key = tmp_key.key_stream[j] << 7;
1498 j++;
1499 int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
1500 }
1501
1502 /*
1503 * Set the interim bit string for the hashing. Bits 368 and 367 are
1504 * unused, so shift and compensate when building the string.
1505 */
1506 hash_str[0] = (stream[40] & 0x7f) >> 1;
1507 for (i = 1, j = 40; i < 46; i++) {
1508 unsigned int this_str = stream[j] << 7;
1509 j++;
1510 if (j > 41)
1511 j = 0;
1512 hash_str[i] = (u8)(this_str | (stream[j] >> 1));
1513 }
1514
1515 /*
1516 * Now compute the hash. i is the index into hash_str, j is into our
                 1517	 * key stream, k is counting the number of bits, and h iterates within
1518 * each byte.
1519 */
1520 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
1521 for (h = 0; h < 8 && k < 351; h++, k++) {
1522 if (int_key[j] & (1 << h)) {
1523 /*
1524 * Key bit is set, XOR in the current 16-bit
1525 * string. Example of processing:
1526 * h = 0,
1527 * tmp = (hash_str[i - 2] & 0 << 16) |
1528 * (hash_str[i - 1] & 0xff << 8) |
1529 * (hash_str[i] & 0xff >> 0)
1530 * So tmp = hash_str[15 + k:k], since the
                 1531				 * i - 2 clause rolls off the 16-bit value
1532 * h = 7,
1533 * tmp = (hash_str[i - 2] & 0x7f << 9) |
1534 * (hash_str[i - 1] & 0xff << 1) |
1535 * (hash_str[i] & 0x80 >> 7)
1536 */
1537 int tmp = (hash_str[i] >> h);
1538 tmp |= (hash_str[i - 1] << (8 - h));
1539 tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
1540 << (16 - h);
1541 hash_result ^= (u16)tmp;
1542 }
1543 }
1544 }
1545
1546 return hash_result;
1547}
1548
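For readers who find the byte-packed implementation above hard to follow, here is a bit-per-byte rendering of the simplified algorithm described in the function's opening comment. It is an editorial sketch, not driver code: the LSB-first mapping from the 42-byte ATR stream to Stream[335:0] is an assumption made for readability, not a statement about the hardware's exact bit ordering.

static u16 example_atr_hash_reference(const u8 *stream /* 42 bytes */, u32 key)
{
	u8 tmp_key[352], int_key[351];
	u8 s[336], int_stream[366];
	u16 hash = 0;
	int i, n;

	/* tmp_key[351:0] = Key[31:0] concatenated 11 times */
	for (i = 0; i < 352; i++)
		tmp_key[i] = (key >> (i % 32)) & 1;

	/* int_key[350:0] = tmp_key[351:1] */
	for (i = 0; i < 351; i++)
		int_key[i] = tmp_key[i + 1];

	/* Stream[335:0]: bit i taken LSB-first from the input (assumption) */
	for (i = 0; i < 336; i++)
		s[i] = (stream[i / 8] >> (i % 8)) & 1;

	/* int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321] */
	for (i = 0; i < 15; i++)
		int_stream[i] = s[321 + i];
	for (i = 0; i < 336; i++)
		int_stream[15 + i] = s[i];
	for (i = 0; i < 15; i++)
		int_stream[351 + i] = s[i];

	/* hash ^= int_stream[(i + 15):i] wherever int_key[i] is set */
	for (i = 0; i < 351; i++) {
		u16 window = 0;

		if (!int_key[i])
			continue;
		for (n = 0; n < 16; n++)
			window |= (u16)(int_stream[i + n] & 1) << n;
		hash ^= window;
	}

	return hash;
}
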
1549/**
1550 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1551 * @input: input stream to modify
1552 * @vlan: the VLAN id to load
1553 **/
1554s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1555{
1556 input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1557 input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1558
1559 return 0;
1560}
1561
1562/**
1563 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1564 * @input: input stream to modify
1565 * @src_addr: the IP address to load
1566 **/
1567s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1568{
1569 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1570 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1571 (src_addr >> 16) & 0xff;
1572 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1573 (src_addr >> 8) & 0xff;
1574 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1575
1576 return 0;
1577}
1578
1579/**
1580 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1581 * @input: input stream to modify
1582 * @dst_addr: the IP address to load
1583 **/
1584s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1585{
1586 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1587 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1588 (dst_addr >> 16) & 0xff;
1589 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1590 (dst_addr >> 8) & 0xff;
1591 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1592
1593 return 0;
1594}
1595
1596/**
1597 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1598 * @input: input stream to modify
1599 * @src_addr_1: the first 4 bytes of the IP address to load
1600 * @src_addr_2: the second 4 bytes of the IP address to load
1601 * @src_addr_3: the third 4 bytes of the IP address to load
1602 * @src_addr_4: the fourth 4 bytes of the IP address to load
1603 **/
1604s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1605 u32 src_addr_1, u32 src_addr_2,
1606 u32 src_addr_3, u32 src_addr_4)
1607{
1608 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1609 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
1610 (src_addr_4 >> 8) & 0xff;
1611 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
1612 (src_addr_4 >> 16) & 0xff;
1613 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
1614
1615 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
1616 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
1617 (src_addr_3 >> 8) & 0xff;
1618 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
1619 (src_addr_3 >> 16) & 0xff;
1620 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
1621
1622 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
1623 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
1624 (src_addr_2 >> 8) & 0xff;
1625 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
1626 (src_addr_2 >> 16) & 0xff;
1627 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
1628
1629 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
1630 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
1631 (src_addr_1 >> 8) & 0xff;
1632 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
1633 (src_addr_1 >> 16) & 0xff;
1634 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
1635
1636 return 0;
1637}
1638
1639/**
1640 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1641 * @input: input stream to modify
1642 * @dst_addr_1: the first 4 bytes of the IP address to load
1643 * @dst_addr_2: the second 4 bytes of the IP address to load
1644 * @dst_addr_3: the third 4 bytes of the IP address to load
1645 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1646 **/
1647s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1648 u32 dst_addr_1, u32 dst_addr_2,
1649 u32 dst_addr_3, u32 dst_addr_4)
1650{
1651 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1652 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
1653 (dst_addr_4 >> 8) & 0xff;
1654 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
1655 (dst_addr_4 >> 16) & 0xff;
1656 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
1657
1658 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
1659 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
1660 (dst_addr_3 >> 8) & 0xff;
1661 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
1662 (dst_addr_3 >> 16) & 0xff;
1663 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
1664
1665 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
1666 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
1667 (dst_addr_2 >> 8) & 0xff;
1668 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
1669 (dst_addr_2 >> 16) & 0xff;
1670 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
1671
1672 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
1673 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
1674 (dst_addr_1 >> 8) & 0xff;
1675 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
1676 (dst_addr_1 >> 16) & 0xff;
1677 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
1678
1679 return 0;
1680}
1681
1682/**
1683 * ixgbe_atr_set_src_port_82599 - Sets the source port
1684 * @input: input stream to modify
1685 * @src_port: the source port to load
1686 **/
1687s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1688{
1689 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1690 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1691
1692 return 0;
1693}
1694
1695/**
1696 * ixgbe_atr_set_dst_port_82599 - Sets the destination port
1697 * @input: input stream to modify
1698 * @dst_port: the destination port to load
1699 **/
1700s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1701{
1702 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1703 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1704
1705 return 0;
1706}
1707
1708/**
1709 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1710 * @input: input stream to modify
1711 * @flex_bytes: the flexible bytes to load
1712 **/
1713s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1714{
1715 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1716 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1717
1718 return 0;
1719}
1720
1721/**
1722 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
1723 * @input: input stream to modify
1724 * @vm_pool: the Virtual Machine pool to load
1725 **/
1726s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
1727{
1728 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1729
1730 return 0;
1731}
1732
1733/**
1734 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1735 * @input: input stream to modify
1736 * @l4type: the layer 4 type value to load
1737 **/
1738s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1739{
1740 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1741
1742 return 0;
1743}
1744
1745/**
1746 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1747 * @input: input stream to search
1748 * @vlan: the VLAN id to load
1749 **/
1750s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1751{
1752 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1753 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1754
1755 return 0;
1756}
1757
1758/**
1759 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
1760 * @input: input stream to search
1761 * @src_addr: the IP address to load
1762 **/
1763s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
1764{
1765 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1766 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1767 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1768 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1769
1770 return 0;
1771}
1772
1773/**
1774 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
1775 * @input: input stream to search
1776 * @dst_addr: the IP address to load
1777 **/
1778s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
1779{
1780 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
1781 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
1782 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
1783 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
1784
1785 return 0;
1786}
1787
1788/**
1789 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
1790 * @input: input stream to search
1791 * @src_addr_1: the first 4 bytes of the IP address to load
1792 * @src_addr_2: the second 4 bytes of the IP address to load
1793 * @src_addr_3: the third 4 bytes of the IP address to load
1794 * @src_addr_4: the fourth 4 bytes of the IP address to load
1795 **/
1796s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1797 u32 *src_addr_1, u32 *src_addr_2,
1798 u32 *src_addr_3, u32 *src_addr_4)
1799{
1800 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
                 1801	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
                 1802	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
                 1803	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
1804
1805 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
                 1806	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
                 1807	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
                 1808	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
1809
1810 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
                 1811	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
                 1812	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
                 1813	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1814
1815 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
                 1816	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
                 1817	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
                 1818	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1819
1820 return 0;
1821}
1822
1823/**
1824 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
1825 * @input: input stream to search
1826 * @dst_addr_1: the first 4 bytes of the IP address to load
1827 * @dst_addr_2: the second 4 bytes of the IP address to load
1828 * @dst_addr_3: the third 4 bytes of the IP address to load
1829 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1830 **/
1831s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
1832 u32 *dst_addr_1, u32 *dst_addr_2,
1833 u32 *dst_addr_3, u32 *dst_addr_4)
1834{
1835 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
                 1836	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
                 1837	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
                 1838	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
1839
1840 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
                 1841	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
                 1842	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
                 1843	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
1844
1845 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
                 1846	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
                 1847	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
                 1848	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
1849
1850 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
                 1851	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
                 1852	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
                 1853	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
1854
1855 return 0;
1856}
1857
1858/**
1859 * ixgbe_atr_get_src_port_82599 - Gets the source port
1860 * @input: input stream to modify
1861 * @src_port: the source port to load
1862 *
1863 * Even though the input is given in big-endian, the FDIRPORT registers
1864 * expect the ports to be programmed in little-endian. Hence the need to swap
1865 * endianness when retrieving the data. This can be confusing since the
1866 * internal hash engine expects it to be big-endian.
1867 **/
1868s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
1869{
1870 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
1871 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
1872
1873 return 0;
1874}
1875
1876/**
1877 * ixgbe_atr_get_dst_port_82599 - Gets the destination port
1878 * @input: input stream to modify
1879 * @dst_port: the destination port to load
1880 *
1881 * Even though the input is given in big-endian, the FDIRPORT registers
1882 * expect the ports to be programmed in little-endian. Hence the need to swap
1883 * endianness when retrieving the data. This can be confusing since the
1884 * internal hash engine expects it to be big-endian.
1885 **/
1886s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
1887{
1888 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
1889 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
1890
1891 return 0;
1892}
1893
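As a side note, the two port getters each fold two stream bytes into a 16-bit value, and the perfect-filter path further below packs the two ports into one FDIRPORT word with the destination port in the upper half. A minimal standalone sketch of that combination (plain C, not driver code; the byte values are invented and the 16-bit destination shift is an assumption standing in for IXGBE_FDIRPORT_DESTINATION_SHIFT):

#include <stdint.h>
#include <stdio.h>

/* First stream byte becomes the high byte, matching the getters above. */
static uint16_t get_port(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	uint8_t stream[4] = { 0x1f, 0x90, 0x00, 0x50 };	/* invented: ports 8080 and 80 */
	uint16_t src = get_port(&stream[0]);
	uint16_t dst = get_port(&stream[2]);

	/* FDIRPORT-style packing: destination shifted into the upper 16 bits
	 * (shift value assumed for this sketch) */
	uint32_t fdirport = src | ((uint32_t)dst << 16);

	printf("src=%u dst=%u fdirport=0x%08x\n",
	       (unsigned)src, (unsigned)dst, (unsigned)fdirport);
	return 0;
}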
1894/**
1895 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
1896 * @input: input stream to search
1897 * @flex_byte: the flexible bytes to load
1898 **/
1899s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
1900{
1901 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
1902 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
1903
1904 return 0;
1905}
1906
1907/**
1908 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
1909 * @input: input stream to search
1910 * @vm_pool: the Virtual Machine pool to load
1911 **/
1912s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
1913{
1914 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
1915
1916 return 0;
1917}
1918
1919/**
1920 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
1921 * @input: input stream to search
1922 * @l4type: the layer 4 type value to load
1923 **/
1924s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
1925{
1926 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
1927
1928 return 0;
1929}
1930
1931/**
1932 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1933 * @hw: pointer to hardware structure
1934 * @input: input bitstream
1935 * @queue: queue index to direct traffic to
1936 **/
1937s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1938 struct ixgbe_atr_input *input,
1939 u8 queue)
1940{
1941 u64 fdirhashcmd;
1942 u64 fdircmd;
1943 u32 fdirhash;
1944 u16 bucket_hash, sig_hash;
1945 u8 l4type;
1946
1947 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1948 IXGBE_ATR_BUCKET_HASH_KEY);
1949
1950 /* bucket_hash is only 15 bits */
1951 bucket_hash &= IXGBE_ATR_HASH_MASK;
1952
1953 sig_hash = ixgbe_atr_compute_hash_82599(input,
1954 IXGBE_ATR_SIGNATURE_HASH_KEY);
1955
1956 /* Get the l4type in order to program FDIRCMD properly */
1957 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
1958 ixgbe_atr_get_l4type_82599(input, &l4type);
1959
1960 /*
1961 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1962 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1963 */
1964 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1965
1966 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1967 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
1968
1969 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1970 case IXGBE_ATR_L4TYPE_TCP:
1971 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1972 break;
1973 case IXGBE_ATR_L4TYPE_UDP:
1974 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1975 break;
1976 case IXGBE_ATR_L4TYPE_SCTP:
1977 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1978 break;
1979 default:
1980 hw_dbg(hw, "Error on l4type input\n");
1981 return IXGBE_ERR_CONFIG;
1982 }
1983
1984 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
1985 fdircmd |= IXGBE_FDIRCMD_IPV6;
1986
1987 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
1988 fdirhashcmd = ((fdircmd << 32) | fdirhash);
1989
1990 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1991
1992 return 0;
1993}
1994
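As a side note, the comment in the function above describes the 64-bit layout the hardware expects: FDIRHASH in the low 32 bits and FDIRCMD in the high 32 bits, with the bucket hash masked to 15 bits and the signature hash shifted above it. A minimal standalone sketch of that packing (plain C, not driver code; the flag bits and hash values are invented, and the 16-bit signature shift is an assumption standing in for IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT):

#include <stdint.h>
#include <stdio.h>

#define SIG_SW_INDEX_SHIFT 16	/* assumed for this sketch */

int main(void)
{
	uint32_t bucket_hash = 0x1234 & 0x7fff;	/* bucket hash is only 15 bits */
	uint32_t sig_hash    = 0xbeef;		/* invented signature hash */
	uint64_t fdircmd     = 0x00008005;	/* invented command flag bits */

	/* low 32 bits: FDIRHASH = signature hash above the bucket hash */
	uint32_t fdirhash = (sig_hash << SIG_SW_INDEX_SHIFT) | bucket_hash;

	/* high 32 bits: FDIRCMD, written together as one 64-bit value */
	uint64_t fdirhashcmd = (fdircmd << 32) | fdirhash;

	printf("FDIRHASH=0x%08x FDIRHASHCMD=0x%016llx\n",
	       (unsigned)fdirhash, (unsigned long long)fdirhashcmd);
	return 0;
}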
1995/**
1996 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1997 * @hw: pointer to hardware structure
1998 * @input: input bitstream
1999 * @queue: queue index to direct traffic to
2000 *
2001 * Note that the caller to this function must lock before calling, since the
2002 * hardware writes must be protected from one another.
2003 **/
2004s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2005 struct ixgbe_atr_input *input,
2006 u16 soft_id,
2007 u8 queue)
2008{
2009 u32 fdircmd = 0;
2010 u32 fdirhash;
2011 u32 src_ipv4, dst_ipv4;
2012 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
2013 u16 src_port, dst_port, vlan_id, flex_bytes;
2014 u16 bucket_hash;
2015 u8 l4type;
2016
2017 /* Get our input values */
2018 ixgbe_atr_get_l4type_82599(input, &l4type);
2019
2020 /*
2021 * Check l4type formatting, and bail out before we touch the hardware
2022 * if there's a configuration issue
2023 */
2024 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
2025 case IXGBE_ATR_L4TYPE_TCP:
2026 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
2027 break;
2028 case IXGBE_ATR_L4TYPE_UDP:
2029 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
2030 break;
2031 case IXGBE_ATR_L4TYPE_SCTP:
2032 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
2033 break;
2034 default:
2035 hw_dbg(hw, "Error on l4type input\n");
2036 return IXGBE_ERR_CONFIG;
2037 }
2038
2039 bucket_hash = ixgbe_atr_compute_hash_82599(input,
2040 IXGBE_ATR_BUCKET_HASH_KEY);
2041
2042 /* bucket_hash is only 15 bits */
2043 bucket_hash &= IXGBE_ATR_HASH_MASK;
2044
2045 ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
2046 ixgbe_atr_get_src_port_82599(input, &src_port);
2047 ixgbe_atr_get_dst_port_82599(input, &dst_port);
2048 ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
2049
2050 fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
2051
2052 /* Now figure out if we're IPv4 or IPv6 */
2053 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
2054 /* IPv6 */
2055 ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
2056 &src_ipv6_3, &src_ipv6_4);
2057
2058 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
2059 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
2060 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
2061 /* The last 4 bytes is the same register as IPv4 */
2062 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
2063
2064 fdircmd |= IXGBE_FDIRCMD_IPV6;
2065 fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
2066 } else {
2067 /* IPv4 */
2068 ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
2069 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
2070
2071 }
2072
2073 ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
2074 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
2075
2076 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
2077 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
2078 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
2079 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
2080
2081 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
2082 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
2083 fdircmd |= IXGBE_FDIRCMD_LAST;
2084 fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
2085 fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
2086
2087 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2088 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
2089
2090 return 0;
2091}
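As a side note, the kernel-doc above requires the caller to serialize these hardware writes. A userspace analogy of that calling pattern, with a pthread mutex standing in for whatever lock the driver actually uses (all names invented; this is a sketch of the pattern, not driver code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t fdir_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for ixgbe_fdir_add_perfect_filter_82599(); it only logs the call */
static int add_perfect_filter(uint16_t soft_id, uint8_t queue)
{
	printf("programming perfect filter %u -> queue %u\n",
	       (unsigned)soft_id, (unsigned)queue);
	return 0;
}

/* caller-side pattern: hold the lock around the whole register write sequence */
static int add_filter_locked(uint16_t soft_id, uint8_t queue)
{
	int ret;

	pthread_mutex_lock(&fdir_lock);
	ret = add_perfect_filter(soft_id, queue);
	pthread_mutex_unlock(&fdir_lock);
	return ret;
}

int main(void)
{
	return add_filter_locked(1, 0);
}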
2092/**
1086 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 2093 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1087 * @hw: pointer to hardware structure 2094 * @hw: pointer to hardware structure
1088 * @reg: analog register to read 2095 * @reg: analog register to read
@@ -1135,8 +2142,9 @@ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1135s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) 2142s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1136{ 2143{
1137 u32 q_num; 2144 u32 q_num;
2145 s32 ret_val;
1138 2146
1139 ixgbe_start_hw_generic(hw); 2147 ret_val = ixgbe_start_hw_generic(hw);
1140 2148
1141 /* Clear the rate limiters */ 2149 /* Clear the rate limiters */
1142 for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) { 2150 for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) {
@@ -1145,7 +2153,13 @@ s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1145 } 2153 }
1146 IXGBE_WRITE_FLUSH(hw); 2154 IXGBE_WRITE_FLUSH(hw);
1147 2155
1148 return 0; 2156 /* We need to run link autotry after the driver loads */
2157 hw->mac.autotry_restart = true;
2158
2159 if (ret_val == 0)
2160 ret_val = ixgbe_verify_fw_version_82599(hw);
2161
2162 return ret_val;
1149} 2163}
1150 2164
1151/** 2165/**
@@ -1397,6 +2411,54 @@ san_mac_addr_out:
1397 return 0; 2411 return 0;
1398} 2412}
1399 2413
2414/**
2415 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2416 * @hw: pointer to hardware structure
2417 *
2418 * Verifies that the installed firmware version is 0.6 or higher
2419 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2420 *
2421 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2422 * if the FW version is not supported.
2423 **/
2424static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2425{
2426 s32 status = IXGBE_ERR_EEPROM_VERSION;
2427 u16 fw_offset, fw_ptp_cfg_offset;
2428 u16 fw_version = 0;
2429
2430 /* firmware check is only necessary for SFI devices */
2431 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2432 status = 0;
2433 goto fw_version_out;
2434 }
2435
2436 /* get the offset to the Firmware Module block */
2437 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2438
2439 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2440 goto fw_version_out;
2441
2442 /* get the offset to the Pass Through Patch Configuration block */
2443 hw->eeprom.ops.read(hw, (fw_offset +
2444 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2445 &fw_ptp_cfg_offset);
2446
2447 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2448 goto fw_version_out;
2449
2450 /* get the firmware version */
2451 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2452 IXGBE_FW_PATCH_VERSION_4),
2453 &fw_version);
2454
2455 if (fw_version > 0x5)
2456 status = 0;
2457
2458fw_version_out:
2459 return status;
2460}
2461
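As a side note, the check above walks a short pointer chain in the EEPROM (firmware module pointer, then pass-through patch configuration block, then the patch version word), bailing out on 0x0000/0xFFFF sentinel words and accepting versions above 0x5. A standalone sketch of the same walk over an in-memory word array (plain C; the word offsets and contents are invented stand-ins for IXGBE_FW_PTR and friends):

#include <stdint.h>
#include <stdio.h>

#define WORD_INVALID(w)	((w) == 0x0000 || (w) == 0xFFFF)

/* toy EEPROM image, indexed by 16-bit word offset; contents are made up */
static const uint16_t eeprom[32] = {
	[0x0F]        = 0x0010,	/* pretend firmware module pointer */
	[0x10 + 0x04] = 0x0018,	/* pass-through patch config pointer */
	[0x18 + 0x07] = 0x0006,	/* patch version word -> version 0.6 */
};

static int fw_version_ok(void)
{
	uint16_t fw_offset = eeprom[0x0F];
	uint16_t cfg_offset, version;

	if (WORD_INVALID(fw_offset))
		return 0;
	cfg_offset = eeprom[fw_offset + 0x04];
	if (WORD_INVALID(cfg_offset))
		return 0;
	version = eeprom[cfg_offset + 0x07];
	return version > 0x5;
}

int main(void)
{
	printf("firmware version %s\n",
	       fw_version_ok() ? "ok" : "missing or too old");
	return 0;
}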
1400static struct ixgbe_mac_operations mac_ops_82599 = { 2462static struct ixgbe_mac_operations mac_ops_82599 = {
1401 .init_hw = &ixgbe_init_hw_generic, 2463 .init_hw = &ixgbe_init_hw_generic,
1402 .reset_hw = &ixgbe_reset_hw_82599, 2464 .reset_hw = &ixgbe_reset_hw_82599,
@@ -1432,7 +2494,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
1432 .disable_mc = &ixgbe_disable_mc_generic, 2494 .disable_mc = &ixgbe_disable_mc_generic,
1433 .clear_vfta = &ixgbe_clear_vfta_82599, 2495 .clear_vfta = &ixgbe_clear_vfta_82599,
1434 .set_vfta = &ixgbe_set_vfta_82599, 2496 .set_vfta = &ixgbe_set_vfta_82599,
1435 .setup_fc = &ixgbe_setup_fc_generic, 2497 .fc_enable = &ixgbe_fc_enable_generic,
1436 .init_uta_tables = &ixgbe_init_uta_tables_82599, 2498 .init_uta_tables = &ixgbe_init_uta_tables_82599,
1437 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2499 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
1438}; 2500};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 0cc3c47cb453..96a185953777 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -28,6 +28,8 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/list.h>
32#include <linux/netdevice.h>
31 33
32#include "ixgbe.h" 34#include "ixgbe.h"
33#include "ixgbe_common.h" 35#include "ixgbe_common.h"
@@ -83,6 +85,9 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
83 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 85 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
84 IXGBE_WRITE_FLUSH(hw); 86 IXGBE_WRITE_FLUSH(hw);
85 87
88 /* Setup flow control */
89 ixgbe_setup_fc(hw, 0);
90
86 /* Clear adapter stopped flag */ 91 /* Clear adapter stopped flag */
87 hw->adapter_stopped = false; 92 hw->adapter_stopped = false;
88 93
@@ -101,13 +106,17 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
101 **/ 106 **/
102s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) 107s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
103{ 108{
109 s32 status;
110
104 /* Reset the hardware */ 111 /* Reset the hardware */
105 hw->mac.ops.reset_hw(hw); 112 status = hw->mac.ops.reset_hw(hw);
106 113
107 /* Start the HW */ 114 if (status == 0) {
108 hw->mac.ops.start_hw(hw); 115 /* Start the HW */
116 status = hw->mac.ops.start_hw(hw);
117 }
109 118
110 return 0; 119 return status;
111} 120}
112 121
113/** 122/**
@@ -1356,15 +1365,14 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1356 * Drivers using secondary unicast addresses must set user_set_promisc when 1365 * Drivers using secondary unicast addresses must set user_set_promisc when
1357 * manually putting the device into promiscuous mode. 1366 * manually putting the device into promiscuous mode.
1358 **/ 1367 **/
1359s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, 1368s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1360 u32 addr_count, ixgbe_mc_addr_itr next) 1369 struct list_head *uc_list)
1361{ 1370{
1362 u8 *addr;
1363 u32 i; 1371 u32 i;
1364 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; 1372 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1365 u32 uc_addr_in_use; 1373 u32 uc_addr_in_use;
1366 u32 fctrl; 1374 u32 fctrl;
1367 u32 vmdq; 1375 struct netdev_hw_addr *ha;
1368 1376
1369 /* 1377 /*
1370 * Clear accounting of old secondary address list, 1378 * Clear accounting of old secondary address list,
@@ -1382,10 +1390,9 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
1382 } 1390 }
1383 1391
1384 /* Add the new addresses */ 1392 /* Add the new addresses */
1385 for (i = 0; i < addr_count; i++) { 1393 list_for_each_entry(ha, uc_list, list) {
1386 hw_dbg(hw, " Adding the secondary addresses:\n"); 1394 hw_dbg(hw, " Adding the secondary addresses:\n");
1387 addr = next(hw, &addr_list, &vmdq); 1395 ixgbe_add_uc_addr(hw, ha->addr, 0);
1388 ixgbe_add_uc_addr(hw, addr, vmdq);
1389 } 1396 }
1390 1397
1391 if (hw->addr_ctrl.overflow_promisc) { 1398 if (hw->addr_ctrl.overflow_promisc) {
@@ -1577,17 +1584,16 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1577} 1584}
1578 1585
1579/** 1586/**
1580 * ixgbe_fc_enable - Enable flow control 1587 * ixgbe_fc_enable_generic - Enable flow control
1581 * @hw: pointer to hardware structure 1588 * @hw: pointer to hardware structure
1582 * @packetbuf_num: packet buffer number (0-7) 1589 * @packetbuf_num: packet buffer number (0-7)
1583 * 1590 *
1584 * Enable flow control according to the current settings. 1591 * Enable flow control according to the current settings.
1585 **/ 1592 **/
1586s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) 1593s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1587{ 1594{
1588 s32 ret_val = 0; 1595 s32 ret_val = 0;
1589 u32 mflcn_reg; 1596 u32 mflcn_reg, fccfg_reg;
1590 u32 fccfg_reg;
1591 u32 reg; 1597 u32 reg;
1592 u32 rx_pba_size; 1598 u32 rx_pba_size;
1593 1599
@@ -1596,7 +1602,12 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
1596 goto out; 1602 goto out;
1597 1603
1598#endif /* CONFIG_DCB */ 1604#endif /* CONFIG_DCB */
1605 /* Negotiate the fc mode to use */
1606 ret_val = ixgbe_fc_autoneg(hw);
1607 if (ret_val)
1608 goto out;
1599 1609
1610 /* Disable any previous flow control settings */
1600 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 1611 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
1601 mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); 1612 mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
1602 1613
@@ -1616,7 +1627,10 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
1616 */ 1627 */
1617 switch (hw->fc.current_mode) { 1628 switch (hw->fc.current_mode) {
1618 case ixgbe_fc_none: 1629 case ixgbe_fc_none:
1619 /* Flow control completely disabled by software override. */ 1630 /*
1631 * Flow control is disabled by software override or autoneg.
1632 * The code below will actually disable it in the HW.
1633 */
1620 break; 1634 break;
1621 case ixgbe_fc_rx_pause: 1635 case ixgbe_fc_rx_pause:
1622 /* 1636 /*
@@ -1645,7 +1659,7 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
1645 case ixgbe_fc_pfc: 1659 case ixgbe_fc_pfc:
1646 goto out; 1660 goto out;
1647 break; 1661 break;
1648#endif 1662#endif /* CONFIG_DCB */
1649 default: 1663 default:
1650 hw_dbg(hw, "Flow control param set incorrectly\n"); 1664 hw_dbg(hw, "Flow control param set incorrectly\n");
1651 ret_val = -IXGBE_ERR_CONFIG; 1665 ret_val = -IXGBE_ERR_CONFIG;
@@ -1653,7 +1667,7 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
1653 break; 1667 break;
1654 } 1668 }
1655 1669
1656 /* Enable 802.3x based flow control settings. */ 1670 /* Set 802.3x based flow control settings. */
1657 mflcn_reg |= IXGBE_MFLCN_DPF; 1671 mflcn_reg |= IXGBE_MFLCN_DPF;
1658 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 1672 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
1659 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 1673 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
@@ -1661,10 +1675,12 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
1661 reg = IXGBE_READ_REG(hw, IXGBE_MTQC); 1675 reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
1662 /* Thresholds are different for link flow control when in DCB mode */ 1676 /* Thresholds are different for link flow control when in DCB mode */
1663 if (reg & IXGBE_MTQC_RT_ENA) { 1677 if (reg & IXGBE_MTQC_RT_ENA) {
1678 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1679
1664 /* Always disable XON for LFC when in DCB mode */ 1680 /* Always disable XON for LFC when in DCB mode */
1665 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), 0); 1681 reg = (rx_pba_size >> 5) & 0xFFE0;
1682 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
1666 1683
1667 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1668 reg = (rx_pba_size >> 2) & 0xFFE0; 1684 reg = (rx_pba_size >> 2) & 0xFFE0;
1669 if (hw->fc.current_mode & ixgbe_fc_tx_pause) 1685 if (hw->fc.current_mode & ixgbe_fc_tx_pause)
1670 reg |= IXGBE_FCRTH_FCEN; 1686 reg |= IXGBE_FCRTH_FCEN;
@@ -1709,100 +1725,41 @@ out:
1709 * ixgbe_fc_autoneg - Configure flow control 1725 * ixgbe_fc_autoneg - Configure flow control
1710 * @hw: pointer to hardware structure 1726 * @hw: pointer to hardware structure
1711 * 1727 *
1712 * Negotiates flow control capabilities with link partner using autoneg and 1728 * Compares our advertised flow control capabilities to those advertised by
1713 * applies the results. 1729 * our link partner, and determines the proper flow control mode to use.
1714 **/ 1730 **/
1715s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) 1731s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1716{ 1732{
1717 s32 ret_val = 0; 1733 s32 ret_val = 0;
1718 u32 i, reg, pcs_anadv_reg, pcs_lpab_reg; 1734 ixgbe_link_speed speed;
1719 1735 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1720 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 1736 bool link_up;
1721 1737
1722 /* 1738 /*
1723 * The possible values of fc.current_mode are: 1739 * AN should have completed when the cable was plugged in.
1724 * 0: Flow control is completely disabled 1740 * Look for reasons to bail out. Bail out if:
1725 * 1: Rx flow control is enabled (we can receive pause frames, 1741 * - FC autoneg is disabled, or if
1726 * but not send pause frames). 1742 * - we don't have multispeed fiber, or if
1727 * 2: Tx flow control is enabled (we can send pause frames but 1743 * - we're not running at 1G, or if
1728 * we do not support receiving pause frames). 1744 * - link is not up, or if
1729 * 3: Both Rx and Tx flow control (symmetric) are enabled. 1745 * - link is up but AN did not complete, or if
1730 * 4: Priority Flow Control is enabled. 1746 * - link is up and AN completed but timed out
1731 * other: Invalid. 1747 *
1748 * Since we're being called from an LSC, link is already known to be up.
1749 * So use link_up_wait_to_complete=false.
1732 */ 1750 */
1733 switch (hw->fc.current_mode) { 1751 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1734 case ixgbe_fc_none: 1752 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1735 /* Flow control completely disabled by software override. */ 1753
1736 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 1754 if (hw->fc.disable_fc_autoneg ||
1737 break; 1755 !hw->phy.multispeed_fiber ||
1738 case ixgbe_fc_rx_pause: 1756 (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
1739 /* 1757 !link_up ||
1740 * Rx Flow control is enabled and Tx Flow control is 1758 ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1741 * disabled by software override. Since there really 1759 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1742 * isn't a way to advertise that we are capable of RX 1760 hw->fc.fc_was_autonegged = false;
1743 * Pause ONLY, we will advertise that we support both 1761 hw->fc.current_mode = hw->fc.requested_mode;
1744 * symmetric and asymmetric Rx PAUSE. Later, we will 1762 hw_dbg(hw, "Autoneg FC was skipped.\n");
1745 * disable the adapter's ability to send PAUSE frames.
1746 */
1747 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1748 break;
1749 case ixgbe_fc_tx_pause:
1750 /*
1751 * Tx Flow control is enabled, and Rx Flow control is
1752 * disabled by software override.
1753 */
1754 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
1755 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
1756 break;
1757 case ixgbe_fc_full:
1758 /* Flow control (both Rx and Tx) is enabled by SW override. */
1759 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1760 break;
1761#ifdef CONFIG_DCB
1762 case ixgbe_fc_pfc:
1763 goto out;
1764 break;
1765#endif
1766 default:
1767 hw_dbg(hw, "Flow control param set incorrectly\n");
1768 ret_val = -IXGBE_ERR_CONFIG;
1769 goto out;
1770 break;
1771 }
1772
1773 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
1774 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
1775
1776 /* Set PCS register for autoneg */
1777 /* Enable and restart autoneg */
1778 reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
1779
1780 /* Disable AN timeout */
1781 if (hw->fc.strict_ieee)
1782 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
1783
1784 hw_dbg(hw, "Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
1785 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
1786
1787 /* See if autonegotiation has succeeded */
1788 hw->mac.autoneg_succeeded = 0;
1789 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
1790 msleep(10);
1791 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1792 if ((reg & (IXGBE_PCS1GLSTA_LINK_OK |
1793 IXGBE_PCS1GLSTA_AN_COMPLETE)) ==
1794 (IXGBE_PCS1GLSTA_LINK_OK |
1795 IXGBE_PCS1GLSTA_AN_COMPLETE)) {
1796 if (!(reg & IXGBE_PCS1GLSTA_AN_TIMED_OUT))
1797 hw->mac.autoneg_succeeded = 1;
1798 break;
1799 }
1800 }
1801
1802 if (!hw->mac.autoneg_succeeded) {
1803 /* Autoneg failed to achieve a link, so we turn fc off */
1804 hw->fc.current_mode = ixgbe_fc_none;
1805 hw_dbg(hw, "Flow Control = NONE.\n");
1806 goto out; 1763 goto out;
1807 } 1764 }
1808 1765
@@ -1845,21 +1802,23 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1845 hw_dbg(hw, "Flow Control = NONE.\n"); 1802 hw_dbg(hw, "Flow Control = NONE.\n");
1846 } 1803 }
1847 1804
1805 /* Record that current_mode is the result of a successful autoneg */
1806 hw->fc.fc_was_autonegged = true;
1807
1848out: 1808out:
1849 return ret_val; 1809 return ret_val;
1850} 1810}
1851 1811
1852/** 1812/**
1853 * ixgbe_setup_fc_generic - Set up flow control 1813 * ixgbe_setup_fc - Set up flow control
1854 * @hw: pointer to hardware structure 1814 * @hw: pointer to hardware structure
1855 * 1815 *
1856 * Sets up flow control. 1816 * Called at init time to set up flow control.
1857 **/ 1817 **/
1858s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num) 1818s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1859{ 1819{
1860 s32 ret_val = 0; 1820 s32 ret_val = 0;
1861 ixgbe_link_speed speed; 1821 u32 reg;
1862 bool link_up;
1863 1822
1864#ifdef CONFIG_DCB 1823#ifdef CONFIG_DCB
1865 if (hw->fc.requested_mode == ixgbe_fc_pfc) { 1824 if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1881,16 +1840,14 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1881 * because it causes the controller to just blast out fc packets. 1840 * because it causes the controller to just blast out fc packets.
1882 */ 1841 */
1883 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { 1842 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
1884 if (hw->fc.requested_mode != ixgbe_fc_none) { 1843 hw_dbg(hw, "Invalid water mark configuration\n");
1885 hw_dbg(hw, "Invalid water mark configuration\n"); 1844 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1886 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 1845 goto out;
1887 goto out;
1888 }
1889 } 1846 }
1890 1847
1891 /* 1848 /*
1892 * Validate the requested mode. Strict IEEE mode does not allow 1849 * Validate the requested mode. Strict IEEE mode does not allow
1893 * ixgbe_fc_rx_pause because it will cause testing anomalies. 1850 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
1894 */ 1851 */
1895 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { 1852 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
1896 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict " 1853 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
@@ -1907,21 +1864,77 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1907 hw->fc.requested_mode = ixgbe_fc_full; 1864 hw->fc.requested_mode = ixgbe_fc_full;
1908 1865
1909 /* 1866 /*
1910 * Save off the requested flow control mode for use later. Depending 1867 * Set up the 1G flow control advertisement registers so the HW will be
1911 * on the link partner's capabilities, we may or may not use this mode. 1868 * able to do fc autoneg once the cable is plugged in. If we end up
1869 * using 10g instead, this is harmless.
1912 */ 1870 */
1913 hw->fc.current_mode = hw->fc.requested_mode; 1871 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1914
1915 /* Decide whether to use autoneg or not. */
1916 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1917 if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber &&
1918 (speed == IXGBE_LINK_SPEED_1GB_FULL))
1919 ret_val = ixgbe_fc_autoneg(hw);
1920 1872
1921 if (ret_val) 1873 /*
1874 * The possible values of fc.requested_mode are:
1875 * 0: Flow control is completely disabled
1876 * 1: Rx flow control is enabled (we can receive pause frames,
1877 * but not send pause frames).
1878 * 2: Tx flow control is enabled (we can send pause frames but
1879 * we do not support receiving pause frames).
1880 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1881#ifdef CONFIG_DCB
1882 * 4: Priority Flow Control is enabled.
1883#endif
1884 * other: Invalid.
1885 */
1886 switch (hw->fc.requested_mode) {
1887 case ixgbe_fc_none:
1888 /* Flow control completely disabled by software override. */
1889 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1890 break;
1891 case ixgbe_fc_rx_pause:
1892 /*
1893 * Rx Flow control is enabled and Tx Flow control is
1894 * disabled by software override. Since there really
1895 * isn't a way to advertise that we are capable of RX
1896 * Pause ONLY, we will advertise that we support both
1897 * symmetric and asymmetric Rx PAUSE. Later, we will
1898 * disable the adapter's ability to send PAUSE frames.
1899 */
1900 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1901 break;
1902 case ixgbe_fc_tx_pause:
1903 /*
1904 * Tx Flow control is enabled, and Rx Flow control is
1905 * disabled by software override.
1906 */
1907 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
1908 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
1909 break;
1910 case ixgbe_fc_full:
1911 /* Flow control (both Rx and Tx) is enabled by SW override. */
1912 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1913 break;
1914#ifdef CONFIG_DCB
1915 case ixgbe_fc_pfc:
1916 goto out;
1917 break;
1918#endif /* CONFIG_DCB */
1919 default:
1920 hw_dbg(hw, "Flow control param set incorrectly\n");
1921 ret_val = -IXGBE_ERR_CONFIG;
1922 goto out; 1922 goto out;
1923 break;
1924 }
1923 1925
1924 ret_val = ixgbe_fc_enable(hw, packetbuf_num); 1926 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
1927 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
1928
1929 /* Enable and restart autoneg to inform the link partner */
1930 reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
1931
1932 /* Disable AN timeout */
1933 if (hw->fc.strict_ieee)
1934 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
1935
1936 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
1937 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
1925 1938
1926out: 1939out:
1927 return ret_val; 1940 return ret_val;
@@ -2068,6 +2081,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2068 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2081 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2069 2082
2070 if (!link_up) { 2083 if (!link_up) {
2084 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2071 autoc_reg |= IXGBE_AUTOC_FLU; 2085 autoc_reg |= IXGBE_AUTOC_FLU;
2072 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2086 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2073 msleep(10); 2087 msleep(10);
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index dd260890ad0a..0d34d4d8244c 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -59,13 +59,13 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
59s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, 59s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
60 u32 mc_addr_count, 60 u32 mc_addr_count,
61 ixgbe_mc_addr_itr func); 61 ixgbe_mc_addr_itr func);
62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, 62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
63 u32 addr_count, ixgbe_mc_addr_itr func); 63 struct list_head *uc_list);
64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
65s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 65s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
66s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 66s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
67s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num); 67s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
68s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packtetbuf_num); 68s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
69s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); 69s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
70 70
71s32 ixgbe_validate_mac_addr(u8 *mac_addr); 71s32 ixgbe_validate_mac_addr(u8 *mac_addr);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index f4417fc3b0fd..589f62c7062a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -295,7 +295,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
295 /* If PFC is disabled globally then fall back to LFC. */ 295 /* If PFC is disabled globally then fall back to LFC. */
296 if (!dcb_config->pfc_mode_enable) { 296 if (!dcb_config->pfc_mode_enable) {
297 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 297 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
298 hw->mac.ops.setup_fc(hw, i); 298 hw->mac.ops.fc_enable(hw, i);
299 goto out; 299 goto out;
300 } 300 }
301 301
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 35255b8e90b7..86f4f3e36f27 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -68,6 +68,8 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
68 {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, 68 {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
69 {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, 69 {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
70 {"hw_rsc_count", IXGBE_STAT(rsc_count)}, 70 {"hw_rsc_count", IXGBE_STAT(rsc_count)},
71 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
72 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
71 {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, 73 {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
72 {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, 74 {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
73 {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, 75 {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
@@ -118,6 +120,13 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
118 IXGBE_PB_STATS_LEN + \ 120 IXGBE_PB_STATS_LEN + \
119 IXGBE_QUEUE_STATS_LEN) 121 IXGBE_QUEUE_STATS_LEN)
120 122
123static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
124 "Register test (offline)", "Eeprom test (offline)",
125 "Interrupt test (offline)", "Loopback test (offline)",
126 "Link test (on/offline)"
127};
128#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
129
121static int ixgbe_get_settings(struct net_device *netdev, 130static int ixgbe_get_settings(struct net_device *netdev,
122 struct ethtool_cmd *ecmd) 131 struct ethtool_cmd *ecmd)
123{ 132{
@@ -129,11 +138,12 @@ static int ixgbe_get_settings(struct net_device *netdev,
129 ecmd->supported = SUPPORTED_10000baseT_Full; 138 ecmd->supported = SUPPORTED_10000baseT_Full;
130 ecmd->autoneg = AUTONEG_ENABLE; 139 ecmd->autoneg = AUTONEG_ENABLE;
131 ecmd->transceiver = XCVR_EXTERNAL; 140 ecmd->transceiver = XCVR_EXTERNAL;
132 if (hw->phy.media_type == ixgbe_media_type_copper) { 141 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
142 (hw->mac.type == ixgbe_mac_82599EB)) {
133 ecmd->supported |= (SUPPORTED_1000baseT_Full | 143 ecmd->supported |= (SUPPORTED_1000baseT_Full |
134 SUPPORTED_TP | SUPPORTED_Autoneg); 144 SUPPORTED_Autoneg);
135 145
136 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg); 146 ecmd->advertising = ADVERTISED_Autoneg;
137 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 147 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
138 ecmd->advertising |= ADVERTISED_10000baseT_Full; 148 ecmd->advertising |= ADVERTISED_10000baseT_Full;
139 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) 149 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
@@ -148,7 +158,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
148 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 158 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
149 ADVERTISED_1000baseT_Full); 159 ADVERTISED_1000baseT_Full);
150 160
151 ecmd->port = PORT_TP; 161 if (hw->phy.media_type == ixgbe_media_type_copper) {
162 ecmd->supported |= SUPPORTED_TP;
163 ecmd->advertising |= ADVERTISED_TP;
164 ecmd->port = PORT_TP;
165 } else {
166 ecmd->supported |= SUPPORTED_FIBRE;
167 ecmd->advertising |= ADVERTISED_FIBRE;
168 ecmd->port = PORT_FIBRE;
169 }
152 } else if (hw->phy.media_type == ixgbe_media_type_backplane) { 170 } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
153 /* Set as FIBRE until SERDES defined in kernel */ 171 /* Set as FIBRE until SERDES defined in kernel */
154 switch (hw->device_id) { 172 switch (hw->device_id) {
@@ -196,16 +214,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
196 struct ixgbe_adapter *adapter = netdev_priv(netdev); 214 struct ixgbe_adapter *adapter = netdev_priv(netdev);
197 struct ixgbe_hw *hw = &adapter->hw; 215 struct ixgbe_hw *hw = &adapter->hw;
198 u32 advertised, old; 216 u32 advertised, old;
199 s32 err; 217 s32 err = 0;
200 218
201 switch (hw->phy.media_type) { 219 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
202 case ixgbe_media_type_fiber: 220 (hw->mac.type == ixgbe_mac_82599EB)) {
203 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
204 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
205 return -EINVAL;
206 /* in this case we currently only support 10Gb/FULL */
207 break;
208 case ixgbe_media_type_copper:
209 /* 10000/copper and 1000/copper must autoneg 221 /* 10000/copper and 1000/copper must autoneg
210 * this function does not support any duplex forcing, but can 222 * this function does not support any duplex forcing, but can
211 * limit the advertising of the adapter to only 10000 or 1000 */ 223 * limit the advertising of the adapter to only 10000 or 1000 */
@@ -221,20 +233,23 @@ static int ixgbe_set_settings(struct net_device *netdev,
221 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 233 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
222 234
223 if (old == advertised) 235 if (old == advertised)
224 break; 236 return err;
225 /* this sets the link speed and restarts auto-neg */ 237 /* this sets the link speed and restarts auto-neg */
238 hw->mac.autotry_restart = true;
226 err = hw->mac.ops.setup_link_speed(hw, advertised, true, true); 239 err = hw->mac.ops.setup_link_speed(hw, advertised, true, true);
227 if (err) { 240 if (err) {
228 DPRINTK(PROBE, INFO, 241 DPRINTK(PROBE, INFO,
229 "setup link failed with code %d\n", err); 242 "setup link failed with code %d\n", err);
230 hw->mac.ops.setup_link_speed(hw, old, true, true); 243 hw->mac.ops.setup_link_speed(hw, old, true, true);
231 } 244 }
232 break; 245 } else {
233 default: 246 /* in this case we currently only support 10Gb/FULL */
234 break; 247 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
248 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
249 return -EINVAL;
235 } 250 }
236 251
237 return 0; 252 return err;
238} 253}
239 254
240static void ixgbe_get_pauseparam(struct net_device *netdev, 255static void ixgbe_get_pauseparam(struct net_device *netdev,
@@ -276,6 +291,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
276{ 291{
277 struct ixgbe_adapter *adapter = netdev_priv(netdev); 292 struct ixgbe_adapter *adapter = netdev_priv(netdev);
278 struct ixgbe_hw *hw = &adapter->hw; 293 struct ixgbe_hw *hw = &adapter->hw;
294 struct ixgbe_fc_info fc;
279 295
280#ifdef CONFIG_DCB 296#ifdef CONFIG_DCB
281 if (adapter->dcb_cfg.pfc_mode_enable || 297 if (adapter->dcb_cfg.pfc_mode_enable ||
@@ -284,26 +300,37 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
284 return -EINVAL; 300 return -EINVAL;
285 301
286#endif 302#endif
303
304 fc = hw->fc;
305
287 if (pause->autoneg != AUTONEG_ENABLE) 306 if (pause->autoneg != AUTONEG_ENABLE)
288 hw->fc.disable_fc_autoneg = true; 307 fc.disable_fc_autoneg = true;
289 else 308 else
290 hw->fc.disable_fc_autoneg = false; 309 fc.disable_fc_autoneg = false;
291 310
292 if (pause->rx_pause && pause->tx_pause) 311 if (pause->rx_pause && pause->tx_pause)
293 hw->fc.requested_mode = ixgbe_fc_full; 312 fc.requested_mode = ixgbe_fc_full;
294 else if (pause->rx_pause && !pause->tx_pause) 313 else if (pause->rx_pause && !pause->tx_pause)
295 hw->fc.requested_mode = ixgbe_fc_rx_pause; 314 fc.requested_mode = ixgbe_fc_rx_pause;
296 else if (!pause->rx_pause && pause->tx_pause) 315 else if (!pause->rx_pause && pause->tx_pause)
297 hw->fc.requested_mode = ixgbe_fc_tx_pause; 316 fc.requested_mode = ixgbe_fc_tx_pause;
298 else if (!pause->rx_pause && !pause->tx_pause) 317 else if (!pause->rx_pause && !pause->tx_pause)
299 hw->fc.requested_mode = ixgbe_fc_none; 318 fc.requested_mode = ixgbe_fc_none;
300 else 319 else
301 return -EINVAL; 320 return -EINVAL;
302 321
303#ifdef CONFIG_DCB 322#ifdef CONFIG_DCB
304 adapter->last_lfc_mode = hw->fc.requested_mode; 323 adapter->last_lfc_mode = fc.requested_mode;
305#endif 324#endif
306 hw->mac.ops.setup_fc(hw, 0); 325
326 /* if the thing changed then we'll update and use new autoneg */
327 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
328 hw->fc = fc;
329 if (netif_running(netdev))
330 ixgbe_reinit_locked(adapter);
331 else
332 ixgbe_reset(adapter);
333 }
307 334
308 return 0; 335 return 0;
309} 336}
@@ -743,6 +770,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
743 strncpy(drvinfo->fw_version, firmware_version, 32); 770 strncpy(drvinfo->fw_version, firmware_version, 32);
744 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 771 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
745 drvinfo->n_stats = IXGBE_STATS_LEN; 772 drvinfo->n_stats = IXGBE_STATS_LEN;
773 drvinfo->testinfo_len = IXGBE_TEST_LEN;
746 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 774 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
747} 775}
748 776
@@ -814,7 +842,6 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
814 } 842 }
815 goto err_setup; 843 goto err_setup;
816 } 844 }
817 temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
818 } 845 }
819 need_update = true; 846 need_update = true;
820 } 847 }
@@ -844,7 +871,6 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
844 } 871 }
845 goto err_setup; 872 goto err_setup;
846 } 873 }
847 temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
848 } 874 }
849 need_update = true; 875 need_update = true;
850 } 876 }
@@ -884,6 +910,8 @@ err_setup:
884static int ixgbe_get_sset_count(struct net_device *netdev, int sset) 910static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
885{ 911{
886 switch (sset) { 912 switch (sset) {
913 case ETH_SS_TEST:
914 return IXGBE_TEST_LEN;
887 case ETH_SS_STATS: 915 case ETH_SS_STATS:
888 return IXGBE_STATS_LEN; 916 return IXGBE_STATS_LEN;
889 default: 917 default:
@@ -938,6 +966,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
938 int i; 966 int i;
939 967
940 switch (stringset) { 968 switch (stringset) {
969 case ETH_SS_TEST:
970 memcpy(data, *ixgbe_gstrings_test,
971 IXGBE_TEST_LEN * ETH_GSTRING_LEN);
972 break;
941 case ETH_SS_STATS: 973 case ETH_SS_STATS:
942 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 974 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
943 memcpy(p, ixgbe_gstrings_stats[i].stat_string, 975 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
@@ -975,6 +1007,815 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
975 } 1007 }
976} 1008}
977 1009
1010static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1011{
1012 struct ixgbe_hw *hw = &adapter->hw;
1013 bool link_up;
1014 u32 link_speed = 0;
1015 *data = 0;
1016
1017 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1018 if (link_up)
1019 return *data;
1020 else
1021 *data = 1;
1022 return *data;
1023}
1024
1025/* ethtool register test data */
1026struct ixgbe_reg_test {
1027 u16 reg;
1028 u8 array_len;
1029 u8 test_type;
1030 u32 mask;
1031 u32 write;
1032};
1033
1034/* In the hardware, registers are laid out either singly, in arrays
1035 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1036 * most tests take place on arrays or single registers (handled
1037 * as a single-element array) and special-case the tables.
1038 * Table tests are always pattern tests.
1039 *
1040 * We also make provision for some required setup steps by specifying
1041 * registers to be written without any read-back testing.
1042 */
1043
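As a side note, the spacing rules described in the comment above can be made concrete with a tiny standalone sketch that prints the offsets a four-entry array-spaced test and a four-entry contiguous 32-bit table test would touch (plain C, independent of the driver; the base addresses are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t array_base = 0x1000;	/* invented base of an array-spaced register */
	const uint32_t table_base = 0x5200;	/* invented base of a contiguous 32-bit table */
	int i;

	for (i = 0; i < 4; i++)
		printf("array[%d] at 0x%04x, table[%d] at 0x%04x\n",
		       i, (unsigned)(array_base + i * 0x40),	/* arrays: entries 0x40 bytes apart */
		       i, (unsigned)(table_base + i * 4));	/* tables: packed 32-bit entries */
	return 0;
}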
1044#define PATTERN_TEST 1
1045#define SET_READ_TEST 2
1046#define WRITE_NO_TEST 3
1047#define TABLE32_TEST 4
1048#define TABLE64_TEST_LO 5
1049#define TABLE64_TEST_HI 6
1050
1051/* default 82599 register test */
1052static struct ixgbe_reg_test reg_test_82599[] = {
1053 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1054 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1055 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1056 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1057 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1058 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1059 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1060 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1061 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1062 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1063 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1064 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1065 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1066 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1067 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1068 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1069 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1070 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1071 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1072 { 0, 0, 0, 0 }
1073};
1074
1075/* default 82598 register test */
1076static struct ixgbe_reg_test reg_test_82598[] = {
1077 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1078 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1079 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1080 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1081 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1082 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1083 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1084 /* Enable all four RX queues before testing. */
1085 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1086 /* RDH is read-only for 82598, only test RDT. */
1087 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1088 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1089 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1090 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1091 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1092 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1093 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1094 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1095 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1096 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1097 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1098 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1099 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1100 { 0, 0, 0, 0 }
1101};
1102
1103#define REG_PATTERN_TEST(R, M, W) \
1104{ \
1105 u32 pat, val, before; \
1106 const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
1107 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
1108 before = readl(adapter->hw.hw_addr + R); \
1109 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
1110 val = readl(adapter->hw.hw_addr + R); \
1111 if (val != (_test[pat] & W & M)) { \
1112 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
1113 "0x%08X expected 0x%08X\n", \
1114 R, val, (_test[pat] & W & M)); \
1115 *data = R; \
1116 writel(before, adapter->hw.hw_addr + R); \
1117 return 1; \
1118 } \
1119 writel(before, adapter->hw.hw_addr + R); \
1120 } \
1121}
1122
1123#define REG_SET_AND_CHECK(R, M, W) \
1124{ \
1125 u32 val, before; \
1126 before = readl(adapter->hw.hw_addr + R); \
1127 writel((W & M), (adapter->hw.hw_addr + R)); \
1128 val = readl(adapter->hw.hw_addr + R); \
1129 if ((W & M) != (val & M)) { \
1130 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
1131 "expected 0x%08X\n", R, (val & M), (W & M)); \
1132 *data = R; \
1133 writel(before, (adapter->hw.hw_addr + R)); \
1134 return 1; \
1135 } \
1136 writel(before, (adapter->hw.hw_addr + R)); \
1137}
1138
1139static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1140{
1141 struct ixgbe_reg_test *test;
1142 u32 value, before, after;
1143 u32 i, toggle;
1144
1145 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1146 toggle = 0x7FFFF30F;
1147 test = reg_test_82599;
1148 } else {
1149 toggle = 0x7FFFF3FF;
1150 test = reg_test_82598;
1151 }
1152
1153 /*
1154 * Because the status register is such a special case,
1155 * we handle it separately from the rest of the register
1156 * tests. Some bits are read-only, some toggle, and some
1157 * are writeable on newer MACs.
1158 */
1159 before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
1160 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
1161 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1162 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1163 if (value != after) {
1164 DPRINTK(DRV, ERR, "failed STATUS register test got: "
1165 "0x%08X expected: 0x%08X\n", after, value);
1166 *data = 1;
1167 return 1;
1168 }
1169 /* restore previous status */
1170 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
1171
1172 /*
1173 * Perform the remainder of the register test, looping through
1174 * the test table until we either fail or reach the null entry.
1175 */
1176 while (test->reg) {
1177 for (i = 0; i < test->array_len; i++) {
1178 switch (test->test_type) {
1179 case PATTERN_TEST:
1180 REG_PATTERN_TEST(test->reg + (i * 0x40),
1181 test->mask,
1182 test->write);
1183 break;
1184 case SET_READ_TEST:
1185 REG_SET_AND_CHECK(test->reg + (i * 0x40),
1186 test->mask,
1187 test->write);
1188 break;
1189 case WRITE_NO_TEST:
1190 writel(test->write,
1191 (adapter->hw.hw_addr + test->reg)
1192 + (i * 0x40));
1193 break;
1194 case TABLE32_TEST:
1195 REG_PATTERN_TEST(test->reg + (i * 4),
1196 test->mask,
1197 test->write);
1198 break;
1199 case TABLE64_TEST_LO:
1200 REG_PATTERN_TEST(test->reg + (i * 8),
1201 test->mask,
1202 test->write);
1203 break;
1204 case TABLE64_TEST_HI:
1205 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1206 test->mask,
1207 test->write);
1208 break;
1209 }
1210 }
1211 test++;
1212 }
1213
1214 *data = 0;
1215 return 0;
1216}
1217
1218static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1219{
1220 struct ixgbe_hw *hw = &adapter->hw;
1221 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1222 *data = 1;
1223 else
1224 *data = 0;
1225 return *data;
1226}
1227
1228static irqreturn_t ixgbe_test_intr(int irq, void *data)
1229{
1230 struct net_device *netdev = (struct net_device *) data;
1231 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1232
1233 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1234
1235 return IRQ_HANDLED;
1236}
1237
1238static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1239{
1240 struct net_device *netdev = adapter->netdev;
1241 u32 mask, i = 0, shared_int = true;
1242 u32 irq = adapter->pdev->irq;
1243
1244 *data = 0;
1245
1246 /* Hook up test interrupt handler just for this test */
1247 if (adapter->msix_entries) {
1248 /* NOTE: we don't test MSI-X interrupts here, yet */
1249 return 0;
1250 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1251 shared_int = false;
1252 if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
1253 netdev)) {
1254 *data = 1;
1255 return -1;
1256 }
1257 } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
1258 netdev->name, netdev)) {
1259 shared_int = false;
1260 } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
1261 netdev->name, netdev)) {
1262 *data = 1;
1263 return -1;
1264 }
1265 DPRINTK(HW, INFO, "testing %s interrupt\n",
1266 (shared_int ? "shared" : "unshared"));
1267
1268 /* Disable all the interrupts */
1269 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1270 msleep(10);
1271
1272 /* Test each interrupt */
1273 for (; i < 10; i++) {
1274 /* Interrupt to test */
1275 mask = 1 << i;
1276
1277 if (!shared_int) {
1278 /*
1279 * Disable the interrupts to be reported in
1280 * the cause register and then force the same
1281 * interrupt and see if one gets posted. If
1282 * an interrupt was posted to the bus, the
1283 * test failed.
1284 */
1285 adapter->test_icr = 0;
1286 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1287 ~mask & 0x00007FFF);
1288 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1289 ~mask & 0x00007FFF);
1290 msleep(10);
1291
1292 if (adapter->test_icr & mask) {
1293 *data = 3;
1294 break;
1295 }
1296 }
1297
1298 /*
1299 * Enable the interrupt to be reported in the cause
1300 * register and then force the same interrupt and see
1301 * if one gets posted. If an interrupt was not posted
1302 * to the bus, the test failed.
1303 */
1304 adapter->test_icr = 0;
1305 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1306 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1307 msleep(10);
1308
1309 if (!(adapter->test_icr & mask)) {
1310 *data = 4;
1311 break;
1312 }
1313
1314 if (!shared_int) {
1315 /*
1316 * Disable the other interrupts to be reported in
1317 * the cause register and then force the other
1318 * interrupts and see if any get posted. If
1319 * an interrupt was posted to the bus, the
1320 * test failed.
1321 */
1322 adapter->test_icr = 0;
1323 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1324 ~mask & 0x00007FFF);
1325 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1326 ~mask & 0x00007FFF);
1327 msleep(10);
1328
1329 if (adapter->test_icr) {
1330 *data = 5;
1331 break;
1332 }
1333 }
1334 }
1335
1336 /* Disable all the interrupts */
1337 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1338 msleep(10);
1339
1340 /* Unhook test interrupt handler */
1341 free_irq(irq, netdev);
1342
1343 return *data;
1344}
1345
1346static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1347{
1348 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1349 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1350 struct ixgbe_hw *hw = &adapter->hw;
1351 struct pci_dev *pdev = adapter->pdev;
1352 u32 reg_ctl;
1353 int i;
1354
1355 /* shut down the DMA engines now so they can be reinitialized later */
1356
1357 /* first Rx */
1358 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1359 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1360 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1361 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
1362 reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
1363 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
1364
1365 /* now Tx */
1366 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
1367 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1368 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
1369 if (hw->mac.type == ixgbe_mac_82599EB) {
1370 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1371 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1372 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1373 }
1374
1375 ixgbe_reset(adapter);
1376
1377 if (tx_ring->desc && tx_ring->tx_buffer_info) {
1378 for (i = 0; i < tx_ring->count; i++) {
1379 struct ixgbe_tx_buffer *buf =
1380 &(tx_ring->tx_buffer_info[i]);
1381 if (buf->dma)
1382 pci_unmap_single(pdev, buf->dma, buf->length,
1383 PCI_DMA_TODEVICE);
1384 if (buf->skb)
1385 dev_kfree_skb(buf->skb);
1386 }
1387 }
1388
1389 if (rx_ring->desc && rx_ring->rx_buffer_info) {
1390 for (i = 0; i < rx_ring->count; i++) {
1391 struct ixgbe_rx_buffer *buf =
1392 &(rx_ring->rx_buffer_info[i]);
1393 if (buf->dma)
1394 pci_unmap_single(pdev, buf->dma,
1395 IXGBE_RXBUFFER_2048,
1396 PCI_DMA_FROMDEVICE);
1397 if (buf->skb)
1398 dev_kfree_skb(buf->skb);
1399 }
1400 }
1401
1402 if (tx_ring->desc) {
1403 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
1404 tx_ring->dma);
1405 tx_ring->desc = NULL;
1406 }
1407 if (rx_ring->desc) {
1408 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
1409 rx_ring->dma);
1410 rx_ring->desc = NULL;
1411 }
1412
1413 kfree(tx_ring->tx_buffer_info);
1414 tx_ring->tx_buffer_info = NULL;
1415 kfree(rx_ring->rx_buffer_info);
1416 rx_ring->rx_buffer_info = NULL;
1417
1418 return;
1419}
1420
1421static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1422{
1423 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1424 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1425 struct pci_dev *pdev = adapter->pdev;
1426 u32 rctl, reg_data;
1427 int i, ret_val;
1428
1429 /* Setup Tx descriptor ring and Tx buffers */
1430
1431 if (!tx_ring->count)
1432 tx_ring->count = IXGBE_DEFAULT_TXD;
1433
1434 tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
1435 sizeof(struct ixgbe_tx_buffer),
1436 GFP_KERNEL);
1437 if (!(tx_ring->tx_buffer_info)) {
1438 ret_val = 1;
1439 goto err_nomem;
1440 }
1441
1442 tx_ring->size = tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc);
1443 tx_ring->size = ALIGN(tx_ring->size, 4096);
1444 if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1445 &tx_ring->dma))) {
1446 ret_val = 2;
1447 goto err_nomem;
1448 }
1449 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1450
1451 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
1452 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1453 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
1454 ((u64) tx_ring->dma >> 32));
1455 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
1456 tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc));
1457 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
1458 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
1459
1460 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1461 reg_data |= IXGBE_HLREG0_TXPADEN;
1462 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1463
1464 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1465 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1466 reg_data |= IXGBE_DMATXCTL_TE;
1467 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1468 }
1469 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
1470 reg_data |= IXGBE_TXDCTL_ENABLE;
1471 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
1472
1473 for (i = 0; i < tx_ring->count; i++) {
1474 struct ixgbe_legacy_tx_desc *desc = IXGBE_TX_DESC(*tx_ring, i);
1475 struct sk_buff *skb;
1476 unsigned int size = 1024;
1477
1478 skb = alloc_skb(size, GFP_KERNEL);
1479 if (!skb) {
1480 ret_val = 3;
1481 goto err_nomem;
1482 }
1483 skb_put(skb, size);
1484 tx_ring->tx_buffer_info[i].skb = skb;
1485 tx_ring->tx_buffer_info[i].length = skb->len;
1486 tx_ring->tx_buffer_info[i].dma =
1487 pci_map_single(pdev, skb->data, skb->len,
1488 PCI_DMA_TODEVICE);
1489 desc->buffer_addr = cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
1490 desc->lower.data = cpu_to_le32(skb->len);
1491 desc->lower.data |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
1492 IXGBE_TXD_CMD_IFCS |
1493 IXGBE_TXD_CMD_RS);
1494 desc->upper.data = 0;
1495 }
1496
1497 /* Setup Rx Descriptor ring and Rx buffers */
1498
1499 if (!rx_ring->count)
1500 rx_ring->count = IXGBE_DEFAULT_RXD;
1501
1502 rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
1503 sizeof(struct ixgbe_rx_buffer),
1504 GFP_KERNEL);
1505 if (!(rx_ring->rx_buffer_info)) {
1506 ret_val = 4;
1507 goto err_nomem;
1508 }
1509
1510 rx_ring->size = rx_ring->count * sizeof(struct ixgbe_legacy_rx_desc);
1511 rx_ring->size = ALIGN(rx_ring->size, 4096);
1512 if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1513 &rx_ring->dma))) {
1514 ret_val = 5;
1515 goto err_nomem;
1516 }
1517 rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1518
1519 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1520 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1521 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
1522 ((u64)rx_ring->dma & 0xFFFFFFFF));
1523 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
1524 ((u64) rx_ring->dma >> 32));
1525 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
1526 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
1527 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
1528
1529 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1530 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1531 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1532
1533 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1534 reg_data &= ~IXGBE_HLREG0_LPBK;
1535 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1536
1537 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
1538#define IXGBE_RDRXCTL_RDMTS_MASK 0x00000003 /* Receive Descriptor Minimum
1539 Threshold Size mask */
1540 reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
1541 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
1542
1543 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
1544#define IXGBE_MCSTCTRL_MO_MASK 0x00000003 /* Multicast Offset mask */
1545 reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
1546 reg_data |= adapter->hw.mac.mc_filter_type;
1547 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
1548
1549 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
1550 reg_data |= IXGBE_RXDCTL_ENABLE;
1551 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
1552 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1553 int j = adapter->rx_ring[0].reg_idx;
1554 u32 k;
1555 for (k = 0; k < 10; k++) {
1556 if (IXGBE_READ_REG(&adapter->hw,
1557 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1558 break;
1559 else
1560 msleep(1);
1561 }
1562 }
1563
1564 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1565 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1566
1567 for (i = 0; i < rx_ring->count; i++) {
1568 struct ixgbe_legacy_rx_desc *rx_desc =
1569 IXGBE_RX_DESC(*rx_ring, i);
1570 struct sk_buff *skb;
1571
1572 skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
1573 if (!skb) {
1574 ret_val = 6;
1575 goto err_nomem;
1576 }
1577 skb_reserve(skb, NET_IP_ALIGN);
1578 rx_ring->rx_buffer_info[i].skb = skb;
1579 rx_ring->rx_buffer_info[i].dma =
1580 pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
1581 PCI_DMA_FROMDEVICE);
1582 rx_desc->buffer_addr =
1583 cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
1584 memset(skb->data, 0x00, skb->len);
1585 }
1586
1587 return 0;
1588
1589err_nomem:
1590 ixgbe_free_desc_rings(adapter);
1591 return ret_val;
1592}
1593
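For reference, the test ring setup above sizes each descriptor area as count * sizeof(descriptor), rounded up to a 4 KiB boundary before the coherent DMA allocation. A minimal arithmetic sketch, assuming 512 test descriptors and a 16-byte legacy descriptor (both figures are illustrative, not taken from the headers shown here):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* illustrative only: 512 test descriptors, 16 bytes per legacy descriptor */
	unsigned long count = 512, desc_size = 16;
	unsigned long size = count * desc_size;		/* 8192 bytes */

	/* round the DMA allocation up to a 4 KiB boundary, as done above */
	size = ALIGN(size, 4096UL);
	printf("descriptor ring allocation: %lu bytes\n", size);
	return 0;
}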
1594static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1595{
1596 struct ixgbe_hw *hw = &adapter->hw;
1597 u32 reg_data;
1598
1599 /* right now we only support MAC loopback in the driver */
1600
1601 /* Setup MAC loopback */
1602 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1603 reg_data |= IXGBE_HLREG0_LPBK;
1604 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1605
1606 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
1607 reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1608 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1609 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
1610
1611 /* Disable Atlas Tx lanes; re-enabled in reset path */
1612 if (hw->mac.type == ixgbe_mac_82598EB) {
1613 u8 atlas;
1614
1615 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1616 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1617 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1618
1619 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1620 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1621 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1622
1623 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1624 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1625 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1626
1627 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1628 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1629 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1630 }
1631
1632 return 0;
1633}
1634
1635static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1636{
1637 u32 reg_data;
1638
1639 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1640 reg_data &= ~IXGBE_HLREG0_LPBK;
1641 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1642}
1643
1644static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1645 unsigned int frame_size)
1646{
1647 memset(skb->data, 0xFF, frame_size);
1648 frame_size &= ~1;
1649 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1650 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1651 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
1652}
1653
1654static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1655 unsigned int frame_size)
1656{
1657 frame_size &= ~1;
1658 if (*(skb->data + 3) == 0xFF) {
1659 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1660 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
1661 return 0;
1662 }
1663 }
1664 return 13;
1665}
1666
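The two helpers above define the loopback payload: the buffer is filled with 0xFF, the second half (less one byte) is overwritten with 0xAA, and marker bytes 0xBE and 0xAF are planted at offsets frame_size/2 + 10 and frame_size/2 + 12; a received frame passes only if byte 3 is still 0xFF and both markers survived. A standalone user-space sketch of the same pattern (illustrative only, not part of the driver):

#include <stdio.h>
#include <string.h>

static void create_lbtest_frame(unsigned char *data, unsigned int frame_size)
{
	memset(data, 0xFF, frame_size);
	frame_size &= ~1U;
	memset(&data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	data[frame_size / 2 + 10] = 0xBE;
	data[frame_size / 2 + 12] = 0xAF;
}

static int check_lbtest_frame(const unsigned char *data, unsigned int frame_size)
{
	frame_size &= ~1U;
	if (data[3] == 0xFF &&
	    data[frame_size / 2 + 10] == 0xBE &&
	    data[frame_size / 2 + 12] == 0xAF)
		return 0;	/* pattern intact */
	return 13;		/* same mis-compare code the driver uses */
}

int main(void)
{
	unsigned char frame[1024];

	create_lbtest_frame(frame, sizeof(frame));
	printf("loopback pattern check: %d\n",
	       check_lbtest_frame(frame, sizeof(frame)));
	return 0;
}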
1667static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1668{
1669 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1670 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1671 struct pci_dev *pdev = adapter->pdev;
1672 int i, j, k, l, lc, good_cnt, ret_val = 0;
1673 unsigned long time;
1674
1675 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
1676
1677 /*
1678 * Calculate the loop count based on the largest descriptor ring
1679 * The idea is to wrap the largest ring a number of times using 64
1680 * send/receive pairs during each loop
1681 */
1682
1683 if (rx_ring->count <= tx_ring->count)
1684 lc = ((tx_ring->count / 64) * 2) + 1;
1685 else
1686 lc = ((rx_ring->count / 64) * 2) + 1;
1687
1688 k = l = 0;
1689 for (j = 0; j <= lc; j++) {
1690 for (i = 0; i < 64; i++) {
1691 ixgbe_create_lbtest_frame(
1692 tx_ring->tx_buffer_info[k].skb,
1693 1024);
1694 pci_dma_sync_single_for_device(pdev,
1695 tx_ring->tx_buffer_info[k].dma,
1696 tx_ring->tx_buffer_info[k].length,
1697 PCI_DMA_TODEVICE);
1698 if (unlikely(++k == tx_ring->count))
1699 k = 0;
1700 }
1701 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
1702 msleep(200);
1703 /* set the start time for the receive */
1704 time = jiffies;
1705 good_cnt = 0;
1706 do {
1707 /* receive the sent packets */
1708 pci_dma_sync_single_for_cpu(pdev,
1709 rx_ring->rx_buffer_info[l].dma,
1710 IXGBE_RXBUFFER_2048,
1711 PCI_DMA_FROMDEVICE);
1712 ret_val = ixgbe_check_lbtest_frame(
1713 rx_ring->rx_buffer_info[l].skb, 1024);
1714 if (!ret_val)
1715 good_cnt++;
1716 if (++l == rx_ring->count)
1717 l = 0;
1718 /*
1719 * time + 20 msecs (200 msecs on 2.4) is more than
 1720                  * enough time to complete the receives; if it's
 1721                  * exceeded, break out and flag an error
1722 */
1723 } while (good_cnt < 64 && jiffies < (time + 20));
1724 if (good_cnt != 64) {
1725 /* ret_val is the same as mis-compare */
1726 ret_val = 13;
1727 break;
1728 }
1729 if (jiffies >= (time + 20)) {
1730 /* Error code for time out error */
1731 ret_val = 14;
1732 break;
1733 }
1734 }
1735
1736 return ret_val;
1737}
1738
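The loop count computed above wraps the larger of the two test rings roughly twice, sending bursts of 64 frames per outer iteration. A small worked example, assuming both rings hold 512 descriptors (an illustrative figure, not necessarily the value of IXGBE_DEFAULT_TXD/RXD):

#include <stdio.h>

int main(void)
{
	/* illustrative only: assume both test rings hold 512 descriptors */
	unsigned int tx_count = 512, rx_count = 512, lc;

	if (rx_count <= tx_count)
		lc = ((tx_count / 64) * 2) + 1;	/* 17 */
	else
		lc = ((rx_count / 64) * 2) + 1;

	/* 17 bursts * 64 frames = 1088 frames, wrapping a 512-entry ring twice */
	printf("loop count %u, frames sent %u\n", lc, lc * 64);
	return 0;
}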
1739static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
1740{
1741 *data = ixgbe_setup_desc_rings(adapter);
1742 if (*data)
1743 goto out;
1744 *data = ixgbe_setup_loopback_test(adapter);
1745 if (*data)
1746 goto err_loopback;
1747 *data = ixgbe_run_loopback_test(adapter);
1748 ixgbe_loopback_cleanup(adapter);
1749
1750err_loopback:
1751 ixgbe_free_desc_rings(adapter);
1752out:
1753 return *data;
1754}
1755
1756static void ixgbe_diag_test(struct net_device *netdev,
1757 struct ethtool_test *eth_test, u64 *data)
1758{
1759 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1760 bool if_running = netif_running(netdev);
1761
1762 set_bit(__IXGBE_TESTING, &adapter->state);
1763 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1764 /* Offline tests */
1765
1766 DPRINTK(HW, INFO, "offline testing starting\n");
1767
1768 /* Link test performed before hardware reset so autoneg doesn't
1769 * interfere with test result */
1770 if (ixgbe_link_test(adapter, &data[4]))
1771 eth_test->flags |= ETH_TEST_FL_FAILED;
1772
1773 if (if_running)
1774 /* indicate we're in test mode */
1775 dev_close(netdev);
1776 else
1777 ixgbe_reset(adapter);
1778
1779 DPRINTK(HW, INFO, "register testing starting\n");
1780 if (ixgbe_reg_test(adapter, &data[0]))
1781 eth_test->flags |= ETH_TEST_FL_FAILED;
1782
1783 ixgbe_reset(adapter);
1784 DPRINTK(HW, INFO, "eeprom testing starting\n");
1785 if (ixgbe_eeprom_test(adapter, &data[1]))
1786 eth_test->flags |= ETH_TEST_FL_FAILED;
1787
1788 ixgbe_reset(adapter);
1789 DPRINTK(HW, INFO, "interrupt testing starting\n");
1790 if (ixgbe_intr_test(adapter, &data[2]))
1791 eth_test->flags |= ETH_TEST_FL_FAILED;
1792
1793 ixgbe_reset(adapter);
1794 DPRINTK(HW, INFO, "loopback testing starting\n");
1795 if (ixgbe_loopback_test(adapter, &data[3]))
1796 eth_test->flags |= ETH_TEST_FL_FAILED;
1797
1798 ixgbe_reset(adapter);
1799
1800 clear_bit(__IXGBE_TESTING, &adapter->state);
1801 if (if_running)
1802 dev_open(netdev);
1803 } else {
1804 DPRINTK(HW, INFO, "online testing starting\n");
1805 /* Online tests */
1806 if (ixgbe_link_test(adapter, &data[4]))
1807 eth_test->flags |= ETH_TEST_FL_FAILED;
1808
1809 /* Online tests aren't run; pass by default */
1810 data[0] = 0;
1811 data[1] = 0;
1812 data[2] = 0;
1813 data[3] = 0;
1814
1815 clear_bit(__IXGBE_TESTING, &adapter->state);
1816 }
1817 msleep_interruptible(4 * 1000);
1818}
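The self-test above is driven from user space through the ETHTOOL_TEST ioctl (for example, ethtool -t ethX offline). As the function fills the result array, data[0] carries the register test, data[1] the EEPROM test, data[2] the interrupt test, data[3] the loopback test and data[4] the link test; any non-zero value marks a failure. A minimal sketch of how such a result vector could be labelled (the labels are illustrative; the strings actually reported to ethtool come from ixgbe_get_strings(), which is not shown here):

#include <stdio.h>

/*
 * Illustrative labels for the five result slots filled by ixgbe_diag_test();
 * the real reporting order and strings belong to ixgbe_get_strings().
 */
static const char * const slot_name[5] = {
	"register", "eeprom", "interrupt", "loopback", "link"
};

int main(void)
{
	unsigned long long data[5] = { 0, 0, 0, 0, 0 };	/* pretend results */
	int i;

	for (i = 0; i < 5; i++)
		printf("%-9s test: %s\n", slot_name[i],
		       data[i] ? "FAIL" : "PASS");
	return 0;
}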
978 1819
979static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 1820static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
980 struct ethtool_wolinfo *wol) 1821 struct ethtool_wolinfo *wol)
@@ -1146,8 +1987,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
1146 else 1987 else
1147 /* rx only or mixed */ 1988 /* rx only or mixed */
1148 q_vector->eitr = adapter->eitr_param; 1989 q_vector->eitr = adapter->eitr_param;
1149 ixgbe_write_eitr(adapter, i, 1990 ixgbe_write_eitr(q_vector);
1150 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
1151 } 1991 }
1152 1992
1153 return 0; 1993 return 0;
@@ -1159,13 +1999,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
1159 1999
1160 ethtool_op_set_flags(netdev, data); 2000 ethtool_op_set_flags(netdev, data);
1161 2001
1162 if (!(adapter->flags & IXGBE_FLAG_RSC_CAPABLE)) 2002 if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE))
1163 return 0; 2003 return 0;
1164 2004
1165 /* if state changes we need to update adapter->flags and reset */ 2005 /* if state changes we need to update adapter->flags and reset */
1166 if ((!!(data & ETH_FLAG_LRO)) != 2006 if ((!!(data & ETH_FLAG_LRO)) !=
1167 (!!(adapter->flags & IXGBE_FLAG_RSC_ENABLED))) { 2007 (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) {
1168 adapter->flags ^= IXGBE_FLAG_RSC_ENABLED; 2008 adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED;
1169 if (netif_running(netdev)) 2009 if (netif_running(netdev))
1170 ixgbe_reinit_locked(adapter); 2010 ixgbe_reinit_locked(adapter);
1171 else 2011 else
@@ -1201,6 +2041,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
1201 .set_msglevel = ixgbe_set_msglevel, 2041 .set_msglevel = ixgbe_set_msglevel,
1202 .get_tso = ethtool_op_get_tso, 2042 .get_tso = ethtool_op_get_tso,
1203 .set_tso = ixgbe_set_tso, 2043 .set_tso = ixgbe_set_tso,
2044 .self_test = ixgbe_diag_test,
1204 .get_strings = ixgbe_get_strings, 2045 .get_strings = ixgbe_get_strings,
1205 .phys_id = ixgbe_phys_id, 2046 .phys_id = ixgbe_phys_id,
1206 .get_sset_count = ixgbe_get_sset_count, 2047 .get_sset_count = ixgbe_get_sset_count,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index d5939de8ba28..3c3bf1f07b81 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -280,7 +280,9 @@ out_noddp_unmap:
280 * 280 *
281 * This checks ddp status. 281 * This checks ddp status.
282 * 282 *
283 * Returns : 0 for success and skb will not be delivered to ULD              283 * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
                                                                                 284 * not passing the skb to ULD, > 0 indicates the length of data
285 * being ddped.
284 */ 286 */
285int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, 287int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
286 union ixgbe_adv_rx_desc *rx_desc, 288 union ixgbe_adv_rx_desc *rx_desc,
@@ -334,6 +336,8 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
334 /* return 0 to bypass going to ULD for DDPed data */ 336 /* return 0 to bypass going to ULD for DDPed data */
335 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) 337 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
336 rc = 0; 338 rc = 0;
339 else
340 rc = ddp->len;
337 } 341 }
338 342
339ddp_out: 343ddp_out:
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index b7f9b63aa49f..c5b50026a897 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -28,6 +28,7 @@
28#ifndef _IXGBE_FCOE_H 28#ifndef _IXGBE_FCOE_H
29#define _IXGBE_FCOE_H 29#define _IXGBE_FCOE_H
30 30
31#include <scsi/fc/fc_fs.h>
31#include <scsi/fc/fc_fcoe.h> 32#include <scsi/fc/fc_fcoe.h>
32 33
33/* shift bits within STAT for FCSTAT */ 34/* shift bits within STAT for FCSTAT */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index dff1da8ae5c4..a551a96ce676 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe";
48static const char ixgbe_driver_string[] = 48static const char ixgbe_driver_string[] =
49 "Intel(R) 10 Gigabit PCI Express Network Driver"; 49 "Intel(R) 10 Gigabit PCI Express Network Driver";
50 50
51#define DRV_VERSION "2.0.24-k2" 51#define DRV_VERSION "2.0.34-k2"
52const char ixgbe_driver_version[] = DRV_VERSION; 52const char ixgbe_driver_version[] = DRV_VERSION;
53static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; 53static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
54 54
@@ -186,6 +186,22 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
186 } 186 }
187} 187}
188 188
189static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
190 u64 qmask)
191{
192 u32 mask;
193
194 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
195 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
196 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
197 } else {
198 mask = (qmask & 0xFFFFFFFF);
199 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
200 mask = (qmask >> 32);
201 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
202 }
203}
204
189static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 205static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
190 struct ixgbe_tx_buffer 206 struct ixgbe_tx_buffer
191 *tx_buffer_info) 207 *tx_buffer_info)
@@ -248,14 +264,13 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
248 264
249/** 265/**
250 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 266 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
251 * @adapter: board private structure 267 * @q_vector: structure containing interrupt and ring information
252 * @tx_ring: tx ring to clean 268 * @tx_ring: tx ring to clean
253 *
254 * returns true if transmit work is done
255 **/ 269 **/
256static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, 270static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
257 struct ixgbe_ring *tx_ring) 271 struct ixgbe_ring *tx_ring)
258{ 272{
273 struct ixgbe_adapter *adapter = q_vector->adapter;
259 struct net_device *netdev = adapter->netdev; 274 struct net_device *netdev = adapter->netdev;
260 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 275 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
261 struct ixgbe_tx_buffer *tx_buffer_info; 276 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -278,12 +293,24 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
278 293
279 if (cleaned && skb) { 294 if (cleaned && skb) {
280 unsigned int segs, bytecount; 295 unsigned int segs, bytecount;
296 unsigned int hlen = skb_headlen(skb);
281 297
282 /* gso_segs is currently only valid for tcp */ 298 /* gso_segs is currently only valid for tcp */
283 segs = skb_shinfo(skb)->gso_segs ?: 1; 299 segs = skb_shinfo(skb)->gso_segs ?: 1;
300#ifdef IXGBE_FCOE
301 /* adjust for FCoE Sequence Offload */
302 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
303 && (skb->protocol == htons(ETH_P_FCOE)) &&
304 skb_is_gso(skb)) {
305 hlen = skb_transport_offset(skb) +
306 sizeof(struct fc_frame_header) +
307 sizeof(struct fcoe_crc_eof);
308 segs = DIV_ROUND_UP(skb->len - hlen,
309 skb_shinfo(skb)->gso_size);
310 }
311#endif /* IXGBE_FCOE */
284 /* multiply data chunks by size of headers */ 312 /* multiply data chunks by size of headers */
285 bytecount = ((segs - 1) * skb_headlen(skb)) + 313 bytecount = ((segs - 1) * hlen) + skb->len;
286 skb->len;
287 total_packets += segs; 314 total_packets += segs;
288 total_bytes += bytecount; 315 total_bytes += bytecount;
289 } 316 }
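The FCoE adjustment above recomputes the header length and segment count so the transmit byte/packet statistics reflect what actually went on the wire when the hardware splits one large FC sequence into many frames. A worked sketch of the same arithmetic, with purely illustrative sizes (58 bytes of replicated headers, 2 KiB sequence chunks):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/*
	 * Illustrative numbers for one large FCoE transmit: 58 bytes of
	 * Ethernet/FCoE/FC headers repeated per frame, 2 KiB sequence
	 * chunks, sixteen chunks of payload in the skb.
	 */
	unsigned int hlen = 58;
	unsigned int gso_size = 2048;
	unsigned int skb_len = hlen + 16 * gso_size;
	unsigned int segs, bytecount;

	segs = DIV_ROUND_UP(skb_len - hlen, gso_size);	/* 16 frames */
	/* frames after the first repeat the headers on the wire */
	bytecount = (segs - 1) * hlen + skb_len;

	printf("segs=%u, wire bytes=%u\n", segs, bytecount);
	return 0;
}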
@@ -329,18 +356,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
329 } 356 }
330 357
331 /* re-arm the interrupt */ 358 /* re-arm the interrupt */
332 if (count >= tx_ring->work_limit) { 359 if (count >= tx_ring->work_limit)
333 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 360 ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
334 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
335 tx_ring->v_idx);
336 else if (tx_ring->v_idx & 0xFFFFFFFF)
337 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0),
338 tx_ring->v_idx);
339 else
340 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1),
341 (tx_ring->v_idx >> 32));
342 }
343
344 361
345 tx_ring->total_bytes += total_bytes; 362 tx_ring->total_bytes += total_bytes;
346 tx_ring->total_packets += total_packets; 363 tx_ring->total_packets += total_packets;
@@ -678,6 +695,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
678 bool cleaned = false; 695 bool cleaned = false;
679 int cleaned_count = 0; 696 int cleaned_count = 0;
680 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 697 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
698#ifdef IXGBE_FCOE
699 int ddp_bytes = 0;
700#endif /* IXGBE_FCOE */
681 701
682 i = rx_ring->next_to_clean; 702 i = rx_ring->next_to_clean;
683 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 703 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
@@ -708,7 +728,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
708 prefetch(skb->data - NET_IP_ALIGN); 728 prefetch(skb->data - NET_IP_ALIGN);
709 rx_buffer_info->skb = NULL; 729 rx_buffer_info->skb = NULL;
710 730
711 if (len && !skb_shinfo(skb)->nr_frags) { 731 if (rx_buffer_info->dma) {
712 pci_unmap_single(pdev, rx_buffer_info->dma, 732 pci_unmap_single(pdev, rx_buffer_info->dma,
713 rx_ring->rx_buf_len, 733 rx_ring->rx_buf_len,
714 PCI_DMA_FROMDEVICE); 734 PCI_DMA_FROMDEVICE);
@@ -743,7 +763,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
743 prefetch(next_rxd); 763 prefetch(next_rxd);
744 cleaned_count++; 764 cleaned_count++;
745 765
746 if (adapter->flags & IXGBE_FLAG_RSC_CAPABLE) 766 if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)
747 rsc_count = ixgbe_get_rsc_count(rx_desc); 767 rsc_count = ixgbe_get_rsc_count(rx_desc);
748 768
749 if (rsc_count) { 769 if (rsc_count) {
@@ -788,9 +808,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
788 skb->protocol = eth_type_trans(skb, adapter->netdev); 808 skb->protocol = eth_type_trans(skb, adapter->netdev);
789#ifdef IXGBE_FCOE 809#ifdef IXGBE_FCOE
790 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 810 /* if ddp, not passing to ULD unless for FCP_RSP or error */
791 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 811 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
792 if (!ixgbe_fcoe_ddp(adapter, rx_desc, skb)) 812 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
813 if (!ddp_bytes)
793 goto next_desc; 814 goto next_desc;
815 }
794#endif /* IXGBE_FCOE */ 816#endif /* IXGBE_FCOE */
795 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 817 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
796 818
@@ -816,6 +838,21 @@ next_desc:
816 if (cleaned_count) 838 if (cleaned_count)
817 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 839 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
818 840
841#ifdef IXGBE_FCOE
842 /* include DDPed FCoE data */
843 if (ddp_bytes > 0) {
844 unsigned int mss;
845
846 mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
847 sizeof(struct fc_frame_header) -
848 sizeof(struct fcoe_crc_eof);
849 if (mss > 512)
850 mss &= ~511;
851 total_rx_bytes += ddp_bytes;
852 total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
853 }
854#endif /* IXGBE_FCOE */
855
819 rx_ring->total_packets += total_rx_packets; 856 rx_ring->total_packets += total_rx_packets;
820 rx_ring->total_bytes += total_rx_bytes; 857 rx_ring->total_bytes += total_rx_bytes;
821 adapter->net_stats.rx_bytes += total_rx_bytes; 858 adapter->net_stats.rx_bytes += total_rx_bytes;
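The hunk above estimates how many frames the DDPed bytes represent, since those frames never reached the stack: the per-frame payload is the MTU minus the FCoE encapsulation, rounded down to a multiple of 512 when large enough. A small worked example with assumed values (2500-byte MTU, 46 bytes of encapsulation overhead):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/*
	 * Assumed values: 2500-byte FCoE MTU, 46 bytes of fcoe_hdr +
	 * fc_frame_header + fcoe_crc_eof overhead, 1 MiB of data DDPed
	 * straight into the target buffers.
	 */
	unsigned int mtu = 2500;
	unsigned int ddp_bytes = 1024 * 1024;
	unsigned int mss = mtu - 46;		/* 2454 */

	if (mss > 512)
		mss &= ~511U;			/* rounds down to 2048 */

	/* the stack never saw these frames, so estimate the packet count */
	printf("estimated DDPed packets: %u\n", DIV_ROUND_UP(ddp_bytes, mss));
	return 0;
}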
@@ -875,12 +912,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
875 /* rx only */ 912 /* rx only */
876 q_vector->eitr = adapter->eitr_param; 913 q_vector->eitr = adapter->eitr_param;
877 914
878 /* 915 ixgbe_write_eitr(q_vector);
879 * since this is initial set up don't need to call
880 * ixgbe_write_eitr helper
881 */
882 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
883 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
884 } 916 }
885 917
886 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 918 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -965,17 +997,19 @@ update_itr_done:
965 997
966/** 998/**
967 * ixgbe_write_eitr - write EITR register in hardware specific way 999 * ixgbe_write_eitr - write EITR register in hardware specific way
968 * @adapter: pointer to adapter struct 1000 * @q_vector: structure containing interrupt and ring information
969 * @v_idx: vector index into q_vector array
970 * @itr_reg: new value to be written in *register* format, not ints/s
971 * 1001 *
972 * This function is made to be called by ethtool and by the driver 1002 * This function is made to be called by ethtool and by the driver
973 * when it needs to update EITR registers at runtime. Hardware 1003 * when it needs to update EITR registers at runtime. Hardware
974 * specific quirks/differences are taken care of here. 1004 * specific quirks/differences are taken care of here.
975 */ 1005 */
976void ixgbe_write_eitr(struct ixgbe_adapter *adapter, int v_idx, u32 itr_reg) 1006void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
977{ 1007{
1008 struct ixgbe_adapter *adapter = q_vector->adapter;
978 struct ixgbe_hw *hw = &adapter->hw; 1009 struct ixgbe_hw *hw = &adapter->hw;
1010 int v_idx = q_vector->v_idx;
1011 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1012
979 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1013 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
980 /* must write high and low 16 bits to reset counter */ 1014 /* must write high and low 16 bits to reset counter */
981 itr_reg |= (itr_reg << 16); 1015 itr_reg |= (itr_reg << 16);
@@ -994,7 +1028,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
994 struct ixgbe_adapter *adapter = q_vector->adapter; 1028 struct ixgbe_adapter *adapter = q_vector->adapter;
995 u32 new_itr; 1029 u32 new_itr;
996 u8 current_itr, ret_itr; 1030 u8 current_itr, ret_itr;
997 int i, r_idx, v_idx = q_vector->v_idx; 1031 int i, r_idx;
998 struct ixgbe_ring *rx_ring, *tx_ring; 1032 struct ixgbe_ring *rx_ring, *tx_ring;
999 1033
1000 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1034 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
@@ -1044,14 +1078,13 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1044 } 1078 }
1045 1079
1046 if (new_itr != q_vector->eitr) { 1080 if (new_itr != q_vector->eitr) {
1047 u32 itr_reg; 1081 /* do an exponential smoothing */
1082 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1048 1083
1049 /* save the algorithm value here, not the smoothed one */ 1084 /* save the algorithm value here, not the smoothed one */
1050 q_vector->eitr = new_itr; 1085 q_vector->eitr = new_itr;
1051 /* do an exponential smoothing */ 1086
1052 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1087 ixgbe_write_eitr(q_vector);
1053 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
1054 ixgbe_write_eitr(adapter, v_idx, itr_reg);
1055 } 1088 }
1056 1089
1057 return; 1090 return;
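The ITR update above now folds the newly computed rate into the previous one with a 90/10 weighting before writing EITR, so the interrupt rate drifts toward the target instead of jumping. A standalone sketch of that smoothing with illustrative numbers:

#include <stdio.h>

int main(void)
{
	/* illustrative: current EITR of 8000 ints/s, algorithm asks for 2000 */
	unsigned int eitr = 8000, target = 2000, step;

	for (step = 1; step <= 5; step++) {
		/* same 90/10 weighting as ixgbe_set_itr_msix() */
		eitr = ((eitr * 90) / 100) + ((target * 10) / 100);
		printf("step %u: %u ints/s\n", step, eitr);
	}
	/* prints 7400, 6860, 6374, 5936, 5542: a gradual drift toward 2000 */
	return 0;
}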
@@ -1122,14 +1155,64 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1122 if (hw->mac.type == ixgbe_mac_82598EB) 1155 if (hw->mac.type == ixgbe_mac_82598EB)
1123 ixgbe_check_fan_failure(adapter, eicr); 1156 ixgbe_check_fan_failure(adapter, eicr);
1124 1157
1125 if (hw->mac.type == ixgbe_mac_82599EB) 1158 if (hw->mac.type == ixgbe_mac_82599EB) {
1126 ixgbe_check_sfp_event(adapter, eicr); 1159 ixgbe_check_sfp_event(adapter, eicr);
1160
1161 /* Handle Flow Director Full threshold interrupt */
1162 if (eicr & IXGBE_EICR_FLOW_DIR) {
1163 int i;
1164 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1165 /* Disable transmits before FDIR Re-initialization */
1166 netif_tx_stop_all_queues(netdev);
1167 for (i = 0; i < adapter->num_tx_queues; i++) {
1168 struct ixgbe_ring *tx_ring =
1169 &adapter->tx_ring[i];
1170 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
1171 &tx_ring->reinit_state))
1172 schedule_work(&adapter->fdir_reinit_task);
1173 }
1174 }
1175 }
1127 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1176 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1128 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1177 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1129 1178
1130 return IRQ_HANDLED; 1179 return IRQ_HANDLED;
1131} 1180}
1132 1181
1182static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1183 u64 qmask)
1184{
1185 u32 mask;
1186
1187 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1188 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1189 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1190 } else {
1191 mask = (qmask & 0xFFFFFFFF);
1192 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1193 mask = (qmask >> 32);
1194 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1195 }
1196 /* skip the flush */
1197}
1198
1199static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1200 u64 qmask)
1201{
1202 u32 mask;
1203
1204 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1205 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1206 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
1207 } else {
1208 mask = (qmask & 0xFFFFFFFF);
1209 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
1210 mask = (qmask >> 32);
1211 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
1212 }
1213 /* skip the flush */
1214}
1215
1133static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) 1216static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1134{ 1217{
1135 struct ixgbe_q_vector *q_vector = data; 1218 struct ixgbe_q_vector *q_vector = data;
@@ -1143,17 +1226,16 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1143 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1226 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1144 for (i = 0; i < q_vector->txr_count; i++) { 1227 for (i = 0; i < q_vector->txr_count; i++) {
1145 tx_ring = &(adapter->tx_ring[r_idx]); 1228 tx_ring = &(adapter->tx_ring[r_idx]);
1146#ifdef CONFIG_IXGBE_DCA
1147 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1148 ixgbe_update_tx_dca(adapter, tx_ring);
1149#endif
1150 tx_ring->total_bytes = 0; 1229 tx_ring->total_bytes = 0;
1151 tx_ring->total_packets = 0; 1230 tx_ring->total_packets = 0;
1152 ixgbe_clean_tx_irq(adapter, tx_ring);
1153 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 1231 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1154 r_idx + 1); 1232 r_idx + 1);
1155 } 1233 }
1156 1234
1235 /* disable interrupts on this vector only */
1236 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1237 napi_schedule(&q_vector->napi);
1238
1157 return IRQ_HANDLED; 1239 return IRQ_HANDLED;
1158} 1240}
1159 1241
@@ -1185,13 +1267,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1185 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1267 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1186 rx_ring = &(adapter->rx_ring[r_idx]); 1268 rx_ring = &(adapter->rx_ring[r_idx]);
1187 /* disable interrupts on this vector only */ 1269 /* disable interrupts on this vector only */
1188 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1270 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1189 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
1190 else if (rx_ring->v_idx & 0xFFFFFFFF)
1191 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), rx_ring->v_idx);
1192 else
1193 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1),
1194 (rx_ring->v_idx >> 32));
1195 napi_schedule(&q_vector->napi); 1271 napi_schedule(&q_vector->napi);
1196 1272
1197 return IRQ_HANDLED; 1273 return IRQ_HANDLED;
@@ -1199,27 +1275,38 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1199 1275
1200static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) 1276static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1201{ 1277{
1202 ixgbe_msix_clean_rx(irq, data); 1278 struct ixgbe_q_vector *q_vector = data;
1203 ixgbe_msix_clean_tx(irq, data); 1279 struct ixgbe_adapter *adapter = q_vector->adapter;
1280 struct ixgbe_ring *ring;
1281 int r_idx;
1282 int i;
1204 1283
1205 return IRQ_HANDLED; 1284 if (!q_vector->txr_count && !q_vector->rxr_count)
1206} 1285 return IRQ_HANDLED;
1207 1286
1208static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, 1287 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1209 u64 qmask) 1288 for (i = 0; i < q_vector->txr_count; i++) {
1210{ 1289 ring = &(adapter->tx_ring[r_idx]);
1211 u32 mask; 1290 ring->total_bytes = 0;
1291 ring->total_packets = 0;
1292 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1293 r_idx + 1);
1294 }
1212 1295
1213 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1296 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1214 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 1297 for (i = 0; i < q_vector->rxr_count; i++) {
1215 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1298 ring = &(adapter->rx_ring[r_idx]);
1216 } else { 1299 ring->total_bytes = 0;
1217 mask = (qmask & 0xFFFFFFFF); 1300 ring->total_packets = 0;
1218 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); 1301 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1219 mask = (qmask >> 32); 1302 r_idx + 1);
1220 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1221 } 1303 }
1222 /* skip the flush */ 1304
1305 /* disable interrupts on this vector only */
1306 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1307 napi_schedule(&q_vector->napi);
1308
1309 return IRQ_HANDLED;
1223} 1310}
1224 1311
1225/** 1312/**
@@ -1254,29 +1341,42 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1254 if (adapter->itr_setting & 1) 1341 if (adapter->itr_setting & 1)
1255 ixgbe_set_itr_msix(q_vector); 1342 ixgbe_set_itr_msix(q_vector);
1256 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1343 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1257 ixgbe_irq_enable_queues(adapter, rx_ring->v_idx); 1344 ixgbe_irq_enable_queues(adapter,
1345 ((u64)1 << q_vector->v_idx));
1258 } 1346 }
1259 1347
1260 return work_done; 1348 return work_done;
1261} 1349}
1262 1350
1263/** 1351/**
1264 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine 1352 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
1265 * @napi: napi struct with our devices info in it 1353 * @napi: napi struct with our devices info in it
1266 * @budget: amount of work driver is allowed to do this pass, in packets 1354 * @budget: amount of work driver is allowed to do this pass, in packets
1267 * 1355 *
1268 * This function will clean more than one rx queue associated with a 1356 * This function will clean more than one rx queue associated with a
1269 * q_vector. 1357 * q_vector.
1270 **/ 1358 **/
1271static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) 1359static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1272{ 1360{
1273 struct ixgbe_q_vector *q_vector = 1361 struct ixgbe_q_vector *q_vector =
1274 container_of(napi, struct ixgbe_q_vector, napi); 1362 container_of(napi, struct ixgbe_q_vector, napi);
1275 struct ixgbe_adapter *adapter = q_vector->adapter; 1363 struct ixgbe_adapter *adapter = q_vector->adapter;
1276 struct ixgbe_ring *rx_ring = NULL; 1364 struct ixgbe_ring *ring = NULL;
1277 int work_done = 0, i; 1365 int work_done = 0, i;
1278 long r_idx; 1366 long r_idx;
1279 u64 enable_mask = 0; 1367 bool tx_clean_complete = true;
1368
1369 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1370 for (i = 0; i < q_vector->txr_count; i++) {
1371 ring = &(adapter->tx_ring[r_idx]);
1372#ifdef CONFIG_IXGBE_DCA
1373 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1374 ixgbe_update_tx_dca(adapter, ring);
1375#endif
1376 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1377 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1378 r_idx + 1);
1379 }
1280 1380
1281 /* attempt to distribute budget to each queue fairly, but don't allow 1381 /* attempt to distribute budget to each queue fairly, but don't allow
1282 * the budget to go below 1 because we'll exit polling */ 1382 * the budget to go below 1 because we'll exit polling */
@@ -1284,31 +1384,71 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1284 budget = max(budget, 1); 1384 budget = max(budget, 1);
1285 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1385 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1286 for (i = 0; i < q_vector->rxr_count; i++) { 1386 for (i = 0; i < q_vector->rxr_count; i++) {
1287 rx_ring = &(adapter->rx_ring[r_idx]); 1387 ring = &(adapter->rx_ring[r_idx]);
1288#ifdef CONFIG_IXGBE_DCA 1388#ifdef CONFIG_IXGBE_DCA
1289 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1389 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1290 ixgbe_update_rx_dca(adapter, rx_ring); 1390 ixgbe_update_rx_dca(adapter, ring);
1291#endif 1391#endif
1292 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 1392 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1293 enable_mask |= rx_ring->v_idx;
1294 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1393 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1295 r_idx + 1); 1394 r_idx + 1);
1296 } 1395 }
1297 1396
1298 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1397 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1299 rx_ring = &(adapter->rx_ring[r_idx]); 1398 ring = &(adapter->rx_ring[r_idx]);
1300 /* If all Rx work done, exit the polling mode */ 1399 /* If all Rx work done, exit the polling mode */
1301 if (work_done < budget) { 1400 if (work_done < budget) {
1302 napi_complete(napi); 1401 napi_complete(napi);
1303 if (adapter->itr_setting & 1) 1402 if (adapter->itr_setting & 1)
1304 ixgbe_set_itr_msix(q_vector); 1403 ixgbe_set_itr_msix(q_vector);
1305 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1404 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1306 ixgbe_irq_enable_queues(adapter, enable_mask); 1405 ixgbe_irq_enable_queues(adapter,
1406 ((u64)1 << q_vector->v_idx));
1307 return 0; 1407 return 0;
1308 } 1408 }
1309 1409
1310 return work_done; 1410 return work_done;
1311} 1411}
1412
1413/**
1414 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
1415 * @napi: napi struct with our devices info in it
1416 * @budget: amount of work driver is allowed to do this pass, in packets
1417 *
1418 * This function is optimized for cleaning one queue only on a single
1419 * q_vector!!!
1420 **/
1421static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1422{
1423 struct ixgbe_q_vector *q_vector =
1424 container_of(napi, struct ixgbe_q_vector, napi);
1425 struct ixgbe_adapter *adapter = q_vector->adapter;
1426 struct ixgbe_ring *tx_ring = NULL;
1427 int work_done = 0;
1428 long r_idx;
1429
1430 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1431 tx_ring = &(adapter->tx_ring[r_idx]);
1432#ifdef CONFIG_IXGBE_DCA
1433 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1434 ixgbe_update_tx_dca(adapter, tx_ring);
1435#endif
1436
1437 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
1438 work_done = budget;
1439
 1440        /* If all Tx work done, exit the polling mode */
1441 if (work_done < budget) {
1442 napi_complete(napi);
1443 if (adapter->itr_setting & 1)
1444 ixgbe_set_itr_msix(q_vector);
1445 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1446 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
1447 }
1448
1449 return work_done;
1450}
1451
1312static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 1452static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
1313 int r_idx) 1453 int r_idx)
1314{ 1454{
@@ -1316,7 +1456,6 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
1316 1456
1317 set_bit(r_idx, q_vector->rxr_idx); 1457 set_bit(r_idx, q_vector->rxr_idx);
1318 q_vector->rxr_count++; 1458 q_vector->rxr_count++;
1319 a->rx_ring[r_idx].v_idx = (u64)1 << v_idx;
1320} 1459}
1321 1460
1322static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 1461static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
@@ -1326,7 +1465,6 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1326 1465
1327 set_bit(t_idx, q_vector->txr_idx); 1466 set_bit(t_idx, q_vector->txr_idx);
1328 q_vector->txr_count++; 1467 q_vector->txr_count++;
1329 a->tx_ring[t_idx].v_idx = (u64)1 << v_idx;
1330} 1468}
1331 1469
1332/** 1470/**
@@ -1505,14 +1643,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1505 } 1643 }
1506 1644
1507 if (new_itr != q_vector->eitr) { 1645 if (new_itr != q_vector->eitr) {
1508 u32 itr_reg; 1646 /* do an exponential smoothing */
1647 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1509 1648
1510 /* save the algorithm value here, not the smoothed one */ 1649 /* save the algorithm value here, not the smoothed one */
1511 q_vector->eitr = new_itr; 1650 q_vector->eitr = new_itr;
1512 /* do an exponential smoothing */ 1651
1513 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1652 ixgbe_write_eitr(q_vector);
1514 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
1515 ixgbe_write_eitr(adapter, 0, itr_reg);
1516 } 1653 }
1517 1654
1518 return; 1655 return;
@@ -1534,6 +1671,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1534 mask |= IXGBE_EIMS_GPI_SDP1; 1671 mask |= IXGBE_EIMS_GPI_SDP1;
1535 mask |= IXGBE_EIMS_GPI_SDP2; 1672 mask |= IXGBE_EIMS_GPI_SDP2;
1536 } 1673 }
1674 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
1675 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
1676 mask |= IXGBE_EIMS_FLOW_DIR;
1537 1677
1538 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1678 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1539 ixgbe_irq_enable_queues(adapter, ~0); 1679 ixgbe_irq_enable_queues(adapter, ~0);
@@ -1879,7 +2019,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1879 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 2019 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
1880 } 2020 }
1881 } else { 2021 } else {
1882 if (!(adapter->flags & IXGBE_FLAG_RSC_ENABLED) && 2022 if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) &&
1883 (netdev->mtu <= ETH_DATA_LEN)) 2023 (netdev->mtu <= ETH_DATA_LEN))
1884 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 2024 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1885 else 2025 else
@@ -2008,7 +2148,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2008 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2148 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2009 } 2149 }
2010 2150
2011 if (adapter->flags & IXGBE_FLAG_RSC_ENABLED) { 2151 if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) {
2012 /* Enable 82599 HW-RSC */ 2152 /* Enable 82599 HW-RSC */
2013 for (i = 0; i < adapter->num_rx_queues; i++) { 2153 for (i = 0; i < adapter->num_rx_queues; i++) {
2014 j = adapter->rx_ring[i].reg_idx; 2154 j = adapter->rx_ring[i].reg_idx;
@@ -2181,11 +2321,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
2181 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2321 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2182 2322
2183 /* reprogram secondary unicast list */ 2323 /* reprogram secondary unicast list */
2184 addr_count = netdev->uc_count; 2324 hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list);
2185 if (addr_count)
2186 addr_list = netdev->uc_list->dmi_addr;
2187 hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
2188 ixgbe_addr_list_itr);
2189 2325
2190 /* reprogram multicast list */ 2326 /* reprogram multicast list */
2191 addr_count = netdev->mc_count; 2327 addr_count = netdev->mc_count;
@@ -2208,12 +2344,15 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
2208 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2344 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2209 struct napi_struct *napi; 2345 struct napi_struct *napi;
2210 q_vector = adapter->q_vector[q_idx]; 2346 q_vector = adapter->q_vector[q_idx];
2211 if (!q_vector->rxr_count)
2212 continue;
2213 napi = &q_vector->napi; 2347 napi = &q_vector->napi;
2214 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) && 2348 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2215 (q_vector->rxr_count > 1)) 2349 if (!q_vector->rxr_count || !q_vector->txr_count) {
2216 napi->poll = &ixgbe_clean_rxonly_many; 2350 if (q_vector->txr_count == 1)
2351 napi->poll = &ixgbe_clean_txonly;
2352 else if (q_vector->rxr_count == 1)
2353 napi->poll = &ixgbe_clean_rxonly;
2354 }
2355 }
2217 2356
2218 napi_enable(napi); 2357 napi_enable(napi);
2219 } 2358 }
@@ -2231,8 +2370,6 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2231 2370
2232 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2371 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2233 q_vector = adapter->q_vector[q_idx]; 2372 q_vector = adapter->q_vector[q_idx];
2234 if (!q_vector->rxr_count)
2235 continue;
2236 napi_disable(&q_vector->napi); 2373 napi_disable(&q_vector->napi);
2237 } 2374 }
2238} 2375}
@@ -2290,6 +2427,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2290static void ixgbe_configure(struct ixgbe_adapter *adapter) 2427static void ixgbe_configure(struct ixgbe_adapter *adapter)
2291{ 2428{
2292 struct net_device *netdev = adapter->netdev; 2429 struct net_device *netdev = adapter->netdev;
2430 struct ixgbe_hw *hw = &adapter->hw;
2293 int i; 2431 int i;
2294 2432
2295 ixgbe_set_rx_mode(netdev); 2433 ixgbe_set_rx_mode(netdev);
@@ -2311,6 +2449,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
2311 ixgbe_configure_fcoe(adapter); 2449 ixgbe_configure_fcoe(adapter);
2312 2450
2313#endif /* IXGBE_FCOE */ 2451#endif /* IXGBE_FCOE */
2452 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2453 for (i = 0; i < adapter->num_tx_queues; i++)
2454 adapter->tx_ring[i].atr_sample_rate =
2455 adapter->atr_sample_rate;
2456 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
2457 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
2458 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
2459 }
2460
2314 ixgbe_configure_tx(adapter); 2461 ixgbe_configure_tx(adapter);
2315 ixgbe_configure_rx(adapter); 2462 ixgbe_configure_rx(adapter);
2316 for (i = 0; i < adapter->num_rx_queues; i++) 2463 for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2567,6 +2714,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2567 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); 2714 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
2568 } 2715 }
2569 2716
2717 for (i = 0; i < adapter->num_tx_queues; i++)
2718 set_bit(__IXGBE_FDIR_INIT_DONE,
2719 &(adapter->tx_ring[i].reinit_state));
2720
2570 /* enable transmits */ 2721 /* enable transmits */
2571 netif_tx_start_all_queues(netdev); 2722 netif_tx_start_all_queues(netdev);
2572 2723
@@ -2602,12 +2753,28 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
2602 int err; 2753 int err;
2603 2754
2604 err = hw->mac.ops.init_hw(hw); 2755 err = hw->mac.ops.init_hw(hw);
2605 if (err && (err != IXGBE_ERR_SFP_NOT_PRESENT)) 2756 switch (err) {
2606 dev_err(&adapter->pdev->dev, "Hardware Error\n"); 2757 case 0:
2758 case IXGBE_ERR_SFP_NOT_PRESENT:
2759 break;
2760 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
2761 dev_err(&adapter->pdev->dev, "master disable timed out\n");
2762 break;
2763 case IXGBE_ERR_EEPROM_VERSION:
2764 /* We are running on a pre-production device, log a warning */
2765 dev_warn(&adapter->pdev->dev, "This device is a pre-production "
2766 "adapter/LOM. Please be aware there may be issues "
2767 "associated with your hardware. If you are "
2768 "experiencing problems please contact your Intel or "
2769 "hardware representative who provided you with this "
2770 "hardware.\n");
2771 break;
2772 default:
2773 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
2774 }
2607 2775
2608 /* reprogram the RAR[0] in case user changed it. */ 2776 /* reprogram the RAR[0] in case user changed it. */
2609 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 2777 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2610
2611} 2778}
2612 2779
2613/** 2780/**
@@ -2755,6 +2922,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
2755 del_timer_sync(&adapter->watchdog_timer); 2922 del_timer_sync(&adapter->watchdog_timer);
2756 cancel_work_sync(&adapter->watchdog_task); 2923 cancel_work_sync(&adapter->watchdog_task);
2757 2924
2925 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2926 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2927 cancel_work_sync(&adapter->fdir_reinit_task);
2928
2758 /* disable transmits in the hardware now that interrupts are off */ 2929 /* disable transmits in the hardware now that interrupts are off */
2759 for (i = 0; i < adapter->num_tx_queues; i++) { 2930 for (i = 0; i < adapter->num_tx_queues; i++) {
2760 j = adapter->tx_ring[i].reg_idx; 2931 j = adapter->tx_ring[i].reg_idx;
@@ -2802,7 +2973,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
2802 } 2973 }
2803#endif 2974#endif
2804 2975
2805 tx_clean_complete = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); 2976 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
2806 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget); 2977 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
2807 2978
2808 if (!tx_clean_complete) 2979 if (!tx_clean_complete)
@@ -2889,6 +3060,38 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
2889 return ret; 3060 return ret;
2890} 3061}
2891 3062
3063/**
3064 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3065 * @adapter: board private structure to initialize
3066 *
3067 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3068 * to the original CPU that initiated the Tx session. This runs in addition
3069 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3070 * Rx load across CPUs using RSS.
3071 *
3072 **/
3073static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3074{
3075 bool ret = false;
3076 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
3077
3078 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
3079 f_fdir->mask = 0;
3080
3081 /* Flow Director must have RSS enabled */
3082 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
3083 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3084 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
3085 adapter->num_tx_queues = f_fdir->indices;
3086 adapter->num_rx_queues = f_fdir->indices;
3087 ret = true;
3088 } else {
3089 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3090 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3091 }
3092 return ret;
3093}
3094
2892#ifdef IXGBE_FCOE 3095#ifdef IXGBE_FCOE
2893/** 3096/**
2894 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) 3097 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
@@ -2953,6 +3156,9 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2953 goto done; 3156 goto done;
2954 3157
2955#endif 3158#endif
3159 if (ixgbe_set_fdir_queues(adapter))
3160 goto done;
3161
2956 if (ixgbe_set_rss_queues(adapter)) 3162 if (ixgbe_set_rss_queues(adapter))
2957 goto done; 3163 goto done;
2958 3164
@@ -3123,6 +3329,31 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
3123} 3329}
3124#endif 3330#endif
3125 3331
3332/**
3333 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
3334 * @adapter: board private structure to initialize
3335 *
3336 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
3337 *
3338 **/
3339static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
3340{
3341 int i;
3342 bool ret = false;
3343
3344 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
3345 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3346 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3347 for (i = 0; i < adapter->num_rx_queues; i++)
3348 adapter->rx_ring[i].reg_idx = i;
3349 for (i = 0; i < adapter->num_tx_queues; i++)
3350 adapter->tx_ring[i].reg_idx = i;
3351 ret = true;
3352 }
3353
3354 return ret;
3355}
3356
3126#ifdef IXGBE_FCOE 3357#ifdef IXGBE_FCOE
3127/** 3358/**
3128 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE 3359 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
@@ -3183,6 +3414,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3183 return; 3414 return;
3184 3415
3185#endif 3416#endif
3417 if (ixgbe_cache_ring_fdir(adapter))
3418 return;
3419
3186 if (ixgbe_cache_ring_rss(adapter)) 3420 if (ixgbe_cache_ring_rss(adapter))
3187 return; 3421 return;
3188} 3422}
@@ -3276,6 +3510,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3276 3510
3277 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 3511 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
3278 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 3512 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
3513 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3514 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3515 adapter->atr_sample_rate = 0;
3279 ixgbe_set_num_queues(adapter); 3516 ixgbe_set_num_queues(adapter);
3280 3517
3281 err = pci_enable_msi(adapter->pdev); 3518 err = pci_enable_msi(adapter->pdev);
@@ -3309,7 +3546,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3309 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3546 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3310 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3547 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3311 napi_vectors = adapter->num_rx_queues; 3548 napi_vectors = adapter->num_rx_queues;
3312 poll = &ixgbe_clean_rxonly; 3549 poll = &ixgbe_clean_rxtx_many;
3313 } else { 3550 } else {
3314 num_q_vectors = 1; 3551 num_q_vectors = 1;
3315 napi_vectors = 1; 3552 napi_vectors = 1;
@@ -3321,11 +3558,9 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3321 if (!q_vector) 3558 if (!q_vector)
3322 goto err_out; 3559 goto err_out;
3323 q_vector->adapter = adapter; 3560 q_vector->adapter = adapter;
3324 q_vector->v_idx = q_idx;
3325 q_vector->eitr = adapter->eitr_param; 3561 q_vector->eitr = adapter->eitr_param;
3326 if (q_idx < napi_vectors) 3562 q_vector->v_idx = q_idx;
3327 netif_napi_add(adapter->netdev, &q_vector->napi, 3563 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
3328 (*poll), 64);
3329 adapter->q_vector[q_idx] = q_vector; 3564 adapter->q_vector[q_idx] = q_vector;
3330 } 3565 }
3331 3566
@@ -3353,22 +3588,16 @@ err_out:
3353static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) 3588static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
3354{ 3589{
3355 int q_idx, num_q_vectors; 3590 int q_idx, num_q_vectors;
3356 int napi_vectors;
3357 3591
3358 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3592 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3359 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3593 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3360 napi_vectors = adapter->num_rx_queues; 3594 else
3361 } else {
3362 num_q_vectors = 1; 3595 num_q_vectors = 1;
3363 napi_vectors = 1;
3364 }
3365 3596
3366 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 3597 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3367 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; 3598 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
3368
3369 adapter->q_vector[q_idx] = NULL; 3599 adapter->q_vector[q_idx] = NULL;
3370 if (q_idx < napi_vectors) 3600 netif_napi_del(&q_vector->napi);
3371 netif_napi_del(&q_vector->napi);
3372 kfree(q_vector); 3601 kfree(q_vector);
3373 } 3602 }
3374} 3603}
@@ -3547,8 +3776,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3547 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 3776 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
3548 } else if (hw->mac.type == ixgbe_mac_82599EB) { 3777 } else if (hw->mac.type == ixgbe_mac_82599EB) {
3549 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 3778 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
3550 adapter->flags |= IXGBE_FLAG_RSC_CAPABLE; 3779 adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE;
3551 adapter->flags |= IXGBE_FLAG_RSC_ENABLED; 3780 adapter->flags |= IXGBE_FLAG2_RSC_ENABLED;
3781 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
3782 adapter->ring_feature[RING_F_FDIR].indices =
3783 IXGBE_MAX_FDIR_INDICES;
3784 adapter->atr_sample_rate = 20;
3785 adapter->fdir_pballoc = 0;
3552#ifdef IXGBE_FCOE 3786#ifdef IXGBE_FCOE
3553 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; 3787 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
3554 adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; 3788 adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
@@ -4138,6 +4372,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4138 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 4372 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
4139 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 4373 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4140 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 4374 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4375 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
4376 adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
4141#ifdef IXGBE_FCOE 4377#ifdef IXGBE_FCOE
4142 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 4378 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4143 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 4379 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
@@ -4213,57 +4449,43 @@ static void ixgbe_watchdog(unsigned long data)
4213{ 4449{
4214 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; 4450 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4215 struct ixgbe_hw *hw = &adapter->hw; 4451 struct ixgbe_hw *hw = &adapter->hw;
4452 u64 eics = 0;
4453 int i;
4216 4454
4217 /* Do the watchdog outside of interrupt context due to the lovely 4455 /*
4218 * delays that some of the newer hardware requires */ 4456 * Do the watchdog outside of interrupt context due to the lovely
4219 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 4457 * delays that some of the newer hardware requires
4220 u64 eics = 0; 4458 */
4221 int i;
4222 4459
4223 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) 4460 if (test_bit(__IXGBE_DOWN, &adapter->state))
4224 eics |= ((u64)1 << i); 4461 goto watchdog_short_circuit;
4225 4462
4226 /* Cause software interrupt to ensure rx rings are cleaned */ 4463 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
4227 switch (hw->mac.type) { 4464 /*
4228 case ixgbe_mac_82598EB: 4465 * for legacy and MSI interrupts don't set any bits
4229 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 4466 * that are enabled for EIAM, because this operation
4230 IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)eics); 4467 * would set *both* EIMS and EICS for any bit in EIAM
4231 } else { 4468 */
4232 /* 4469 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4233 * for legacy and MSI interrupts don't set any 4470 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
4234 * bits that are enabled for EIAM, because this 4471 goto watchdog_reschedule;
4235 * operation would set *both* EIMS and EICS for
4236 * any bit in EIAM
4237 */
4238 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4239 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
4240 }
4241 break;
4242 case ixgbe_mac_82599EB:
4243 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4244 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0),
4245 (u32)(eics & 0xFFFFFFFF));
4246 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
4247 (u32)(eics >> 32));
4248 } else {
4249 /*
4250 * for legacy and MSI interrupts don't set any
4251 * bits that are enabled for EIAM, because this
4252 * operation would set *both* EIMS and EICS for
4253 * any bit in EIAM
4254 */
4255 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4256 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
4257 }
4258 break;
4259 default:
4260 break;
4261 }
4262 /* Reset the timer */
4263 mod_timer(&adapter->watchdog_timer,
4264 round_jiffies(jiffies + 2 * HZ));
4265 } 4472 }
4266 4473
4474 /* get one bit for every active tx/rx interrupt vector */
4475 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
4476 struct ixgbe_q_vector *qv = adapter->q_vector[i];
4477 if (qv->rxr_count || qv->txr_count)
4478 eics |= ((u64)1 << i);
4479 }
4480
4481 /* Cause software interrupt to ensure rx rings are cleaned */
4482 ixgbe_irq_rearm_queues(adapter, eics);
4483
4484watchdog_reschedule:
4485 /* Reset the timer */
4486 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
4487
4488watchdog_short_circuit:
4267 schedule_work(&adapter->watchdog_task); 4489 schedule_work(&adapter->watchdog_task);
4268} 4490}
4269 4491
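Note: ixgbe_irq_rearm_queues(), called above, is not shown in this section. As a rough sketch only -- reconstructed from the per-MAC switch statement removed above, so an approximation rather than the patch's actual helper -- it amounts to:

	/* Approximation of ixgbe_irq_rearm_queues(), based on the removed code above */
	static inline void ixgbe_irq_rearm_queues_sketch(struct ixgbe_adapter *adapter,
							 u64 qmask)
	{
		struct ixgbe_hw *hw = &adapter->hw;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			/* 82598 has a single 32-bit EICS register */
			IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)qmask);
			break;
		case ixgbe_mac_82599EB:
			/* 82599 splits the 64-bit vector mask across EICS_EX[0..1] */
			IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0), (u32)(qmask & 0xFFFFFFFF));
			IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1), (u32)(qmask >> 32));
			break;
		default:
			break;
		}
	}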
@@ -4317,6 +4539,30 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
4317} 4539}
4318 4540
4319/** 4541/**
4542 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
4543 * @work: pointer to work_struct containing our data
4544 **/
4545static void ixgbe_fdir_reinit_task(struct work_struct *work)
4546{
4547 struct ixgbe_adapter *adapter = container_of(work,
4548 struct ixgbe_adapter,
4549 fdir_reinit_task);
4550 struct ixgbe_hw *hw = &adapter->hw;
4551 int i;
4552
4553 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
4554 for (i = 0; i < adapter->num_tx_queues; i++)
4555 set_bit(__IXGBE_FDIR_INIT_DONE,
4556 &(adapter->tx_ring[i].reinit_state));
4557 } else {
4558 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
 4559		       "ignoring FDIR ATR filters\n");
4560 }
4561 /* Done FDIR Re-initialization, enable transmits */
4562 netif_tx_start_all_queues(adapter->netdev);
4563}
4564
4565/**
4320 * ixgbe_watchdog_task - worker thread to bring link up 4566 * ixgbe_watchdog_task - worker thread to bring link up
4321 * @work: pointer to work_struct containing our data 4567 * @work: pointer to work_struct containing our data
4322 **/ 4568 **/
@@ -4341,12 +4587,12 @@ static void ixgbe_watchdog_task(struct work_struct *work)
4341#ifdef CONFIG_DCB 4587#ifdef CONFIG_DCB
4342 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4588 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4343 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 4589 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
4344 hw->mac.ops.setup_fc(hw, i); 4590 hw->mac.ops.fc_enable(hw, i);
4345 } else { 4591 } else {
4346 hw->mac.ops.setup_fc(hw, 0); 4592 hw->mac.ops.fc_enable(hw, 0);
4347 } 4593 }
4348#else 4594#else
4349 hw->mac.ops.setup_fc(hw, 0); 4595 hw->mac.ops.fc_enable(hw, 0);
4350#endif 4596#endif
4351 } 4597 }
4352 4598
@@ -4623,7 +4869,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
4623 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 4869 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
4624 4870
4625 tx_buffer_info->length = size; 4871 tx_buffer_info->length = size;
4626 tx_buffer_info->dma = map[0] + offset; 4872 tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
4627 tx_buffer_info->time_stamp = jiffies; 4873 tx_buffer_info->time_stamp = jiffies;
4628 tx_buffer_info->next_to_watch = i; 4874 tx_buffer_info->next_to_watch = i;
4629 4875
@@ -4655,7 +4901,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
4655 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 4901 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
4656 4902
4657 tx_buffer_info->length = size; 4903 tx_buffer_info->length = size;
4658 tx_buffer_info->dma = map[f + 1] + offset; 4904 tx_buffer_info->dma = map[f] + offset;
4659 tx_buffer_info->time_stamp = jiffies; 4905 tx_buffer_info->time_stamp = jiffies;
4660 tx_buffer_info->next_to_watch = i; 4906 tx_buffer_info->next_to_watch = i;
4661 4907
@@ -4743,6 +4989,58 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
4743 writel(i, adapter->hw.hw_addr + tx_ring->tail); 4989 writel(i, adapter->hw.hw_addr + tx_ring->tail);
4744} 4990}
4745 4991
4992static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
4993 int queue, u32 tx_flags)
4994{
4995 /* Right now, we support IPv4 only */
4996 struct ixgbe_atr_input atr_input;
4997 struct tcphdr *th;
4998 struct udphdr *uh;
4999 struct iphdr *iph = ip_hdr(skb);
5000 struct ethhdr *eth = (struct ethhdr *)skb->data;
5001 u16 vlan_id, src_port, dst_port, flex_bytes;
5002 u32 src_ipv4_addr, dst_ipv4_addr;
5003 u8 l4type = 0;
5004
5005 /* check if we're UDP or TCP */
5006 if (iph->protocol == IPPROTO_TCP) {
5007 th = tcp_hdr(skb);
5008 src_port = th->source;
5009 dst_port = th->dest;
5010 l4type |= IXGBE_ATR_L4TYPE_TCP;
5011 /* l4type IPv4 type is 0, no need to assign */
 5012	} else if (iph->protocol == IPPROTO_UDP) {
5013 uh = udp_hdr(skb);
5014 src_port = uh->source;
5015 dst_port = uh->dest;
5016 l4type |= IXGBE_ATR_L4TYPE_UDP;
5017 /* l4type IPv4 type is 0, no need to assign */
5018 } else {
5019 /* Unsupported L4 header, just bail here */
5020 return;
5021 }
5022
5023 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
5024
5025 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
5026 IXGBE_TX_FLAGS_VLAN_SHIFT;
5027 src_ipv4_addr = iph->saddr;
5028 dst_ipv4_addr = iph->daddr;
5029 flex_bytes = eth->h_proto;
5030
5031 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
5032 ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
5033 ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
5034 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
5035 ixgbe_atr_set_l4type_82599(&atr_input, l4type);
5036 /* src and dst are inverted, think how the receiver sees them */
5037 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
5038 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
5039
5040 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
5041 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
5042}
5043
4746static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 5044static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
4747 struct ixgbe_ring *tx_ring, int size) 5045 struct ixgbe_ring *tx_ring, int size)
4748{ 5046{
@@ -4777,6 +5075,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
4777{ 5075{
4778 struct ixgbe_adapter *adapter = netdev_priv(dev); 5076 struct ixgbe_adapter *adapter = netdev_priv(dev);
4779 5077
5078 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
5079 return smp_processor_id();
5080
4780 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 5081 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
4781 return 0; /* All traffic should default to class 0 */ 5082 return 0; /* All traffic should default to class 0 */
4782 5083
@@ -4861,9 +5162,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4861 5162
4862 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); 5163 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
4863 if (count) { 5164 if (count) {
5165 /* add the ATR filter if ATR is on */
5166 if (tx_ring->atr_sample_rate) {
5167 ++tx_ring->atr_count;
5168 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
5169 test_bit(__IXGBE_FDIR_INIT_DONE,
5170 &tx_ring->reinit_state)) {
5171 ixgbe_atr(adapter, skb, tx_ring->queue_index,
5172 tx_flags);
5173 tx_ring->atr_count = 0;
5174 }
5175 }
4864 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, 5176 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
4865 hdr_len); 5177 hdr_len);
4866 netdev->trans_start = jiffies;
4867 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 5178 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
4868 5179
4869 } else { 5180 } else {
@@ -5244,6 +5555,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5244 netdev->features |= NETIF_F_FCOE_CRC; 5555 netdev->features |= NETIF_F_FCOE_CRC;
5245 netdev->features |= NETIF_F_FSO; 5556 netdev->features |= NETIF_F_FSO;
5246 netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; 5557 netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
5558 DPRINTK(DRV, INFO, "FCoE enabled, "
5559 "disabling Flow Director\n");
5560 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
5561 adapter->flags &=
5562 ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
5563 adapter->atr_sample_rate = 0;
5247 } else { 5564 } else {
5248 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 5565 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5249 } 5566 }
@@ -5253,7 +5570,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5253 if (pci_using_dac) 5570 if (pci_using_dac)
5254 netdev->features |= NETIF_F_HIGHDMA; 5571 netdev->features |= NETIF_F_HIGHDMA;
5255 5572
5256 if (adapter->flags & IXGBE_FLAG_RSC_ENABLED) 5573 if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED)
5257 netdev->features |= NETIF_F_LRO; 5574 netdev->features |= NETIF_F_LRO;
5258 5575
5259 /* make sure the EEPROM is good */ 5576 /* make sure the EEPROM is good */
@@ -5287,6 +5604,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5287 case IXGBE_DEV_ID_82599_KX4: 5604 case IXGBE_DEV_ID_82599_KX4:
5288 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 5605 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
5289 IXGBE_WUFC_MC | IXGBE_WUFC_BC); 5606 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
5607 /* Enable ACPI wakeup in GRC */
5608 IXGBE_WRITE_REG(hw, IXGBE_GRC,
5609 (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
5290 break; 5610 break;
5291 default: 5611 default:
5292 adapter->wol = 0; 5612 adapter->wol = 0;
@@ -5329,8 +5649,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5329 hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version); 5649 hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
5330 5650
5331 /* reset the hardware with the new settings */ 5651 /* reset the hardware with the new settings */
5332 hw->mac.ops.start_hw(hw); 5652 err = hw->mac.ops.start_hw(hw);
5333 5653
5654 if (err == IXGBE_ERR_EEPROM_VERSION) {
5655 /* We are running on a pre-production device, log a warning */
5656 dev_warn(&pdev->dev, "This device is a pre-production "
5657 "adapter/LOM. Please be aware there may be issues "
5658 "associated with your hardware. If you are "
5659 "experiencing problems please contact your Intel or "
5660 "hardware representative who provided you with this "
5661 "hardware.\n");
5662 }
5334 strcpy(netdev->name, "eth%d"); 5663 strcpy(netdev->name, "eth%d");
5335 err = register_netdev(netdev); 5664 err = register_netdev(netdev);
5336 if (err) 5665 if (err)
@@ -5339,6 +5668,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5339 /* carrier off reporting is important to ethtool even BEFORE open */ 5668 /* carrier off reporting is important to ethtool even BEFORE open */
5340 netif_carrier_off(netdev); 5669 netif_carrier_off(netdev);
5341 5670
5671 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5672 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5673 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
5674
5342#ifdef CONFIG_IXGBE_DCA 5675#ifdef CONFIG_IXGBE_DCA
5343 if (dca_add_requester(&pdev->dev) == 0) { 5676 if (dca_add_requester(&pdev->dev) == 0) {
5344 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 5677 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -5401,6 +5734,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
5401 cancel_work_sync(&adapter->sfp_task); 5734 cancel_work_sync(&adapter->sfp_task);
5402 cancel_work_sync(&adapter->multispeed_fiber_task); 5735 cancel_work_sync(&adapter->multispeed_fiber_task);
5403 cancel_work_sync(&adapter->sfp_config_module_task); 5736 cancel_work_sync(&adapter->sfp_config_module_task);
5737 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5738 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5739 cancel_work_sync(&adapter->fdir_reinit_task);
5404 flush_scheduled_work(); 5740 flush_scheduled_work();
5405 5741
5406#ifdef CONFIG_IXGBE_DCA 5742#ifdef CONFIG_IXGBE_DCA
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index e43d6248d7d4..453e966762f0 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -606,6 +606,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
606 hw->phy.sfp_setup_needed = true; 606 hw->phy.sfp_setup_needed = true;
607 607
608 /* Determine if the SFP+ PHY is dual speed or not. */ 608 /* Determine if the SFP+ PHY is dual speed or not. */
609 hw->phy.multispeed_fiber = false;
609 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && 610 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
610 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || 611 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
611 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && 612 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index df1f7034c284..fa87309dc087 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -30,6 +30,7 @@
30 30
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/mdio.h> 32#include <linux/mdio.h>
33#include <linux/list.h>
33 34
34/* Vendor ID */ 35/* Vendor ID */
35#define IXGBE_INTEL_VENDOR_ID 0x8086 36#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -230,6 +231,34 @@
230#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ 231#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
231#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ 232#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
232 233
234/* Flow Director registers */
235#define IXGBE_FDIRCTRL 0x0EE00
236#define IXGBE_FDIRHKEY 0x0EE68
237#define IXGBE_FDIRSKEY 0x0EE6C
238#define IXGBE_FDIRDIP4M 0x0EE3C
239#define IXGBE_FDIRSIP4M 0x0EE40
240#define IXGBE_FDIRTCPM 0x0EE44
241#define IXGBE_FDIRUDPM 0x0EE48
242#define IXGBE_FDIRIP6M 0x0EE74
243#define IXGBE_FDIRM 0x0EE70
244
245/* Flow Director Stats registers */
246#define IXGBE_FDIRFREE 0x0EE38
247#define IXGBE_FDIRLEN 0x0EE4C
248#define IXGBE_FDIRUSTAT 0x0EE50
249#define IXGBE_FDIRFSTAT 0x0EE54
250#define IXGBE_FDIRMATCH 0x0EE58
251#define IXGBE_FDIRMISS 0x0EE5C
252
253/* Flow Director Programming registers */
254#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
255#define IXGBE_FDIRIPSA 0x0EE18
256#define IXGBE_FDIRIPDA 0x0EE1C
257#define IXGBE_FDIRPORT 0x0EE20
258#define IXGBE_FDIRVLAN 0x0EE24
259#define IXGBE_FDIRHASH 0x0EE28
260#define IXGBE_FDIRCMD 0x0EE2C
261
233/* Transmit DMA registers */ 262/* Transmit DMA registers */
234#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ 263#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
235#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) 264#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
@@ -1264,8 +1293,10 @@
1264#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ 1293#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
1265 1294
1266/* ESDP Bit Masks */ 1295/* ESDP Bit Masks */
1267#define IXGBE_ESDP_SDP0 0x00000001 1296#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
1268#define IXGBE_ESDP_SDP1 0x00000002 1297#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
1298#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
1299#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
1269#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ 1300#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
1270#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ 1301#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
1271#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ 1302#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
@@ -1365,8 +1396,6 @@
1365#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ 1396#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
1366#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 1397#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
1367 1398
1368#define FIBER_LINK_UP_LIMIT 50
1369
1370/* PCS1GLSTA Bit Masks */ 1399/* PCS1GLSTA Bit Masks */
1371#define IXGBE_PCS1GLSTA_LINK_OK 1 1400#define IXGBE_PCS1GLSTA_LINK_OK 1
1372#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 1401#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
@@ -1487,6 +1516,8 @@
1487#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 1516#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
1488#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 1517#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
1489#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 1518#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
1519#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
1520#define IXGBE_FW_PATCH_VERSION_4 0x7
1490 1521
1491/* PCI Bus Info */ 1522/* PCI Bus Info */
1492#define IXGBE_PCI_LINK_STATUS 0xB2 1523#define IXGBE_PCI_LINK_STATUS 0xB2
@@ -1651,6 +1682,9 @@
1651#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ 1682#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
1652#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ 1683#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
1653#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ 1684#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
1685#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
1686#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
1687#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
1654#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ 1688#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
1655#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ 1689#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
1656#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ 1690#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
@@ -1783,6 +1817,82 @@
1783 1817
1784#endif 1818#endif
1785 1819
1820enum ixgbe_fdir_pballoc_type {
1821 IXGBE_FDIR_PBALLOC_64K = 0,
1822 IXGBE_FDIR_PBALLOC_128K,
1823 IXGBE_FDIR_PBALLOC_256K,
1824};
1825#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16
1826
1827/* Flow Director register values */
1828#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
1829#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
1830#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
1831#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
1832#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
1833#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
1834#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
1835#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
1836#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
1837#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
1838#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
1839#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
1840#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
1841
1842#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
1843#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
1844#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
1845#define IXGBE_FDIRM_VLANID 0x00000001
1846#define IXGBE_FDIRM_VLANP 0x00000002
1847#define IXGBE_FDIRM_POOL 0x00000004
1848#define IXGBE_FDIRM_L3P 0x00000008
1849#define IXGBE_FDIRM_L4P 0x00000010
1850#define IXGBE_FDIRM_FLEX 0x00000020
1851#define IXGBE_FDIRM_DIPv6 0x00000040
1852
1853#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
1854#define IXGBE_FDIRFREE_FREE_SHIFT 0
1855#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
1856#define IXGBE_FDIRFREE_COLL_SHIFT 16
1857#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
1858#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
1859#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
1860#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
1861#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
1862#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
1863#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
1864#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
1865#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
1866#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
1867#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
1868#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
1869#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
1870#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
1871#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
1872#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
1873
1874#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
1875#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
1876#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
1877#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
1878#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH 0x00000007
1879#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
1880#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
1881#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
1882#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
1883#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
1884#define IXGBE_FDIRCMD_IPV6 0x00000080
1885#define IXGBE_FDIRCMD_CLEARHT 0x00000100
1886#define IXGBE_FDIRCMD_DROP 0x00000200
1887#define IXGBE_FDIRCMD_INT 0x00000400
1888#define IXGBE_FDIRCMD_LAST 0x00000800
1889#define IXGBE_FDIRCMD_COLLISION 0x00001000
1890#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
1891#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
1892#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
1893#define IXGBE_FDIR_INIT_DONE_POLL 10
1894#define IXGBE_FDIRCMD_CMD_POLL 10
1895
1786/* Transmit Descriptor - Legacy */ 1896/* Transmit Descriptor - Legacy */
1787struct ixgbe_legacy_tx_desc { 1897struct ixgbe_legacy_tx_desc {
1788 u64 buffer_addr; /* Address of the descriptor's data buffer */ 1898 u64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -1956,6 +2066,45 @@ typedef u32 ixgbe_physical_layer;
1956#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 2066#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
1957#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 2067#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
1958 2068
2069/* Software ATR hash keys */
2070#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
2071#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
2072
2073/* Software ATR input stream offsets and masks */
2074#define IXGBE_ATR_VLAN_OFFSET 0
2075#define IXGBE_ATR_SRC_IPV6_OFFSET 2
2076#define IXGBE_ATR_SRC_IPV4_OFFSET 14
2077#define IXGBE_ATR_DST_IPV6_OFFSET 18
2078#define IXGBE_ATR_DST_IPV4_OFFSET 30
2079#define IXGBE_ATR_SRC_PORT_OFFSET 34
2080#define IXGBE_ATR_DST_PORT_OFFSET 36
2081#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
2082#define IXGBE_ATR_VM_POOL_OFFSET 40
2083#define IXGBE_ATR_L4TYPE_OFFSET 41
2084
2085#define IXGBE_ATR_L4TYPE_MASK 0x3
2086#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
2087#define IXGBE_ATR_L4TYPE_UDP 0x1
2088#define IXGBE_ATR_L4TYPE_TCP 0x2
2089#define IXGBE_ATR_L4TYPE_SCTP 0x3
2090#define IXGBE_ATR_HASH_MASK 0x7fff
2091
2092/* Flow Director ATR input struct. */
2093struct ixgbe_atr_input {
2094 /* Byte layout in order, all values with MSB first:
2095 *
2096 * vlan_id - 2 bytes
2097 * src_ip - 16 bytes
2098 * dst_ip - 16 bytes
2099 * src_port - 2 bytes
2100 * dst_port - 2 bytes
2101 * flex_bytes - 2 bytes
2102 * vm_pool - 1 byte
2103 * l4type - 1 byte
2104 */
2105 u8 byte_stream[42];
2106};
2107
1959enum ixgbe_eeprom_type { 2108enum ixgbe_eeprom_type {
1960 ixgbe_eeprom_uninitialized = 0, 2109 ixgbe_eeprom_uninitialized = 0,
1961 ixgbe_eeprom_spi, 2110 ixgbe_eeprom_spi,
@@ -2091,7 +2240,8 @@ struct ixgbe_fc_info {
2091 u16 pause_time; /* Flow Control Pause timer */ 2240 u16 pause_time; /* Flow Control Pause timer */
2092 bool send_xon; /* Flow control send XON */ 2241 bool send_xon; /* Flow control send XON */
2093 bool strict_ieee; /* Strict IEEE mode */ 2242 bool strict_ieee; /* Strict IEEE mode */
2094 bool disable_fc_autoneg; /* Turn off autoneg FC mode */ 2243 bool disable_fc_autoneg; /* Do not autonegotiate FC */
2244 bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
2095 enum ixgbe_fc_mode current_mode; /* FC mode in effect */ 2245 enum ixgbe_fc_mode current_mode; /* FC mode in effect */
2096 enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ 2246 enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
2097}; 2247};
@@ -2223,8 +2373,7 @@ struct ixgbe_mac_operations {
2223 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 2373 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
2224 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2374 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2225 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2375 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2226 s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, 2376 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *);
2227 ixgbe_mc_addr_itr);
2228 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 2377 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
2229 ixgbe_mc_addr_itr); 2378 ixgbe_mc_addr_itr);
2230 s32 (*enable_mc)(struct ixgbe_hw *); 2379 s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2234,7 +2383,7 @@ struct ixgbe_mac_operations {
2234 s32 (*init_uta_tables)(struct ixgbe_hw *); 2383 s32 (*init_uta_tables)(struct ixgbe_hw *);
2235 2384
2236 /* Flow Control */ 2385 /* Flow Control */
2237 s32 (*setup_fc)(struct ixgbe_hw *, s32); 2386 s32 (*fc_enable)(struct ixgbe_hw *, s32);
2238}; 2387};
2239 2388
2240struct ixgbe_phy_operations { 2389struct ixgbe_phy_operations {
@@ -2281,6 +2430,7 @@ struct ixgbe_mac_info {
2281 bool orig_link_settings_stored; 2430 bool orig_link_settings_stored;
2282 bool autoneg; 2431 bool autoneg;
2283 bool autoneg_succeeded; 2432 bool autoneg_succeeded;
2433 bool autotry_restart;
2284}; 2434};
2285 2435
2286struct ixgbe_phy_info { 2436struct ixgbe_phy_info {
@@ -2346,6 +2496,8 @@ struct ixgbe_info {
2346#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 2496#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
2347#define IXGBE_ERR_SFP_NOT_PRESENT -20 2497#define IXGBE_ERR_SFP_NOT_PRESENT -20
2348#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 2498#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
2499#define IXGBE_ERR_FDIR_REINIT_FAILED -23
2500#define IXGBE_ERR_EEPROM_VERSION -24
2349#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2501#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
2350 2502
2351#endif /* _IXGBE_TYPE_H_ */ 2503#endif /* _IXGBE_TYPE_H_ */
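For readers following the new ATR definitions, here is a minimal sketch of how the MSB-first layout of struct ixgbe_atr_input maps values onto the offsets defined above. This is illustrative only; the driver's actual setters (ixgbe_atr_set_vlan_id_82599() and friends, used by ixgbe_atr() earlier in this diff) live in ixgbe_82599.c and are not part of this hunk.

	/* Illustrative only: store a 16-bit value MSB-first at a given stream offset */
	static void atr_sketch_put_be16(struct ixgbe_atr_input *input,
					unsigned int offset, u16 value)
	{
		input->byte_stream[offset] = (value >> 8) & 0xff;
		input->byte_stream[offset + 1] = value & 0xff;
	}

	/*
	 * Example: a VLAN id occupies bytes 0-1 and the source port bytes 34-35
	 * of the 42-byte stream:
	 *
	 *	atr_sketch_put_be16(&atr_input, IXGBE_ATR_VLAN_OFFSET, vlan_id);
	 *	atr_sketch_put_be16(&atr_input, IXGBE_ATR_SRC_PORT_OFFSET, src_port);
	 */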
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 621a7c0c46ba..1e3c63d67b91 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1939,7 +1939,6 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1939 TXCS_SELECT_QUEUE0 | 1939 TXCS_SELECT_QUEUE0 |
1940 TXCS_QUEUE0S | 1940 TXCS_QUEUE0S |
1941 TXCS_ENABLE); 1941 TXCS_ENABLE);
1942 netdev->trans_start = jiffies;
1943 1942
1944 tx_dbg(jme, "xmit: %d+%d@%lu\n", idx, 1943 tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
1945 skb_shinfo(skb)->nr_frags + 2, 1944 skb_shinfo(skb)->nr_frags + 2,
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index dc238567cae1..b4cf602c32b0 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -133,6 +133,7 @@ struct korina_private {
133 int dma_halt_cnt; 133 int dma_halt_cnt;
134 int dma_run_cnt; 134 int dma_run_cnt;
135 struct napi_struct napi; 135 struct napi_struct napi;
136 struct timer_list media_check_timer;
136 struct mii_if_info mii_if; 137 struct mii_if_info mii_if;
137 struct net_device *dev; 138 struct net_device *dev;
138 int phy_addr; 139 int phy_addr;
@@ -664,6 +665,15 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media)
664 &lp->eth_regs->ethmac2); 665 &lp->eth_regs->ethmac2);
665} 666}
666 667
668static void korina_poll_media(unsigned long data)
669{
670 struct net_device *dev = (struct net_device *) data;
671 struct korina_private *lp = netdev_priv(dev);
672
673 korina_check_media(dev, 0);
674 mod_timer(&lp->media_check_timer, jiffies + HZ);
675}
676
667static void korina_set_carrier(struct mii_if_info *mii) 677static void korina_set_carrier(struct mii_if_info *mii)
668{ 678{
669 if (mii->force_media) { 679 if (mii->force_media) {
@@ -1034,6 +1044,7 @@ static int korina_open(struct net_device *dev)
1034 dev->name, lp->und_irq); 1044 dev->name, lp->und_irq);
1035 goto err_free_ovr_irq; 1045 goto err_free_ovr_irq;
1036 } 1046 }
1047 mod_timer(&lp->media_check_timer, jiffies + 1);
1037out: 1048out:
1038 return ret; 1049 return ret;
1039 1050
@@ -1053,6 +1064,8 @@ static int korina_close(struct net_device *dev)
1053 struct korina_private *lp = netdev_priv(dev); 1064 struct korina_private *lp = netdev_priv(dev);
1054 u32 tmp; 1065 u32 tmp;
1055 1066
1067 del_timer(&lp->media_check_timer);
1068
1056 /* Disable interrupts */ 1069 /* Disable interrupts */
1057 disable_irq(lp->rx_irq); 1070 disable_irq(lp->rx_irq);
1058 disable_irq(lp->tx_irq); 1071 disable_irq(lp->tx_irq);
@@ -1183,6 +1196,7 @@ static int korina_probe(struct platform_device *pdev)
1183 ": cannot register net device %d\n", rc); 1196 ": cannot register net device %d\n", rc);
1184 goto probe_err_register; 1197 goto probe_err_register;
1185 } 1198 }
1199 setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);
1186out: 1200out:
1187 return rc; 1201 return rc;
1188 1202
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
new file mode 100644
index 000000000000..39b0aea2aab3
--- /dev/null
+++ b/drivers/net/ks8842.c
@@ -0,0 +1,732 @@
1/*
 2 * ks8842.c timberdale KS8842 ethernet driver
3 * Copyright (c) 2009 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* Supports:
20 * The Micrel KS8842 behind the timberdale FPGA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29
30#define DRV_NAME "ks8842"
31
32/* Timberdale specific Registers */
33#define REG_TIMB_RST 0x1c
34
35/* KS8842 registers */
36
37#define REG_SELECT_BANK 0x0e
38
39/* bank 0 registers */
40#define REG_QRFCR 0x04
41
42/* bank 2 registers */
43#define REG_MARL 0x00
44#define REG_MARM 0x02
45#define REG_MARH 0x04
46
47/* bank 3 registers */
48#define REG_GRR 0x06
49
50/* bank 16 registers */
51#define REG_TXCR 0x00
52#define REG_TXSR 0x02
53#define REG_RXCR 0x04
54#define REG_TXMIR 0x08
55#define REG_RXMIR 0x0A
56
57/* bank 17 registers */
58#define REG_TXQCR 0x00
59#define REG_RXQCR 0x02
60#define REG_TXFDPR 0x04
61#define REG_RXFDPR 0x06
62#define REG_QMU_DATA_LO 0x08
63#define REG_QMU_DATA_HI 0x0A
64
65/* bank 18 registers */
66#define REG_IER 0x00
67#define IRQ_LINK_CHANGE 0x8000
68#define IRQ_TX 0x4000
69#define IRQ_RX 0x2000
70#define IRQ_RX_OVERRUN 0x0800
71#define IRQ_TX_STOPPED 0x0200
72#define IRQ_RX_STOPPED 0x0100
73#define IRQ_RX_ERROR 0x0080
74#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
75 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
76#define REG_ISR 0x02
77#define REG_RXSR 0x04
78#define RXSR_VALID 0x8000
79#define RXSR_BROADCAST 0x80
80#define RXSR_MULTICAST 0x40
81#define RXSR_UNICAST 0x20
82#define RXSR_FRAMETYPE 0x08
83#define RXSR_TOO_LONG 0x04
84#define RXSR_RUNT 0x02
85#define RXSR_CRC_ERROR 0x01
86#define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR)
87
88/* bank 32 registers */
89#define REG_SW_ID_AND_ENABLE 0x00
90#define REG_SGCR1 0x02
91#define REG_SGCR2 0x04
92#define REG_SGCR3 0x06
93
94/* bank 39 registers */
95#define REG_MACAR1 0x00
96#define REG_MACAR2 0x02
97#define REG_MACAR3 0x04
98
99/* bank 45 registers */
100#define REG_P1MBCR 0x00
101#define REG_P1MBSR 0x02
102
103/* bank 46 registers */
104#define REG_P2MBCR 0x00
105#define REG_P2MBSR 0x02
106
107/* bank 48 registers */
108#define REG_P1CR2 0x02
109
110/* bank 49 registers */
111#define REG_P1CR4 0x02
112#define REG_P1SR 0x04
113
114struct ks8842_adapter {
115 void __iomem *hw_addr;
116 int irq;
117 struct tasklet_struct tasklet;
118 spinlock_t lock; /* spinlock to be interrupt safe */
119 struct platform_device *pdev;
120};
121
122static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
123{
124 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
125}
126
127static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank,
128 u8 value, int offset)
129{
130 ks8842_select_bank(adapter, bank);
131 iowrite8(value, adapter->hw_addr + offset);
132}
133
134static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank,
135 u16 value, int offset)
136{
137 ks8842_select_bank(adapter, bank);
138 iowrite16(value, adapter->hw_addr + offset);
139}
140
141static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank,
142 u16 bits, int offset)
143{
144 u16 reg;
145 ks8842_select_bank(adapter, bank);
146 reg = ioread16(adapter->hw_addr + offset);
147 reg |= bits;
148 iowrite16(reg, adapter->hw_addr + offset);
149}
150
151static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank,
152 u16 bits, int offset)
153{
154 u16 reg;
155 ks8842_select_bank(adapter, bank);
156 reg = ioread16(adapter->hw_addr + offset);
157 reg &= ~bits;
158 iowrite16(reg, adapter->hw_addr + offset);
159}
160
161static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank,
162 u32 value, int offset)
163{
164 ks8842_select_bank(adapter, bank);
165 iowrite32(value, adapter->hw_addr + offset);
166}
167
168static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank,
169 int offset)
170{
171 ks8842_select_bank(adapter, bank);
172 return ioread8(adapter->hw_addr + offset);
173}
174
175static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank,
176 int offset)
177{
178 ks8842_select_bank(adapter, bank);
179 return ioread16(adapter->hw_addr + offset);
180}
181
182static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
183 int offset)
184{
185 ks8842_select_bank(adapter, bank);
186 return ioread32(adapter->hw_addr + offset);
187}
188
189static void ks8842_reset(struct ks8842_adapter *adapter)
190{
 191	/* The KS8842 goes haywire when doing a software reset;
 192	* a workaround in the timberdale IP is implemented to
 193	* do a hardware reset instead
194 ks8842_write16(adapter, 3, 1, REG_GRR);
195 msleep(10);
196 iowrite16(0, adapter->hw_addr + REG_GRR);
197 */
198 iowrite16(32, adapter->hw_addr + REG_SELECT_BANK);
199 iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
200 msleep(20);
201}
202
203static void ks8842_update_link_status(struct net_device *netdev,
204 struct ks8842_adapter *adapter)
205{
206 /* check the status of the link */
207 if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
208 netif_carrier_on(netdev);
209 netif_wake_queue(netdev);
210 } else {
211 netif_stop_queue(netdev);
212 netif_carrier_off(netdev);
213 }
214}
215
216static void ks8842_enable_tx(struct ks8842_adapter *adapter)
217{
218 ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR);
219}
220
221static void ks8842_disable_tx(struct ks8842_adapter *adapter)
222{
223 ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR);
224}
225
226static void ks8842_enable_rx(struct ks8842_adapter *adapter)
227{
228 ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR);
229}
230
231static void ks8842_disable_rx(struct ks8842_adapter *adapter)
232{
233 ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR);
234}
235
236static void ks8842_reset_hw(struct ks8842_adapter *adapter)
237{
238 /* reset the HW */
239 ks8842_reset(adapter);
240
241 /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
242 ks8842_write16(adapter, 16, 0x000E, REG_TXCR);
243
244 /* enable the receiver, uni + multi + broadcast + flow ctrl
245 + crc strip */
246 ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
247 REG_RXCR);
248
249 /* TX frame pointer autoincrement */
250 ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);
251
252 /* RX frame pointer autoincrement */
253 ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR);
254
255 /* RX 2 kb high watermark */
256 ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);
257
 258	/* aggressive back-off in half duplex */
259 ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);
260
 261	/* enable no excessive collision drop */
262 ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
263
264 /* Enable port 1 force flow control / back pressure / transmit / recv */
265 ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);
266
267 /* restart port auto-negotiation */
268 ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
269 /* only advertise 10Mbps */
270 ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
271
272 /* Enable the transmitter */
273 ks8842_enable_tx(adapter);
274
275 /* Enable the receiver */
276 ks8842_enable_rx(adapter);
277
278 /* clear all interrupts */
279 ks8842_write16(adapter, 18, 0xffff, REG_ISR);
280
281 /* enable interrupts */
282 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
283
284 /* enable the switch */
285 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
286}
287
288static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
289{
290 int i;
291 u16 mac;
292
293 for (i = 0; i < ETH_ALEN; i++)
294 dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
295
296 /* make sure the switch port uses the same MAC as the QMU */
297 mac = ks8842_read16(adapter, 2, REG_MARL);
298 ks8842_write16(adapter, 39, mac, REG_MACAR1);
299 mac = ks8842_read16(adapter, 2, REG_MARM);
300 ks8842_write16(adapter, 39, mac, REG_MACAR2);
301 mac = ks8842_read16(adapter, 2, REG_MARH);
302 ks8842_write16(adapter, 39, mac, REG_MACAR3);
303}
304
305static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
306{
307 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
308}
309
310static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
311{
312 struct ks8842_adapter *adapter = netdev_priv(netdev);
313 int len = skb->len;
314 u32 *ptr = (u32 *)skb->data;
315 u32 ctrl;
316
317 dev_dbg(&adapter->pdev->dev,
318 "%s: len %u head %p data %p tail %p end %p\n",
319 __func__, skb->len, skb->head, skb->data,
320 skb_tail_pointer(skb), skb_end_pointer(skb));
321
322 /* check FIFO buffer space, we need space for CRC and command bits */
323 if (ks8842_tx_fifo_space(adapter) < len + 8)
324 return NETDEV_TX_BUSY;
325
326 /* the control word, enable IRQ, port 1 and the length */
327 ctrl = 0x8000 | 0x100 | (len << 16);
328 ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);
329
330 netdev->stats.tx_bytes += len;
331
332 /* copy buffer */
333 while (len > 0) {
334 iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
335 len -= sizeof(u32);
336 ptr++;
337 }
338
339 /* enqueue packet */
340 ks8842_write16(adapter, 17, 1, REG_TXQCR);
341
342 dev_kfree_skb(skb);
343
344 return NETDEV_TX_OK;
345}
346
347static void ks8842_rx_frame(struct net_device *netdev,
348 struct ks8842_adapter *adapter)
349{
350 u32 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
351 int len = (status >> 16) & 0x7ff;
352
353 status &= 0xffff;
354
355 dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n",
356 __func__, status);
357
358 /* check the status */
359 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
360 struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2);
361
 362		dev_dbg(&adapter->pdev->dev, "%s, got packet, len: %d\n",
363 __func__, len);
364 if (skb) {
365 u32 *data;
366
367 netdev->stats.rx_packets++;
368 netdev->stats.rx_bytes += len;
369 if (status & RXSR_MULTICAST)
370 netdev->stats.multicast++;
371
 372			/* Align socket buffer on a 4-byte boundary for
373 better performance. */
374 skb_reserve(skb, 2);
375 data = (u32 *)skb_put(skb, len);
376
377 ks8842_select_bank(adapter, 17);
378 while (len > 0) {
379 *data++ = ioread32(adapter->hw_addr +
380 REG_QMU_DATA_LO);
381 len -= sizeof(u32);
382 }
383
384 skb->protocol = eth_type_trans(skb, netdev);
385 netif_rx(skb);
386 } else
387 netdev->stats.rx_dropped++;
388 } else {
389 dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status);
390 netdev->stats.rx_errors++;
391 if (status & RXSR_TOO_LONG)
392 netdev->stats.rx_length_errors++;
393 if (status & RXSR_CRC_ERROR)
394 netdev->stats.rx_crc_errors++;
395 if (status & RXSR_RUNT)
396 netdev->stats.rx_frame_errors++;
397 }
398
399 /* set high watermark to 3K */
400 ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
401
402 /* release the frame */
403 ks8842_write16(adapter, 17, 0x01, REG_RXQCR);
404
405 /* set high watermark to 2K */
406 ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
407}
408
409void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
410{
411 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
412 dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n",
413 __func__, rx_data);
414 while (rx_data) {
415 ks8842_rx_frame(netdev, adapter);
416 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
417 }
418}
419
420void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
421{
422 u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
423 dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr);
424 netdev->stats.tx_packets++;
425 if (netif_queue_stopped(netdev))
426 netif_wake_queue(netdev);
427}
428
429void ks8842_handle_rx_overrun(struct net_device *netdev,
430 struct ks8842_adapter *adapter)
431{
432 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
433 netdev->stats.rx_errors++;
434 netdev->stats.rx_fifo_errors++;
435}
436
437void ks8842_tasklet(unsigned long arg)
438{
439 struct net_device *netdev = (struct net_device *)arg;
440 struct ks8842_adapter *adapter = netdev_priv(netdev);
441 u16 isr;
442 unsigned long flags;
443 u16 entry_bank;
444
445 /* read current bank to be able to set it back */
446 spin_lock_irqsave(&adapter->lock, flags);
447 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
448 spin_unlock_irqrestore(&adapter->lock, flags);
449
450 isr = ks8842_read16(adapter, 18, REG_ISR);
451 dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
452
453 /* Ack */
454 ks8842_write16(adapter, 18, isr, REG_ISR);
455
456 if (!netif_running(netdev))
457 return;
458
459 if (isr & IRQ_LINK_CHANGE)
460 ks8842_update_link_status(netdev, adapter);
461
462 if (isr & (IRQ_RX | IRQ_RX_ERROR))
463 ks8842_handle_rx(netdev, adapter);
464
465 if (isr & IRQ_TX)
466 ks8842_handle_tx(netdev, adapter);
467
468 if (isr & IRQ_RX_OVERRUN)
469 ks8842_handle_rx_overrun(netdev, adapter);
470
471 if (isr & IRQ_TX_STOPPED) {
472 ks8842_disable_tx(adapter);
473 ks8842_enable_tx(adapter);
474 }
475
476 if (isr & IRQ_RX_STOPPED) {
477 ks8842_disable_rx(adapter);
478 ks8842_enable_rx(adapter);
479 }
480
481 /* re-enable interrupts, put back the bank selection register */
482 spin_lock_irqsave(&adapter->lock, flags);
483 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
484 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
485 spin_unlock_irqrestore(&adapter->lock, flags);
486}
487
488static irqreturn_t ks8842_irq(int irq, void *devid)
489{
490 struct ks8842_adapter *adapter = devid;
491 u16 isr;
492 u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
493 irqreturn_t ret = IRQ_NONE;
494
495 isr = ks8842_read16(adapter, 18, REG_ISR);
496 dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
497
498 if (isr) {
499 /* disable IRQ */
500 ks8842_write16(adapter, 18, 0x00, REG_IER);
501
502 /* schedule tasklet */
503 tasklet_schedule(&adapter->tasklet);
504
505 ret = IRQ_HANDLED;
506 }
507
508 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
509
510 return ret;
511}
512
513
514/* Netdevice operations */
515
516static int ks8842_open(struct net_device *netdev)
517{
518 struct ks8842_adapter *adapter = netdev_priv(netdev);
519 int err;
520
521 dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
522
523 /* reset the HW */
524 ks8842_reset_hw(adapter);
525
526 ks8842_update_link_status(netdev, adapter);
527
528 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
529 adapter);
530 if (err) {
531 printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
532 adapter->irq, err);
533 return err;
534 }
535
536 return 0;
537}
538
539static int ks8842_close(struct net_device *netdev)
540{
541 struct ks8842_adapter *adapter = netdev_priv(netdev);
542
543 dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
544
545 /* free the irq */
546 free_irq(adapter->irq, adapter);
547
548 /* disable the switch */
549 ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);
550
551 return 0;
552}
553
554static int ks8842_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
555{
556 int ret;
557 struct ks8842_adapter *adapter = netdev_priv(netdev);
558
559 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
560
561 ret = ks8842_tx_frame(skb, netdev);
562
563 if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
564 netif_stop_queue(netdev);
565
566 return ret;
567}
568
569static int ks8842_set_mac(struct net_device *netdev, void *p)
570{
571 struct ks8842_adapter *adapter = netdev_priv(netdev);
572 unsigned long flags;
573 struct sockaddr *addr = p;
 574	u8 *mac = (u8 *)addr->sa_data;
575 int i;
576
577 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
578
579 if (!is_valid_ether_addr(addr->sa_data))
580 return -EADDRNOTAVAIL;
581
582 memcpy(netdev->dev_addr, mac, netdev->addr_len);
583
584 spin_lock_irqsave(&adapter->lock, flags);
585 for (i = 0; i < ETH_ALEN; i++) {
586 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
587 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
588 REG_MACAR1 + i);
589 }
590 spin_unlock_irqrestore(&adapter->lock, flags);
591 return 0;
592}
593
594static void ks8842_tx_timeout(struct net_device *netdev)
595{
596 struct ks8842_adapter *adapter = netdev_priv(netdev);
597 unsigned long flags;
598
599 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
600
601 spin_lock_irqsave(&adapter->lock, flags);
602 /* disable interrupts */
603 ks8842_write16(adapter, 18, 0, REG_IER);
604 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
605 spin_unlock_irqrestore(&adapter->lock, flags);
606
607 ks8842_reset_hw(adapter);
608
609 ks8842_update_link_status(netdev, adapter);
610}
611
612static const struct net_device_ops ks8842_netdev_ops = {
613 .ndo_open = ks8842_open,
614 .ndo_stop = ks8842_close,
615 .ndo_start_xmit = ks8842_xmit_frame,
616 .ndo_set_mac_address = ks8842_set_mac,
617 .ndo_tx_timeout = ks8842_tx_timeout,
618 .ndo_validate_addr = eth_validate_addr
619};
620
621static struct ethtool_ops ks8842_ethtool_ops = {
622 .get_link = ethtool_op_get_link,
623};
624
625static int __devinit ks8842_probe(struct platform_device *pdev)
626{
627 int err = -ENOMEM;
628 struct resource *iomem;
629 struct net_device *netdev;
630 struct ks8842_adapter *adapter;
631 u16 id;
632
633 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
634 if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
635 goto err_mem_region;
636
637 netdev = alloc_etherdev(sizeof(struct ks8842_adapter));
638 if (!netdev)
639 goto err_alloc_etherdev;
640
641 SET_NETDEV_DEV(netdev, &pdev->dev);
642
643 adapter = netdev_priv(netdev);
644 adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
645 if (!adapter->hw_addr)
646 goto err_ioremap;
647
648 adapter->irq = platform_get_irq(pdev, 0);
649 if (adapter->irq < 0) {
650 err = adapter->irq;
651 goto err_get_irq;
652 }
653
654 adapter->pdev = pdev;
655
656 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
657 spin_lock_init(&adapter->lock);
658
659 netdev->netdev_ops = &ks8842_netdev_ops;
660 netdev->ethtool_ops = &ks8842_ethtool_ops;
661
662 ks8842_read_mac_addr(adapter, netdev->dev_addr);
663
664 id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
665
666 strcpy(netdev->name, "eth%d");
667 err = register_netdev(netdev);
668 if (err)
669 goto err_register;
670
671 platform_set_drvdata(pdev, netdev);
672
673 printk(KERN_INFO DRV_NAME
674 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
675 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
676
677 return 0;
678
679err_register:
680err_get_irq:
681 iounmap(adapter->hw_addr);
682err_ioremap:
683 free_netdev(netdev);
684err_alloc_etherdev:
685 release_mem_region(iomem->start, resource_size(iomem));
686err_mem_region:
687 return err;
688}
689
690static int __devexit ks8842_remove(struct platform_device *pdev)
691{
692 struct net_device *netdev = platform_get_drvdata(pdev);
693 struct ks8842_adapter *adapter = netdev_priv(netdev);
694 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
695
696 unregister_netdev(netdev);
697 tasklet_kill(&adapter->tasklet);
698 iounmap(adapter->hw_addr);
699 free_netdev(netdev);
700 release_mem_region(iomem->start, resource_size(iomem));
701 platform_set_drvdata(pdev, NULL);
702 return 0;
703}
704
705
706static struct platform_driver ks8842_platform_driver = {
707 .driver = {
708 .name = DRV_NAME,
709 .owner = THIS_MODULE,
710 },
711 .probe = ks8842_probe,
712 .remove = ks8842_remove,
713};
714
715static int __init ks8842_init(void)
716{
717 return platform_driver_register(&ks8842_platform_driver);
718}
719
720static void __exit ks8842_exit(void)
721{
722 platform_driver_unregister(&ks8842_platform_driver);
723}
724
725module_init(ks8842_init);
726module_exit(ks8842_exit);
727
728MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
729MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
730MODULE_LICENSE("GPL v2");
731MODULE_ALIAS("platform:ks8842");
732
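The new ks8842 driver binds by platform device name and expects one memory resource plus one interrupt (see ks8842_probe() above). Below is a minimal sketch of how a board/FPGA support file might register a matching device; the base address and IRQ are placeholders, not values taken from this patch.

	#include <linux/kernel.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	/* Placeholder resources -- the real values come from the timberdale setup */
	static struct resource ks8842_resources[] = {
		{
			.start	= 0x10000000,		/* hypothetical MMIO base */
			.end	= 0x10000000 + 0xff,
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 32,			/* hypothetical IRQ line */
			.end	= 32,
			.flags	= IORESOURCE_IRQ,
		},
	};

	/* The name must match DRV_NAME ("ks8842") for the driver above to bind */
	static struct platform_device ks8842_device = {
		.name		= "ks8842",
		.id		= -1,
		.resource	= ks8842_resources,
		.num_resources	= ARRAY_SIZE(ks8842_resources),
	};

	/* In board init code: platform_device_register(&ks8842_device); */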
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 8e884869a05b..f8fa0c3f0f64 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe(int unit)
304 if (!MACH_IS_MAC) 304 if (!MACH_IS_MAC)
305 return ERR_PTR(-ENODEV); 305 return ERR_PTR(-ENODEV);
306 306
307 dev = alloc_ei_netdev(); 307 dev = ____alloc_ei_netdev(0);
308 if (!dev) 308 if (!dev)
309 return ERR_PTR(-ENOMEM); 309 return ERR_PTR(-ENOMEM);
310 310
@@ -481,15 +481,15 @@ void cleanup_module(void)
481static const struct net_device_ops mac8390_netdev_ops = { 481static const struct net_device_ops mac8390_netdev_ops = {
482 .ndo_open = mac8390_open, 482 .ndo_open = mac8390_open,
483 .ndo_stop = mac8390_close, 483 .ndo_stop = mac8390_close,
484 .ndo_start_xmit = ei_start_xmit, 484 .ndo_start_xmit = __ei_start_xmit,
485 .ndo_tx_timeout = ei_tx_timeout, 485 .ndo_tx_timeout = __ei_tx_timeout,
486 .ndo_get_stats = ei_get_stats, 486 .ndo_get_stats = __ei_get_stats,
487 .ndo_set_multicast_list = ei_set_multicast_list, 487 .ndo_set_multicast_list = __ei_set_multicast_list,
488 .ndo_validate_addr = eth_validate_addr, 488 .ndo_validate_addr = eth_validate_addr,
489 .ndo_set_mac_address = eth_mac_addr, 489 .ndo_set_mac_address = eth_mac_addr,
490 .ndo_change_mtu = eth_change_mtu, 490 .ndo_change_mtu = eth_change_mtu,
491#ifdef CONFIG_NET_POLL_CONTROLLER 491#ifdef CONFIG_NET_POLL_CONTROLLER
492 .ndo_poll_controller = ei_poll, 492 .ndo_poll_controller = __ei_poll,
493#endif 493#endif
494}; 494};
495 495
@@ -620,19 +620,12 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
620 620
621 /* Good, done, now spit out some messages */ 621 /* Good, done, now spit out some messages */
622 printk(KERN_INFO "%s: %s in slot %X (type %s)\n", 622 printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
623 dev->name, ndev->board->name, ndev->board->slot, cardname[type]); 623 dev->name, ndev->board->name, ndev->board->slot, cardname[type]);
624 printk(KERN_INFO "MAC "); 624 printk(KERN_INFO
625 { 625 "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
626 int i; 626 dev->dev_addr, dev->irq,
627 for (i = 0; i < 6; i++) { 627 (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
628 printk("%2.2x", dev->dev_addr[i]); 628 dev->mem_start, access_bitmode ? 32 : 16);
629 if (i < 5)
630 printk(":");
631 }
632 }
633 printk(" IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
634 dev->irq, (int)((dev->mem_end - dev->mem_start)/0x1000) * 4,
635 dev->mem_start, access_bitmode?32:16);
636 return 0; 629 return 0;
637} 630}
638 631
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d5334b41e4b4..99eed9f37c84 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -232,7 +232,7 @@ static int macvlan_open(struct net_device *dev)
232 if (macvlan_addr_busy(vlan->port, dev->dev_addr)) 232 if (macvlan_addr_busy(vlan->port, dev->dev_addr))
233 goto out; 233 goto out;
234 234
235 err = dev_unicast_add(lowerdev, dev->dev_addr, ETH_ALEN); 235 err = dev_unicast_add(lowerdev, dev->dev_addr);
236 if (err < 0) 236 if (err < 0)
237 goto out; 237 goto out;
238 if (dev->flags & IFF_ALLMULTI) { 238 if (dev->flags & IFF_ALLMULTI) {
@@ -244,7 +244,7 @@ static int macvlan_open(struct net_device *dev)
244 return 0; 244 return 0;
245 245
246del_unicast: 246del_unicast:
247 dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN); 247 dev_unicast_delete(lowerdev, dev->dev_addr);
248out: 248out:
249 return err; 249 return err;
250} 250}
@@ -258,7 +258,7 @@ static int macvlan_stop(struct net_device *dev)
258 if (dev->flags & IFF_ALLMULTI) 258 if (dev->flags & IFF_ALLMULTI)
259 dev_set_allmulti(lowerdev, -1); 259 dev_set_allmulti(lowerdev, -1);
260 260
261 dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN); 261 dev_unicast_delete(lowerdev, dev->dev_addr);
262 262
263 macvlan_hash_del(vlan); 263 macvlan_hash_del(vlan);
264 return 0; 264 return 0;
@@ -282,10 +282,11 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
282 if (macvlan_addr_busy(vlan->port, addr->sa_data)) 282 if (macvlan_addr_busy(vlan->port, addr->sa_data))
283 return -EBUSY; 283 return -EBUSY;
284 284
285 if ((err = dev_unicast_add(lowerdev, addr->sa_data, ETH_ALEN))) 285 err = dev_unicast_add(lowerdev, addr->sa_data);
286 if (err)
286 return err; 287 return err;
287 288
288 dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN); 289 dev_unicast_delete(lowerdev, dev->dev_addr);
289 290
290 macvlan_hash_change_addr(vlan, addr->sa_data); 291 macvlan_hash_change_addr(vlan, addr->sa_data);
291 } 292 }
@@ -358,6 +359,7 @@ static int macvlan_init(struct net_device *dev)
358 (lowerdev->state & MACVLAN_STATE_MASK); 359 (lowerdev->state & MACVLAN_STATE_MASK);
359 dev->features = lowerdev->features & MACVLAN_FEATURES; 360 dev->features = lowerdev->features & MACVLAN_FEATURES;
360 dev->iflink = lowerdev->ifindex; 361 dev->iflink = lowerdev->ifindex;
362 dev->hard_header_len = lowerdev->hard_header_len;
361 363
362 macvlan_set_lockdep_class(dev); 364 macvlan_set_lockdep_class(dev);
363 365
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 66483035f683..dc45e9856c35 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -296,6 +296,23 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
296 ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX || 296 ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX ||
297 ecmd->speed == SPEED_10000); 297 ecmd->speed == SPEED_10000);
298 } 298 }
299
300 /* 10GBASE-T MDI/MDI-X */
301 if (ecmd->port == PORT_TP && ecmd->speed == SPEED_10000) {
302 switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
303 MDIO_PMA_10GBT_SWAPPOL)) {
304 case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
305 ecmd->eth_tp_mdix = ETH_TP_MDI;
306 break;
307 case 0:
308 ecmd->eth_tp_mdix = ETH_TP_MDI_X;
309 break;
310 default:
311 /* It's complicated... */
312 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
313 break;
314 }
315 }
299} 316}
300EXPORT_SYMBOL(mdio45_ethtool_gset_npage); 317EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
301 318
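
The mdio.c hunk derives MDI/MDI-X status for 10GBASE-T from the PMA/PMD pair-swap register. The same decode, pulled out as a standalone helper for clarity (example_10gbt_mdix is a hypothetical name; it assumes a struct mdio_if_info wired up as in the hunk above):

/*
 * Sketch: both A/B and C/D pairs reported straight means MDI, both reported
 * swapped means MDI-X, anything else is left as "invalid/unknown".
 */
#include <linux/mdio.h>
#include <linux/ethtool.h>

static u8 example_10gbt_mdix(const struct mdio_if_info *mdio)
{
	int reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
				  MDIO_PMA_10GBT_SWAPPOL);

	if (reg == (MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX))
		return ETH_TP_MDI;		/* both pairs straight */
	if (reg == 0)
		return ETH_TP_MDI_X;		/* both pairs swapped */
	return ETH_TP_MDI_INVALID;		/* mixed/unknown polarity */
}
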
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 21040a0d81fe..1fd068e1d930 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -5,5 +5,5 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
5 5
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o 6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \ 8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o 9 en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_ethtool.c
index c1bd040b9e05..091f99052c91 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -38,64 +38,6 @@
38#include "mlx4_en.h" 38#include "mlx4_en.h"
39#include "en_port.h" 39#include "en_port.h"
40 40
41#define MLX4_EN_PARM_INT(X, def_val, desc) \
42 static unsigned int X = def_val;\
43 module_param(X , uint, 0444); \
44 MODULE_PARM_DESC(X, desc);
45
46
47/*
48 * Device scope module parameters
49 */
50
51
52/* Use a XOR rathern than Toeplitz hash function for RSS */
53MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
54
55/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
56MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
57
58/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
59MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
60 "Number of LRO sessions per ring or disabled (0)");
61
62/* Priority pausing */
63MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
64 " Per priority bit mask");
65MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
66 " Per priority bit mask");
67
68int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
69{
70 struct mlx4_en_profile *params = &mdev->profile;
71 int i;
72
73 params->rss_xor = (rss_xor != 0);
74 params->rss_mask = rss_mask & 0x1f;
75 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
76 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
77 params->prof[i].rx_pause = 1;
78 params->prof[i].rx_ppp = pfcrx;
79 params->prof[i].tx_pause = 1;
80 params->prof[i].tx_ppp = pfctx;
81 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
82 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
83 }
84 if (pfcrx || pfctx) {
85 params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
86 params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
87 } else {
88 params->prof[1].tx_ring_num = 1;
89 params->prof[2].tx_ring_num = 1;
90 }
91
92 return 0;
93}
94
95
96/*
97 * Ethtool support
98 */
99 41
100static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv) 42static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
101{ 43{
@@ -326,8 +268,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
326 268
327 priv->rx_frames = (coal->rx_max_coalesced_frames == 269 priv->rx_frames = (coal->rx_max_coalesced_frames ==
328 MLX4_EN_AUTO_CONF) ? 270 MLX4_EN_AUTO_CONF) ?
329 MLX4_EN_RX_COAL_TARGET / 271 MLX4_EN_RX_COAL_TARGET :
330 priv->dev->mtu + 1 :
331 coal->rx_max_coalesced_frames; 272 coal->rx_max_coalesced_frames;
332 priv->rx_usecs = (coal->rx_coalesce_usecs == 273 priv->rx_usecs = (coal->rx_coalesce_usecs ==
333 MLX4_EN_AUTO_CONF) ? 274 MLX4_EN_AUTO_CONF) ?
@@ -371,7 +312,7 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
371 priv->prof->rx_pause, 312 priv->prof->rx_pause,
372 priv->prof->rx_ppp); 313 priv->prof->rx_ppp);
373 if (err) 314 if (err)
374 mlx4_err(mdev, "Failed setting pause params to\n"); 315 en_err(priv, "Failed setting pause params\n");
375 316
376 return err; 317 return err;
377} 318}
@@ -421,13 +362,13 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
421 362
422 err = mlx4_en_alloc_resources(priv); 363 err = mlx4_en_alloc_resources(priv);
423 if (err) { 364 if (err) {
424 mlx4_err(mdev, "Failed reallocating port resources\n"); 365 en_err(priv, "Failed reallocating port resources\n");
425 goto out; 366 goto out;
426 } 367 }
427 if (port_up) { 368 if (port_up) {
428 err = mlx4_en_start_port(dev); 369 err = mlx4_en_start_port(dev);
429 if (err) 370 if (err)
430 mlx4_err(mdev, "Failed starting port\n"); 371 en_err(priv, "Failed starting port\n");
431 } 372 }
432 373
433out: 374out:
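
The en_ethtool.c conversion from mlx4_err(mdev, ...) to en_err(priv, ...) relies on per-netdev logging wrappers defined in mlx4_en.h, which this diff does not show. One plausible minimal shape, for orientation only (the real macros may differ):

/*
 * Plausible shape only -- the actual en_err()/en_warn() definitions live in
 * mlx4_en.h and are not part of this diff.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>

struct ex_priv {
	struct net_device *dev;	/* stand-in for mlx4_en_priv's netdev pointer */
};

#define ex_err(priv, fmt, arg...) \
	printk(KERN_ERR "mlx4_en: %s: " fmt, (priv)->dev->name, ## arg)
#define ex_warn(priv, fmt, arg...) \
	printk(KERN_WARNING "mlx4_en: %s: " fmt, (priv)->dev->name, ## arg)
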
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 510633fd57f6..9ed4a158f895 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -51,6 +51,55 @@ static const char mlx4_en_version[] =
51 DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v" 51 DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
52 DRV_VERSION " (" DRV_RELDATE ")\n"; 52 DRV_VERSION " (" DRV_RELDATE ")\n";
53 53
54#define MLX4_EN_PARM_INT(X, def_val, desc) \
55 static unsigned int X = def_val;\
56 module_param(X , uint, 0444); \
57 MODULE_PARM_DESC(X, desc);
58
59
60/*
61 * Device scope module parameters
62 */
63
64
65/* Use a XOR rathern than Toeplitz hash function for RSS */
66MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
67
68/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
69MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
70
71/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
72MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
73 "Number of LRO sessions per ring or disabled (0)");
74
75/* Priority pausing */
76MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
77 " Per priority bit mask");
78MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
79 " Per priority bit mask");
80
81static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
82{
83 struct mlx4_en_profile *params = &mdev->profile;
84 int i;
85
86 params->rss_xor = (rss_xor != 0);
87 params->rss_mask = rss_mask & 0x1f;
88 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
89 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
90 params->prof[i].rx_pause = 1;
91 params->prof[i].rx_ppp = pfcrx;
92 params->prof[i].tx_pause = 1;
93 params->prof[i].tx_ppp = pfctx;
94 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
95 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
96 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
97 (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
98 }
99
100 return 0;
101}
102
54static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, 103static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
55 enum mlx4_dev_event event, int port) 104 enum mlx4_dev_event event, int port)
56{ 105{
@@ -194,28 +243,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
194 /* Create a netdev for each port */ 243 /* Create a netdev for each port */
195 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 244 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
196 mlx4_info(mdev, "Activating port:%d\n", i); 245 mlx4_info(mdev, "Activating port:%d\n", i);
197 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) { 246 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
198 mdev->pndev[i] = NULL; 247 mdev->pndev[i] = NULL;
199 goto err_free_netdev;
200 }
201 } 248 }
202 return mdev; 249 return mdev;
203 250
204
205err_free_netdev:
206 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
207 if (mdev->pndev[i])
208 mlx4_en_destroy_netdev(mdev->pndev[i]);
209 }
210
211 mutex_lock(&mdev->state_lock);
212 mdev->device_up = false;
213 mutex_unlock(&mdev->state_lock);
214 flush_workqueue(mdev->workqueue);
215
216 /* Stop event queue before we drop down to release shared SW state */
217 destroy_workqueue(mdev->workqueue);
218
219err_mr: 251err_mr:
220 mlx4_mr_free(dev, &mdev->mr); 252 mlx4_mr_free(dev, &mdev->mr);
221err_uar: 253err_uar:
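
In the relocated mlx4_en_get_profile() above, tx_ring_num now comes from a single expression: a fixed number of regular rings plus a block of per-priority rings enabled only when a pfcrx bitmask was given, with !!pfcrx collapsing any non-zero mask to 1. A worked example with assumed constants (the real MLX4_EN_NUM_TX_RINGS/MLX4_EN_NUM_PPP_RINGS values come from mlx4_en.h, not this diff):

/*
 * Standalone illustration of the tx_ring_num arithmetic; 8 and 8 are assumed
 * values, not taken from the driver headers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_tx = 8, num_ppp = 8;	/* assumed constants */
	unsigned int pfcrx;

	for (pfcrx = 0; pfcrx <= 0x81; pfcrx += 0x81)
		printf("pfcrx=0x%02x -> tx_ring_num=%u\n",
		       pfcrx, num_tx + (!!pfcrx) * num_ppp);
	return 0;
}
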
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 0cd185a2e089..0a7e78ade63f 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -51,14 +51,14 @@ static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *
51 struct mlx4_en_dev *mdev = priv->mdev; 51 struct mlx4_en_dev *mdev = priv->mdev;
52 int err; 52 int err;
53 53
54 mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp); 54 en_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
55 priv->vlgrp = grp; 55 priv->vlgrp = grp;
56 56
57 mutex_lock(&mdev->state_lock); 57 mutex_lock(&mdev->state_lock);
58 if (mdev->device_up && priv->port_up) { 58 if (mdev->device_up && priv->port_up) {
59 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp); 59 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
60 if (err) 60 if (err)
61 mlx4_err(mdev, "Failed configuring VLAN filter\n"); 61 en_err(priv, "Failed configuring VLAN filter\n");
62 } 62 }
63 mutex_unlock(&mdev->state_lock); 63 mutex_unlock(&mdev->state_lock);
64} 64}
@@ -72,15 +72,15 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
72 if (!priv->vlgrp) 72 if (!priv->vlgrp)
73 return; 73 return;
74 74
75 mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n", 75 en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
76 vid, vlan_group_get_device(priv->vlgrp, vid)); 76 vid, vlan_group_get_device(priv->vlgrp, vid));
77 77
78 /* Add VID to port VLAN filter */ 78 /* Add VID to port VLAN filter */
79 mutex_lock(&mdev->state_lock); 79 mutex_lock(&mdev->state_lock);
80 if (mdev->device_up && priv->port_up) { 80 if (mdev->device_up && priv->port_up) {
81 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); 81 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
82 if (err) 82 if (err)
83 mlx4_err(mdev, "Failed configuring VLAN filter\n"); 83 en_err(priv, "Failed configuring VLAN filter\n");
84 } 84 }
85 mutex_unlock(&mdev->state_lock); 85 mutex_unlock(&mdev->state_lock);
86} 86}
@@ -94,9 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
94 if (!priv->vlgrp) 94 if (!priv->vlgrp)
95 return; 95 return;
96 96
97 mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp " 97 en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
98 "entry:%p)\n", vid, priv->vlgrp, 98 vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
99 vlan_group_get_device(priv->vlgrp, vid));
100 vlan_group_set_device(priv->vlgrp, vid, NULL); 99 vlan_group_set_device(priv->vlgrp, vid, NULL);
101 100
102 /* Remove VID from port VLAN filter */ 101 /* Remove VID from port VLAN filter */
@@ -104,7 +103,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
104 if (mdev->device_up && priv->port_up) { 103 if (mdev->device_up && priv->port_up) {
105 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); 104 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
106 if (err) 105 if (err)
107 mlx4_err(mdev, "Failed configuring VLAN filter\n"); 106 en_err(priv, "Failed configuring VLAN filter\n");
108 } 107 }
109 mutex_unlock(&mdev->state_lock); 108 mutex_unlock(&mdev->state_lock);
110} 109}
@@ -150,9 +149,10 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
150 err = mlx4_register_mac(mdev->dev, priv->port, 149 err = mlx4_register_mac(mdev->dev, priv->port,
151 priv->mac, &priv->mac_index); 150 priv->mac, &priv->mac_index);
152 if (err) 151 if (err)
153 mlx4_err(mdev, "Failed changing HW MAC address\n"); 152 en_err(priv, "Failed changing HW MAC address\n");
154 } else 153 } else
155 mlx4_dbg(HW, priv, "Port is down, exiting...\n"); 154 en_dbg(HW, priv, "Port is down while "
155 "registering mac, exiting...\n");
156 156
157 mutex_unlock(&mdev->state_lock); 157 mutex_unlock(&mdev->state_lock);
158} 158}
@@ -174,7 +174,6 @@ static void mlx4_en_clear_list(struct net_device *dev)
174static void mlx4_en_cache_mclist(struct net_device *dev) 174static void mlx4_en_cache_mclist(struct net_device *dev)
175{ 175{
176 struct mlx4_en_priv *priv = netdev_priv(dev); 176 struct mlx4_en_priv *priv = netdev_priv(dev);
177 struct mlx4_en_dev *mdev = priv->mdev;
178 struct dev_mc_list *mclist; 177 struct dev_mc_list *mclist;
179 struct dev_mc_list *tmp; 178 struct dev_mc_list *tmp;
180 struct dev_mc_list *plist = NULL; 179 struct dev_mc_list *plist = NULL;
@@ -182,7 +181,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
182 for (mclist = dev->mc_list; mclist; mclist = mclist->next) { 181 for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
183 tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC); 182 tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
184 if (!tmp) { 183 if (!tmp) {
185 mlx4_err(mdev, "failed to allocate multicast list\n"); 184 en_err(priv, "failed to allocate multicast list\n");
186 mlx4_en_clear_list(dev); 185 mlx4_en_clear_list(dev);
187 return; 186 return;
188 } 187 }
@@ -219,13 +218,13 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
219 218
220 mutex_lock(&mdev->state_lock); 219 mutex_lock(&mdev->state_lock);
221 if (!mdev->device_up) { 220 if (!mdev->device_up) {
222 mlx4_dbg(HW, priv, "Card is not up, ignoring " 221 en_dbg(HW, priv, "Card is not up, "
223 "multicast change.\n"); 222 "ignoring multicast change.\n");
224 goto out; 223 goto out;
225 } 224 }
226 if (!priv->port_up) { 225 if (!priv->port_up) {
227 mlx4_dbg(HW, priv, "Port is down, ignoring " 226 en_dbg(HW, priv, "Port is down, "
228 "multicast change.\n"); 227 "ignoring multicast change.\n");
229 goto out; 228 goto out;
230 } 229 }
231 230
@@ -236,29 +235,27 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
236 if (dev->flags & IFF_PROMISC) { 235 if (dev->flags & IFF_PROMISC) {
237 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { 236 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
238 if (netif_msg_rx_status(priv)) 237 if (netif_msg_rx_status(priv))
239 mlx4_warn(mdev, "Port:%d entering promiscuous mode\n", 238 en_warn(priv, "Entering promiscuous mode\n");
240 priv->port);
241 priv->flags |= MLX4_EN_FLAG_PROMISC; 239 priv->flags |= MLX4_EN_FLAG_PROMISC;
242 240
243 /* Enable promiscouos mode */ 241 /* Enable promiscouos mode */
244 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 242 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
245 priv->base_qpn, 1); 243 priv->base_qpn, 1);
246 if (err) 244 if (err)
247 mlx4_err(mdev, "Failed enabling " 245 en_err(priv, "Failed enabling "
248 "promiscous mode\n"); 246 "promiscous mode\n");
249 247
250 /* Disable port multicast filter (unconditionally) */ 248 /* Disable port multicast filter (unconditionally) */
251 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 249 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
252 0, MLX4_MCAST_DISABLE); 250 0, MLX4_MCAST_DISABLE);
253 if (err) 251 if (err)
254 mlx4_err(mdev, "Failed disabling " 252 en_err(priv, "Failed disabling "
255 "multicast filter\n"); 253 "multicast filter\n");
256 254
257 /* Disable port VLAN filter */ 255 /* Disable port VLAN filter */
258 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); 256 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
259 if (err) 257 if (err)
260 mlx4_err(mdev, "Failed disabling " 258 en_err(priv, "Failed disabling VLAN filter\n");
261 "VLAN filter\n");
262 } 259 }
263 goto out; 260 goto out;
264 } 261 }
@@ -269,20 +266,19 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
269 266
270 if (priv->flags & MLX4_EN_FLAG_PROMISC) { 267 if (priv->flags & MLX4_EN_FLAG_PROMISC) {
271 if (netif_msg_rx_status(priv)) 268 if (netif_msg_rx_status(priv))
272 mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n", 269 en_warn(priv, "Leaving promiscuous mode\n");
273 priv->port);
274 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 270 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
275 271
276 /* Disable promiscouos mode */ 272 /* Disable promiscouos mode */
277 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 273 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
278 priv->base_qpn, 0); 274 priv->base_qpn, 0);
279 if (err) 275 if (err)
280 mlx4_err(mdev, "Failed disabling promiscous mode\n"); 276 en_err(priv, "Failed disabling promiscous mode\n");
281 277
282 /* Enable port VLAN filter */ 278 /* Enable port VLAN filter */
283 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); 279 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
284 if (err) 280 if (err)
285 mlx4_err(mdev, "Failed enabling VLAN filter\n"); 281 en_err(priv, "Failed enabling VLAN filter\n");
286 } 282 }
287 283
288 /* Enable/disable the multicast filter according to IFF_ALLMULTI */ 284 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
@@ -290,12 +286,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
290 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 286 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
291 0, MLX4_MCAST_DISABLE); 287 0, MLX4_MCAST_DISABLE);
292 if (err) 288 if (err)
293 mlx4_err(mdev, "Failed disabling multicast filter\n"); 289 en_err(priv, "Failed disabling multicast filter\n");
294 } else { 290 } else {
295 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 291 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
296 0, MLX4_MCAST_DISABLE); 292 0, MLX4_MCAST_DISABLE);
297 if (err) 293 if (err)
298 mlx4_err(mdev, "Failed disabling multicast filter\n"); 294 en_err(priv, "Failed disabling multicast filter\n");
299 295
300 /* Flush mcast filter and init it with broadcast address */ 296 /* Flush mcast filter and init it with broadcast address */
301 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 297 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
@@ -314,7 +310,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
314 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 310 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
315 0, MLX4_MCAST_ENABLE); 311 0, MLX4_MCAST_ENABLE);
316 if (err) 312 if (err)
317 mlx4_err(mdev, "Failed enabling multicast filter\n"); 313 en_err(priv, "Failed enabling multicast filter\n");
318 314
319 mlx4_en_clear_list(dev); 315 mlx4_en_clear_list(dev);
320 } 316 }
@@ -346,10 +342,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
346 struct mlx4_en_dev *mdev = priv->mdev; 342 struct mlx4_en_dev *mdev = priv->mdev;
347 343
348 if (netif_msg_timer(priv)) 344 if (netif_msg_timer(priv))
349 mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port); 345 en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
350 346
351 priv->port_stats.tx_timeout++; 347 priv->port_stats.tx_timeout++;
352 mlx4_dbg(DRV, priv, "Scheduling watchdog\n"); 348 en_dbg(DRV, priv, "Scheduling watchdog\n");
353 queue_work(mdev->workqueue, &priv->watchdog_task); 349 queue_work(mdev->workqueue, &priv->watchdog_task);
354} 350}
355 351
@@ -376,10 +372,10 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
376 * satisfy our coelsing target. 372 * satisfy our coelsing target.
377 * - moder_time is set to a fixed value. 373 * - moder_time is set to a fixed value.
378 */ 374 */
379 priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1; 375 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
380 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 376 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
381 mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - " 377 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
382 "rx_frames:%d rx_usecs:%d\n", 378 "rx_frames:%d rx_usecs:%d\n",
383 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 379 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
384 380
385 /* Setup cq moderation params */ 381 /* Setup cq moderation params */
@@ -412,7 +408,6 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
412static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 408static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
413{ 409{
414 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 410 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
415 struct mlx4_en_dev *mdev = priv->mdev;
416 struct mlx4_en_cq *cq; 411 struct mlx4_en_cq *cq;
417 unsigned long packets; 412 unsigned long packets;
418 unsigned long rate; 413 unsigned long rate;
@@ -472,11 +467,11 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
472 moder_time = priv->rx_usecs; 467 moder_time = priv->rx_usecs;
473 } 468 }
474 469
475 mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", 470 en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
476 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period); 471 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
477 472
478 mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu " 473 en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
479 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n", 474 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
480 priv->last_moder_time, moder_time, period, packets, 475 priv->last_moder_time, moder_time, period, packets,
481 avg_pkt_size, rate); 476 avg_pkt_size, rate);
482 477
@@ -487,8 +482,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
487 cq->moder_time = moder_time; 482 cq->moder_time = moder_time;
488 err = mlx4_en_set_cq_moder(priv, cq); 483 err = mlx4_en_set_cq_moder(priv, cq);
489 if (err) { 484 if (err) {
490 mlx4_err(mdev, "Failed modifying moderation for cq:%d " 485 en_err(priv, "Failed modifying moderation for cq:%d\n", i);
491 "on port:%d\n", i, priv->port);
492 break; 486 break;
493 } 487 }
494 } 488 }
@@ -511,8 +505,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
511 505
512 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 506 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
513 if (err) 507 if (err)
514 mlx4_dbg(HW, priv, "Could not update stats for " 508 en_dbg(HW, priv, "Could not update stats \n");
515 "port:%d\n", priv->port);
516 509
517 mutex_lock(&mdev->state_lock); 510 mutex_lock(&mdev->state_lock);
518 if (mdev->device_up) { 511 if (mdev->device_up) {
@@ -536,12 +529,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
536 * report to system log */ 529 * report to system log */
537 if (priv->last_link_state != linkstate) { 530 if (priv->last_link_state != linkstate) {
538 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 531 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
539 if (netif_msg_link(priv)) 532 en_dbg(LINK, priv, "Link Down\n");
540 mlx4_info(mdev, "Port %d - link down\n", priv->port);
541 netif_carrier_off(priv->dev); 533 netif_carrier_off(priv->dev);
542 } else { 534 } else {
543 if (netif_msg_link(priv)) 535 en_dbg(LINK, priv, "Link Up\n");
544 mlx4_info(mdev, "Port %d - link up\n", priv->port);
545 netif_carrier_on(priv->dev); 536 netif_carrier_on(priv->dev);
546 } 537 }
547 } 538 }
@@ -563,19 +554,19 @@ int mlx4_en_start_port(struct net_device *dev)
563 int j; 554 int j;
564 555
565 if (priv->port_up) { 556 if (priv->port_up) {
566 mlx4_dbg(DRV, priv, "start port called while port already up\n"); 557 en_dbg(DRV, priv, "start port called while port already up\n");
567 return 0; 558 return 0;
568 } 559 }
569 560
570 /* Calculate Rx buf size */ 561 /* Calculate Rx buf size */
571 dev->mtu = min(dev->mtu, priv->max_mtu); 562 dev->mtu = min(dev->mtu, priv->max_mtu);
572 mlx4_en_calc_rx_buf(dev); 563 mlx4_en_calc_rx_buf(dev);
573 mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); 564 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
574 565
575 /* Configure rx cq's and rings */ 566 /* Configure rx cq's and rings */
576 err = mlx4_en_activate_rx_rings(priv); 567 err = mlx4_en_activate_rx_rings(priv);
577 if (err) { 568 if (err) {
578 mlx4_err(mdev, "Failed to activate RX rings\n"); 569 en_err(priv, "Failed to activate RX rings\n");
579 return err; 570 return err;
580 } 571 }
581 for (i = 0; i < priv->rx_ring_num; i++) { 572 for (i = 0; i < priv->rx_ring_num; i++) {
@@ -583,14 +574,14 @@ int mlx4_en_start_port(struct net_device *dev)
583 574
584 err = mlx4_en_activate_cq(priv, cq); 575 err = mlx4_en_activate_cq(priv, cq);
585 if (err) { 576 if (err) {
586 mlx4_err(mdev, "Failed activating Rx CQ\n"); 577 en_err(priv, "Failed activating Rx CQ\n");
587 goto cq_err; 578 goto cq_err;
588 } 579 }
589 for (j = 0; j < cq->size; j++) 580 for (j = 0; j < cq->size; j++)
590 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; 581 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
591 err = mlx4_en_set_cq_moder(priv, cq); 582 err = mlx4_en_set_cq_moder(priv, cq);
592 if (err) { 583 if (err) {
593 mlx4_err(mdev, "Failed setting cq moderation parameters"); 584 en_err(priv, "Failed setting cq moderation parameters");
594 mlx4_en_deactivate_cq(priv, cq); 585 mlx4_en_deactivate_cq(priv, cq);
595 goto cq_err; 586 goto cq_err;
596 } 587 }
@@ -601,7 +592,7 @@ int mlx4_en_start_port(struct net_device *dev)
601 592
602 err = mlx4_en_config_rss_steer(priv); 593 err = mlx4_en_config_rss_steer(priv);
603 if (err) { 594 if (err) {
604 mlx4_err(mdev, "Failed configuring rss steering\n"); 595 en_err(priv, "Failed configuring rss steering\n");
605 goto cq_err; 596 goto cq_err;
606 } 597 }
607 598
@@ -611,16 +602,16 @@ int mlx4_en_start_port(struct net_device *dev)
611 cq = &priv->tx_cq[i]; 602 cq = &priv->tx_cq[i];
612 err = mlx4_en_activate_cq(priv, cq); 603 err = mlx4_en_activate_cq(priv, cq);
613 if (err) { 604 if (err) {
614 mlx4_err(mdev, "Failed allocating Tx CQ\n"); 605 en_err(priv, "Failed allocating Tx CQ\n");
615 goto tx_err; 606 goto tx_err;
616 } 607 }
617 err = mlx4_en_set_cq_moder(priv, cq); 608 err = mlx4_en_set_cq_moder(priv, cq);
618 if (err) { 609 if (err) {
619 mlx4_err(mdev, "Failed setting cq moderation parameters"); 610 en_err(priv, "Failed setting cq moderation parameters");
620 mlx4_en_deactivate_cq(priv, cq); 611 mlx4_en_deactivate_cq(priv, cq);
621 goto tx_err; 612 goto tx_err;
622 } 613 }
623 mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); 614 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
624 cq->buf->wqe_index = cpu_to_be16(0xffff); 615 cq->buf->wqe_index = cpu_to_be16(0xffff);
625 616
626 /* Configure ring */ 617 /* Configure ring */
@@ -628,7 +619,7 @@ int mlx4_en_start_port(struct net_device *dev)
628 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, 619 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
629 priv->rx_ring[0].srq.srqn); 620 priv->rx_ring[0].srq.srqn);
630 if (err) { 621 if (err) {
631 mlx4_err(mdev, "Failed allocating Tx ring\n"); 622 en_err(priv, "Failed allocating Tx ring\n");
632 mlx4_en_deactivate_cq(priv, cq); 623 mlx4_en_deactivate_cq(priv, cq);
633 goto tx_err; 624 goto tx_err;
634 } 625 }
@@ -646,30 +637,30 @@ int mlx4_en_start_port(struct net_device *dev)
646 priv->prof->rx_pause, 637 priv->prof->rx_pause,
647 priv->prof->rx_ppp); 638 priv->prof->rx_ppp);
648 if (err) { 639 if (err) {
649 mlx4_err(mdev, "Failed setting port general configurations" 640 en_err(priv, "Failed setting port general configurations "
650 " for port %d, with error %d\n", priv->port, err); 641 "for port %d, with error %d\n", priv->port, err);
651 goto tx_err; 642 goto tx_err;
652 } 643 }
653 /* Set default qp number */ 644 /* Set default qp number */
654 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 645 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
655 if (err) { 646 if (err) {
656 mlx4_err(mdev, "Failed setting default qp numbers\n"); 647 en_err(priv, "Failed setting default qp numbers\n");
657 goto tx_err; 648 goto tx_err;
658 } 649 }
659 /* Set port mac number */ 650 /* Set port mac number */
660 mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); 651 en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
661 err = mlx4_register_mac(mdev->dev, priv->port, 652 err = mlx4_register_mac(mdev->dev, priv->port,
662 priv->mac, &priv->mac_index); 653 priv->mac, &priv->mac_index);
663 if (err) { 654 if (err) {
664 mlx4_err(mdev, "Failed setting port mac\n"); 655 en_err(priv, "Failed setting port mac\n");
665 goto tx_err; 656 goto tx_err;
666 } 657 }
667 658
668 /* Init port */ 659 /* Init port */
669 mlx4_dbg(HW, priv, "Initializing port\n"); 660 en_dbg(HW, priv, "Initializing port\n");
670 err = mlx4_INIT_PORT(mdev->dev, priv->port); 661 err = mlx4_INIT_PORT(mdev->dev, priv->port);
671 if (err) { 662 if (err) {
672 mlx4_err(mdev, "Failed Initializing port\n"); 663 en_err(priv, "Failed Initializing port\n");
673 goto mac_err; 664 goto mac_err;
674 } 665 }
675 666
@@ -706,8 +697,7 @@ void mlx4_en_stop_port(struct net_device *dev)
706 int i; 697 int i;
707 698
708 if (!priv->port_up) { 699 if (!priv->port_up) {
709 mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n", 700 en_dbg(DRV, priv, "stop port called while port already down\n");
710 priv->port);
711 return; 701 return;
712 } 702 }
713 netif_stop_queue(dev); 703 netif_stop_queue(dev);
@@ -752,13 +742,13 @@ static void mlx4_en_restart(struct work_struct *work)
752 struct mlx4_en_dev *mdev = priv->mdev; 742 struct mlx4_en_dev *mdev = priv->mdev;
753 struct net_device *dev = priv->dev; 743 struct net_device *dev = priv->dev;
754 744
755 mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 745 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
756 746
757 mutex_lock(&mdev->state_lock); 747 mutex_lock(&mdev->state_lock);
758 if (priv->port_up) { 748 if (priv->port_up) {
759 mlx4_en_stop_port(dev); 749 mlx4_en_stop_port(dev);
760 if (mlx4_en_start_port(dev)) 750 if (mlx4_en_start_port(dev))
761 mlx4_err(mdev, "Failed restarting port %d\n", priv->port); 751 en_err(priv, "Failed restarting port %d\n", priv->port);
762 } 752 }
763 mutex_unlock(&mdev->state_lock); 753 mutex_unlock(&mdev->state_lock);
764} 754}
@@ -774,14 +764,14 @@ static int mlx4_en_open(struct net_device *dev)
774 mutex_lock(&mdev->state_lock); 764 mutex_lock(&mdev->state_lock);
775 765
776 if (!mdev->device_up) { 766 if (!mdev->device_up) {
777 mlx4_err(mdev, "Cannot open - device down/disabled\n"); 767 en_err(priv, "Cannot open - device down/disabled\n");
778 err = -EBUSY; 768 err = -EBUSY;
779 goto out; 769 goto out;
780 } 770 }
781 771
782 /* Reset HW statistics and performance counters */ 772 /* Reset HW statistics and performance counters */
783 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 773 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
784 mlx4_dbg(HW, priv, "Failed dumping statistics\n"); 774 en_dbg(HW, priv, "Failed dumping statistics\n");
785 775
786 memset(&priv->stats, 0, sizeof(priv->stats)); 776 memset(&priv->stats, 0, sizeof(priv->stats));
787 memset(&priv->pstats, 0, sizeof(priv->pstats)); 777 memset(&priv->pstats, 0, sizeof(priv->pstats));
@@ -798,7 +788,7 @@ static int mlx4_en_open(struct net_device *dev)
798 mlx4_en_set_default_moderation(priv); 788 mlx4_en_set_default_moderation(priv);
799 err = mlx4_en_start_port(dev); 789 err = mlx4_en_start_port(dev);
800 if (err) 790 if (err)
801 mlx4_err(mdev, "Failed starting port:%d\n", priv->port); 791 en_err(priv, "Failed starting port:%d\n", priv->port);
802 792
803out: 793out:
804 mutex_unlock(&mdev->state_lock); 794 mutex_unlock(&mdev->state_lock);
@@ -811,8 +801,7 @@ static int mlx4_en_close(struct net_device *dev)
811 struct mlx4_en_priv *priv = netdev_priv(dev); 801 struct mlx4_en_priv *priv = netdev_priv(dev);
812 struct mlx4_en_dev *mdev = priv->mdev; 802 struct mlx4_en_dev *mdev = priv->mdev;
813 803
814 if (netif_msg_ifdown(priv)) 804 en_dbg(IFDOWN, priv, "Close port called\n");
815 mlx4_info(mdev, "Close called for port:%d\n", priv->port);
816 805
817 mutex_lock(&mdev->state_lock); 806 mutex_lock(&mdev->state_lock);
818 807
@@ -844,7 +833,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
844 833
845int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 834int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
846{ 835{
847 struct mlx4_en_dev *mdev = priv->mdev;
848 struct mlx4_en_port_profile *prof = priv->prof; 836 struct mlx4_en_port_profile *prof = priv->prof;
849 int i; 837 int i;
850 838
@@ -873,7 +861,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
873 return 0; 861 return 0;
874 862
875err: 863err:
876 mlx4_err(mdev, "Failed to allocate NIC resources\n"); 864 en_err(priv, "Failed to allocate NIC resources\n");
877 return -ENOMEM; 865 return -ENOMEM;
878} 866}
879 867
@@ -883,7 +871,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
883 struct mlx4_en_priv *priv = netdev_priv(dev); 871 struct mlx4_en_priv *priv = netdev_priv(dev);
884 struct mlx4_en_dev *mdev = priv->mdev; 872 struct mlx4_en_dev *mdev = priv->mdev;
885 873
886 mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 874 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
887 875
888 /* Unregister device - this will close the port if it was up */ 876 /* Unregister device - this will close the port if it was up */
889 if (priv->registered) 877 if (priv->registered)
@@ -912,11 +900,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
912 struct mlx4_en_dev *mdev = priv->mdev; 900 struct mlx4_en_dev *mdev = priv->mdev;
913 int err = 0; 901 int err = 0;
914 902
915 mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", 903 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
916 dev->mtu, new_mtu); 904 dev->mtu, new_mtu);
917 905
918 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { 906 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
919 mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu); 907 en_err(priv, "Bad MTU size:%d.\n", new_mtu);
920 return -EPERM; 908 return -EPERM;
921 } 909 }
922 dev->mtu = new_mtu; 910 dev->mtu = new_mtu;
@@ -926,13 +914,13 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
926 if (!mdev->device_up) { 914 if (!mdev->device_up) {
927 /* NIC is probably restarting - let watchdog task reset 915 /* NIC is probably restarting - let watchdog task reset
928 * the port */ 916 * the port */
929 mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n"); 917 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
930 } else { 918 } else {
931 mlx4_en_stop_port(dev); 919 mlx4_en_stop_port(dev);
932 mlx4_en_set_default_moderation(priv); 920 mlx4_en_set_default_moderation(priv);
933 err = mlx4_en_start_port(dev); 921 err = mlx4_en_start_port(dev);
934 if (err) { 922 if (err) {
935 mlx4_err(mdev, "Failed restarting port:%d\n", 923 en_err(priv, "Failed restarting port:%d\n",
936 priv->port); 924 priv->port);
937 queue_work(mdev->workqueue, &priv->watchdog_task); 925 queue_work(mdev->workqueue, &priv->watchdog_task);
938 } 926 }
@@ -946,6 +934,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
946 .ndo_open = mlx4_en_open, 934 .ndo_open = mlx4_en_open,
947 .ndo_stop = mlx4_en_close, 935 .ndo_stop = mlx4_en_close,
948 .ndo_start_xmit = mlx4_en_xmit, 936 .ndo_start_xmit = mlx4_en_xmit,
937 .ndo_select_queue = mlx4_en_select_queue,
949 .ndo_get_stats = mlx4_en_get_stats, 938 .ndo_get_stats = mlx4_en_get_stats,
950 .ndo_set_multicast_list = mlx4_en_set_multicast, 939 .ndo_set_multicast_list = mlx4_en_set_multicast,
951 .ndo_set_mac_address = mlx4_en_set_mac, 940 .ndo_set_mac_address = mlx4_en_set_mac,
@@ -968,7 +957,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
968 int i; 957 int i;
969 int err; 958 int err;
970 959
971 dev = alloc_etherdev(sizeof(struct mlx4_en_priv)); 960 dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
972 if (dev == NULL) { 961 if (dev == NULL) {
973 mlx4_err(mdev, "Net device allocation failed\n"); 962 mlx4_err(mdev, "Net device allocation failed\n");
974 return -ENOMEM; 963 return -ENOMEM;
@@ -1006,7 +995,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1006 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 995 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
1007 priv->mac = mdev->dev->caps.def_mac[priv->port]; 996 priv->mac = mdev->dev->caps.def_mac[priv->port];
1008 if (ILLEGAL_MAC(priv->mac)) { 997 if (ILLEGAL_MAC(priv->mac)) {
1009 mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n", 998 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
1010 priv->port, priv->mac); 999 priv->port, priv->mac);
1011 err = -EINVAL; 1000 err = -EINVAL;
1012 goto out; 1001 goto out;
@@ -1025,19 +1014,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1025 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 1014 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
1026 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); 1015 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
1027 if (err) { 1016 if (err) {
1028 mlx4_err(mdev, "Failed to allocate page for rx qps\n"); 1017 en_err(priv, "Failed to allocate page for rx qps\n");
1029 goto out; 1018 goto out;
1030 } 1019 }
1031 priv->allocated = 1; 1020 priv->allocated = 1;
1032 1021
1033 /* Populate Tx priority mappings */
1034 mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
1035
1036 /* 1022 /*
1037 * Initialize netdev entry points 1023 * Initialize netdev entry points
1038 */ 1024 */
1039 dev->netdev_ops = &mlx4_netdev_ops; 1025 dev->netdev_ops = &mlx4_netdev_ops;
1040 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 1026 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
1027 dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS;
1041 1028
1042 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); 1029 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1043 1030
@@ -1051,7 +1038,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1051 * Set driver features 1038 * Set driver features
1052 */ 1039 */
1053 dev->features |= NETIF_F_SG; 1040 dev->features |= NETIF_F_SG;
1041 dev->vlan_features |= NETIF_F_SG;
1054 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1042 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1043 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1055 dev->features |= NETIF_F_HIGHDMA; 1044 dev->features |= NETIF_F_HIGHDMA;
1056 dev->features |= NETIF_F_HW_VLAN_TX | 1045 dev->features |= NETIF_F_HW_VLAN_TX |
1057 NETIF_F_HW_VLAN_RX | 1046 NETIF_F_HW_VLAN_RX |
@@ -1061,6 +1050,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1061 if (mdev->LSO_support) { 1050 if (mdev->LSO_support) {
1062 dev->features |= NETIF_F_TSO; 1051 dev->features |= NETIF_F_TSO;
1063 dev->features |= NETIF_F_TSO6; 1052 dev->features |= NETIF_F_TSO6;
1053 dev->vlan_features |= NETIF_F_TSO;
1054 dev->vlan_features |= NETIF_F_TSO6;
1064 } 1055 }
1065 1056
1066 mdev->pndev[port] = dev; 1057 mdev->pndev[port] = dev;
@@ -1068,9 +1059,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1068 netif_carrier_off(dev); 1059 netif_carrier_off(dev);
1069 err = register_netdev(dev); 1060 err = register_netdev(dev);
1070 if (err) { 1061 if (err) {
1071 mlx4_err(mdev, "Netdev registration failed\n"); 1062 en_err(priv, "Netdev registration failed for port %d\n", port);
1072 goto out; 1063 goto out;
1073 } 1064 }
1065
1066 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
1067 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
1068
1074 priv->registered = 1; 1069 priv->registered = 1;
1075 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1070 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1076 return 0; 1071 return 0;
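
The en_netdev.c hunks switch the port netdev to real multiqueue: it is allocated with alloc_etherdev_mq(), real_num_tx_queues is set, and transmit queue selection moves to .ndo_select_queue. A generic sketch of that wiring (the body of mlx4_en_select_queue is not in this diff, so skb_tx_hash() stands in for the real policy; all example_* names are hypothetical):

/*
 * Sketch of a multiqueue netdev setup; EXAMPLE_NUM_TX_RINGS is an assumed
 * ring count.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_NUM_TX_RINGS	8	/* assumed value */

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);	/* stand-in queue-selection policy */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_select_queue	= example_select_queue,
};

static struct net_device *example_alloc(size_t priv_size)
{
	struct net_device *dev;

	dev = alloc_etherdev_mq(priv_size, EXAMPLE_NUM_TX_RINGS);
	if (!dev)
		return NULL;
	dev->netdev_ops = &example_netdev_ops;
	dev->real_num_tx_queues = EXAMPLE_NUM_TX_RINGS;
	return dev;
}
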
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 6bfab6e5ba1d..5a14899c1e25 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -114,8 +114,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
114 goto out; 114 goto out;
115 115
116 page_alloc->offset = priv->frag_info[i].frag_align; 116 page_alloc->offset = priv->frag_info[i].frag_align;
117 mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n", 117 en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
118 i, page_alloc->page); 118 i, page_alloc->page);
119 } 119 }
120 return 0; 120 return 0;
121 121
@@ -136,8 +136,8 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
136 136
137 for (i = 0; i < priv->num_frags; i++) { 137 for (i = 0; i < priv->num_frags; i++) {
138 page_alloc = &ring->page_alloc[i]; 138 page_alloc = &ring->page_alloc[i];
139 mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", 139 en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
140 i, page_count(page_alloc->page)); 140 i, page_count(page_alloc->page));
141 141
142 put_page(page_alloc->page); 142 put_page(page_alloc->page);
143 page_alloc->page = NULL; 143 page_alloc->page = NULL;
@@ -214,10 +214,10 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
214 214
215 skb_frags = ring->rx_info + (index << priv->log_rx_info); 215 skb_frags = ring->rx_info + (index << priv->log_rx_info);
216 for (nr = 0; nr < priv->num_frags; nr++) { 216 for (nr = 0; nr < priv->num_frags; nr++) {
217 mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr); 217 en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
218 dma = be64_to_cpu(rx_desc->data[nr].addr); 218 dma = be64_to_cpu(rx_desc->data[nr].addr);
219 219
220 mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma); 220 en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
221 pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, 221 pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
222 PCI_DMA_FROMDEVICE); 222 PCI_DMA_FROMDEVICE);
223 put_page(skb_frags[nr].page); 223 put_page(skb_frags[nr].page);
@@ -226,7 +226,6 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
226 226
227static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) 227static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
228{ 228{
229 struct mlx4_en_dev *mdev = priv->mdev;
230 struct mlx4_en_rx_ring *ring; 229 struct mlx4_en_rx_ring *ring;
231 int ring_ind; 230 int ring_ind;
232 int buf_ind; 231 int buf_ind;
@@ -239,14 +238,14 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
239 if (mlx4_en_prepare_rx_desc(priv, ring, 238 if (mlx4_en_prepare_rx_desc(priv, ring,
240 ring->actual_size)) { 239 ring->actual_size)) {
241 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { 240 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
242 mlx4_err(mdev, "Failed to allocate " 241 en_err(priv, "Failed to allocate "
243 "enough rx buffers\n"); 242 "enough rx buffers\n");
244 return -ENOMEM; 243 return -ENOMEM;
245 } else { 244 } else {
246 new_size = rounddown_pow_of_two(ring->actual_size); 245 new_size = rounddown_pow_of_two(ring->actual_size);
247 mlx4_warn(mdev, "Only %d buffers allocated " 246 en_warn(priv, "Only %d buffers allocated "
248 "reducing ring size to %d", 247 "reducing ring size to %d",
249 ring->actual_size, new_size); 248 ring->actual_size, new_size);
250 goto reduce_rings; 249 goto reduce_rings;
251 } 250 }
252 } 251 }
@@ -282,8 +281,7 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
282 ring->size_mask); 281 ring->size_mask);
283 if (err) { 282 if (err) {
284 if (netif_msg_rx_err(priv)) 283 if (netif_msg_rx_err(priv))
285 mlx4_warn(priv->mdev, 284 en_warn(priv, "Failed preparing rx descriptor\n");
286 "Failed preparing rx descriptor\n");
287 priv->port_stats.rx_alloc_failed++; 285 priv->port_stats.rx_alloc_failed++;
288 break; 286 break;
289 } 287 }
@@ -301,14 +299,14 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
301{ 299{
302 int index; 300 int index;
303 301
304 mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n", 302 en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
305 ring->cons, ring->prod); 303 ring->cons, ring->prod);
306 304
307 /* Unmap and free Rx buffers */ 305 /* Unmap and free Rx buffers */
308 BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); 306 BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
309 while (ring->cons != ring->prod) { 307 while (ring->cons != ring->prod) {
310 index = ring->cons & ring->size_mask; 308 index = ring->cons & ring->size_mask;
311 mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index); 309 en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
312 mlx4_en_free_rx_desc(priv, ring, index); 310 mlx4_en_free_rx_desc(priv, ring, index);
313 ++ring->cons; 311 ++ring->cons;
314 } 312 }
@@ -373,10 +371,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
373 sizeof(struct skb_frag_struct)); 371 sizeof(struct skb_frag_struct));
374 ring->rx_info = vmalloc(tmp); 372 ring->rx_info = vmalloc(tmp);
375 if (!ring->rx_info) { 373 if (!ring->rx_info) {
376 mlx4_err(mdev, "Failed allocating rx_info ring\n"); 374 en_err(priv, "Failed allocating rx_info ring\n");
377 return -ENOMEM; 375 return -ENOMEM;
378 } 376 }
379 mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", 377 en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
380 ring->rx_info, tmp); 378 ring->rx_info, tmp);
381 379
382 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, 380 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
@@ -386,7 +384,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
386 384
387 err = mlx4_en_map_buffer(&ring->wqres.buf); 385 err = mlx4_en_map_buffer(&ring->wqres.buf);
388 if (err) { 386 if (err) {
389 mlx4_err(mdev, "Failed to map RX buffer\n"); 387 en_err(priv, "Failed to map RX buffer\n");
390 goto err_hwq; 388 goto err_hwq;
391 } 389 }
392 ring->buf = ring->wqres.buf.direct.buf; 390 ring->buf = ring->wqres.buf.direct.buf;
@@ -404,7 +402,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
404 sizeof(struct net_lro_desc), 402 sizeof(struct net_lro_desc),
405 GFP_KERNEL); 403 GFP_KERNEL);
406 if (!ring->lro.lro_arr) { 404 if (!ring->lro.lro_arr) {
407 mlx4_err(mdev, "Failed to allocate lro array\n"); 405 en_err(priv, "Failed to allocate lro array\n");
408 goto err_map; 406 goto err_map;
409 } 407 }
410 ring->lro.get_frag_header = mlx4_en_get_frag_header; 408 ring->lro.get_frag_header = mlx4_en_get_frag_header;
@@ -455,7 +453,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
455 /* Initialize page allocators */ 453 /* Initialize page allocators */
456 err = mlx4_en_init_allocator(priv, ring); 454 err = mlx4_en_init_allocator(priv, ring);
457 if (err) { 455 if (err) {
458 mlx4_err(mdev, "Failed initializing ring allocator\n"); 456 en_err(priv, "Failed initializing ring allocator\n");
459 ring_ind--; 457 ring_ind--;
460 goto err_allocator; 458 goto err_allocator;
461 } 459 }
@@ -486,7 +484,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
486 err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt, 484 err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
487 ring->wqres.db.dma, &ring->srq); 485 ring->wqres.db.dma, &ring->srq);
488 if (err){ 486 if (err){
489 mlx4_err(mdev, "Failed to allocate srq\n"); 487 en_err(priv, "Failed to allocate srq\n");
490 ring_ind--; 488 ring_ind--;
491 goto err_srq; 489 goto err_srq;
492 } 490 }
@@ -601,7 +599,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
601 599
602 skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN); 600 skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
603 if (!skb) { 601 if (!skb) {
604 mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n"); 602 en_dbg(RX_ERR, priv, "Failed allocating skb\n");
605 return NULL; 603 return NULL;
606 } 604 }
607 skb->dev = priv->dev; 605 skb->dev = priv->dev;
@@ -680,7 +678,6 @@ static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
680int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) 678int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
681{ 679{
682 struct mlx4_en_priv *priv = netdev_priv(dev); 680 struct mlx4_en_priv *priv = netdev_priv(dev);
683 struct mlx4_en_dev *mdev = priv->mdev;
684 struct mlx4_cqe *cqe; 681 struct mlx4_cqe *cqe;
685 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; 682 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
686 struct skb_frag_struct *skb_frags; 683 struct skb_frag_struct *skb_frags;
@@ -717,14 +714,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
717 /* Drop packet on bad receive or bad checksum */ 714 /* Drop packet on bad receive or bad checksum */
718 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == 715 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
719 MLX4_CQE_OPCODE_ERROR)) { 716 MLX4_CQE_OPCODE_ERROR)) {
720 mlx4_err(mdev, "CQE completed in error - vendor " 717 en_err(priv, "CQE completed in error - vendor "
721 "syndrom:%d syndrom:%d\n", 718 "syndrom:%d syndrom:%d\n",
722 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, 719 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
723 ((struct mlx4_err_cqe *) cqe)->syndrome); 720 ((struct mlx4_err_cqe *) cqe)->syndrome);
724 goto next; 721 goto next;
725 } 722 }
726 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { 723 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
727 mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n"); 724 en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
728 goto next; 725 goto next;
729 } 726 }
730 727
@@ -874,7 +871,7 @@ static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16
874 u16 res = MLX4_EN_ALLOC_SIZE % stride; 871 u16 res = MLX4_EN_ALLOC_SIZE % stride;
875 u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align; 872 u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
876 873
877 mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d " 874 en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
878 "res:%d offset:%d\n", stride, align, res, offset); 875 "res:%d offset:%d\n", stride, align, res, offset);
879 return offset; 876 return offset;
880} 877}
@@ -919,10 +916,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
919 priv->rx_skb_size = eff_mtu; 916 priv->rx_skb_size = eff_mtu;
920 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct)); 917 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
921 918
922 mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " 919 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
923 "num_frags:%d):\n", eff_mtu, priv->num_frags); 920 "num_frags:%d):\n", eff_mtu, priv->num_frags);
924 for (i = 0; i < priv->num_frags; i++) { 921 for (i = 0; i < priv->num_frags; i++) {
925 mlx4_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d " 922 en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
926 "stride:%d last_offset:%d\n", i, 923 "stride:%d last_offset:%d\n", i,
927 priv->frag_info[i].frag_size, 924 priv->frag_info[i].frag_size,
928 priv->frag_info[i].frag_prefix_size, 925 priv->frag_info[i].frag_prefix_size,
@@ -942,12 +939,12 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
942 int i; 939 int i;
943 940
944 rss_map->size = roundup_pow_of_two(num_entries); 941 rss_map->size = roundup_pow_of_two(num_entries);
945 mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n", 942 en_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
946 rss_map->size); 943 rss_map->size);
947 944
948 for (i = 0; i < rss_map->size; i++) { 945 for (i = 0; i < rss_map->size; i++) {
949 rss_map->map[i] = i % num_rings; 946 rss_map->map[i] = i % num_rings;
950 mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]); 947 en_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
951 } 948 }
952} 949}
953 950
@@ -962,13 +959,13 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
962 959
963 context = kmalloc(sizeof *context , GFP_KERNEL); 960 context = kmalloc(sizeof *context , GFP_KERNEL);
964 if (!context) { 961 if (!context) {
965 mlx4_err(mdev, "Failed to allocate qp context\n"); 962 en_err(priv, "Failed to allocate qp context\n");
966 return -ENOMEM; 963 return -ENOMEM;
967 } 964 }
968 965
969 err = mlx4_qp_alloc(mdev->dev, qpn, qp); 966 err = mlx4_qp_alloc(mdev->dev, qpn, qp);
970 if (err) { 967 if (err) {
971 mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn); 968 en_err(priv, "Failed to allocate qp #%x\n", qpn);
972 goto out; 969 goto out;
973 } 970 }
974 qp->event = mlx4_en_sqp_event; 971 qp->event = mlx4_en_sqp_event;
@@ -1000,12 +997,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1000 int err = 0; 997 int err = 0;
1001 int good_qps = 0; 998 int good_qps = 0;
1002 999
1003 mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port); 1000 en_dbg(DRV, priv, "Configuring rss steering\n");
1004 err = mlx4_qp_reserve_range(mdev->dev, rss_map->size, 1001 err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
1005 rss_map->size, &rss_map->base_qpn); 1002 rss_map->size, &rss_map->base_qpn);
1006 if (err) { 1003 if (err) {
1007 mlx4_err(mdev, "Failed reserving %d qps for port %u\n", 1004 en_err(priv, "Failed reserving %d qps\n", rss_map->size);
1008 rss_map->size, priv->port);
1009 return err; 1005 return err;
1010 } 1006 }
1011 1007
@@ -1025,13 +1021,13 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1025 /* Configure RSS indirection qp */ 1021 /* Configure RSS indirection qp */
1026 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn); 1022 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
1027 if (err) { 1023 if (err) {
1028 mlx4_err(mdev, "Failed to reserve range for RSS " 1024 en_err(priv, "Failed to reserve range for RSS "
1029 "indirection qp\n"); 1025 "indirection qp\n");
1030 goto rss_err; 1026 goto rss_err;
1031 } 1027 }
1032 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); 1028 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
1033 if (err) { 1029 if (err) {
1034 mlx4_err(mdev, "Failed to allocate RSS indirection QP\n"); 1030 en_err(priv, "Failed to allocate RSS indirection QP\n");
1035 goto reserve_err; 1031 goto reserve_err;
1036 } 1032 }
1037 rss_map->indir_qp.event = mlx4_en_sqp_event; 1033 rss_map->indir_qp.event = mlx4_en_sqp_event;
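
The RSS context above builds the default indirection map as a round-robin over the RX rings, with the table size rounded up to a power of two. A standalone illustration with example values (ex_roundup_pow_of_two only mirrors the kernel helper of the same name):

/*
 * Illustration only: 10 requested entries round up to a 16-entry table,
 * spread over 4 rings as 0,1,2,3,0,1,2,3,...
 */
#include <stdio.h>

static unsigned int ex_roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int num_entries = 10, num_rings = 4;	/* example values */
	unsigned int size = ex_roundup_pow_of_two(num_entries);
	unsigned int i;

	for (i = 0; i < size; i++)
		printf("entry %u -> ring %u\n", i, i % num_rings);
	return 0;
}
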
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index ac6fc499b280..5dc7466ad035 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -68,15 +68,15 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
68 tmp = size * sizeof(struct mlx4_en_tx_info); 68 tmp = size * sizeof(struct mlx4_en_tx_info);
69 ring->tx_info = vmalloc(tmp); 69 ring->tx_info = vmalloc(tmp);
70 if (!ring->tx_info) { 70 if (!ring->tx_info) {
71 mlx4_err(mdev, "Failed allocating tx_info ring\n"); 71 en_err(priv, "Failed allocating tx_info ring\n");
72 return -ENOMEM; 72 return -ENOMEM;
73 } 73 }
74 mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", 74 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
75 ring->tx_info, tmp); 75 ring->tx_info, tmp);
76 76
77 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); 77 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
78 if (!ring->bounce_buf) { 78 if (!ring->bounce_buf) {
79 mlx4_err(mdev, "Failed allocating bounce buffer\n"); 79 en_err(priv, "Failed allocating bounce buffer\n");
80 err = -ENOMEM; 80 err = -ENOMEM;
81 goto err_tx; 81 goto err_tx;
82 } 82 }
@@ -85,31 +85,31 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
85 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 85 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
86 2 * PAGE_SIZE); 86 2 * PAGE_SIZE);
87 if (err) { 87 if (err) {
88 mlx4_err(mdev, "Failed allocating hwq resources\n"); 88 en_err(priv, "Failed allocating hwq resources\n");
89 goto err_bounce; 89 goto err_bounce;
90 } 90 }
91 91
92 err = mlx4_en_map_buffer(&ring->wqres.buf); 92 err = mlx4_en_map_buffer(&ring->wqres.buf);
93 if (err) { 93 if (err) {
94 mlx4_err(mdev, "Failed to map TX buffer\n"); 94 en_err(priv, "Failed to map TX buffer\n");
95 goto err_hwq_res; 95 goto err_hwq_res;
96 } 96 }
97 97
98 ring->buf = ring->wqres.buf.direct.buf; 98 ring->buf = ring->wqres.buf.direct.buf;
99 99
100 mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " 100 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
101 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 101 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
102 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 102 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
103 103
104 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); 104 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
105 if (err) { 105 if (err) {
106 mlx4_err(mdev, "Failed reserving qp for tx ring.\n"); 106 en_err(priv, "Failed reserving qp for tx ring.\n");
107 goto err_map; 107 goto err_map;
108 } 108 }
109 109
110 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); 110 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
111 if (err) { 111 if (err) {
112 mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn); 112 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
113 goto err_reserve; 113 goto err_reserve;
114 } 114 }
115 ring->qp.event = mlx4_en_sqp_event; 115 ring->qp.event = mlx4_en_sqp_event;
@@ -135,7 +135,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
135 struct mlx4_en_tx_ring *ring) 135 struct mlx4_en_tx_ring *ring)
136{ 136{
137 struct mlx4_en_dev *mdev = priv->mdev; 137 struct mlx4_en_dev *mdev = priv->mdev;
138 mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); 138 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
139 139
140 mlx4_qp_remove(mdev->dev, &ring->qp); 140 mlx4_qp_remove(mdev->dev, &ring->qp);
141 mlx4_qp_free(mdev->dev, &ring->qp); 141 mlx4_qp_free(mdev->dev, &ring->qp);
@@ -274,12 +274,12 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
274 274
275 /* Skip last polled descriptor */ 275 /* Skip last polled descriptor */
276 ring->cons += ring->last_nr_txbb; 276 ring->cons += ring->last_nr_txbb;
277 mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", 277 en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
278 ring->cons, ring->prod); 278 ring->cons, ring->prod);
279 279
280 if ((u32) (ring->prod - ring->cons) > ring->size) { 280 if ((u32) (ring->prod - ring->cons) > ring->size) {
281 if (netif_msg_tx_err(priv)) 281 if (netif_msg_tx_err(priv))
282 mlx4_warn(priv->mdev, "Tx consumer passed producer!\n"); 282 en_warn(priv, "Tx consumer passed producer!\n");
283 return 0; 283 return 0;
284 } 284 }
285 285
@@ -292,39 +292,11 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
292 } 292 }
293 293
294 if (cnt) 294 if (cnt)
295 mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); 295 en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
296 296
297 return cnt; 297 return cnt;
298} 298}
299 299
300void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
301{
302 int block = 8 / ring_num;
303 int extra = 8 - (block * ring_num);
304 int num = 0;
305 u16 ring = 1;
306 int prio;
307
308 if (ring_num == 1) {
309 for (prio = 0; prio < 8; prio++)
310 prio_map[prio] = 0;
311 return;
312 }
313
314 for (prio = 0; prio < 8; prio++) {
315 if (extra && (num == block + 1)) {
316 ring++;
317 num = 0;
318 extra--;
319 } else if (!extra && (num == block)) {
320 ring++;
321 num = 0;
322 }
323 prio_map[prio] = ring;
324 mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
325 num++;
326 }
327}
328 300
329static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) 301static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
330{ 302{
@@ -386,18 +358,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
386 if (unlikely(ring->blocked)) { 358 if (unlikely(ring->blocked)) {
387 if ((u32) (ring->prod - ring->cons) <= 359 if ((u32) (ring->prod - ring->cons) <=
388 ring->size - HEADROOM - MAX_DESC_TXBBS) { 360 ring->size - HEADROOM - MAX_DESC_TXBBS) {
389
390 /* TODO: support multiqueue netdevs. Currently, we block
391 * when *any* ring is full. Note that:
392 * - 2 Tx rings can unblock at the same time and call
393 * netif_wake_queue(), which is OK since this
394 * operation is idempotent.
395 * - We might wake the queue just after another ring
396 * stopped it. This is no big deal because the next
397 * transmission on that ring would stop the queue.
398 */
399 ring->blocked = 0; 361 ring->blocked = 0;
400 netif_wake_queue(dev); 362 netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
401 priv->port_stats.wake_queue++; 363 priv->port_stats.wake_queue++;
402 } 364 }
403 } 365 }
@@ -426,7 +388,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
426 388
427 INC_PERF_COUNTER(priv->pstats.tx_poll); 389 INC_PERF_COUNTER(priv->pstats.tx_poll);
428 390
429 if (!spin_trylock(&ring->comp_lock)) { 391 if (!spin_trylock_irq(&ring->comp_lock)) {
430 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); 392 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
431 return; 393 return;
432 } 394 }
@@ -439,7 +401,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
439 if (inflight && priv->port_up) 401 if (inflight && priv->port_up)
440 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); 402 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
441 403
442 spin_unlock(&ring->comp_lock); 404 spin_unlock_irq(&ring->comp_lock);
443} 405}
444 406
445static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, 407static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -482,9 +444,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
482 444
483 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ 445 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
484 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) 446 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
485 if (spin_trylock(&ring->comp_lock)) { 447 if (spin_trylock_irq(&ring->comp_lock)) {
486 mlx4_en_process_tx_cq(priv->dev, cq); 448 mlx4_en_process_tx_cq(priv->dev, cq);
487 spin_unlock(&ring->comp_lock); 449 spin_unlock_irq(&ring->comp_lock);
488 } 450 }
489} 451}
490 452
@@ -539,7 +501,6 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
539 int *lso_header_size) 501 int *lso_header_size)
540{ 502{
541 struct mlx4_en_priv *priv = netdev_priv(dev); 503 struct mlx4_en_priv *priv = netdev_priv(dev);
542 struct mlx4_en_dev *mdev = priv->mdev;
543 int real_size; 504 int real_size;
544 505
545 if (skb_is_gso(skb)) { 506 if (skb_is_gso(skb)) {
@@ -553,14 +514,14 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
553 real_size += DS_SIZE; 514 real_size += DS_SIZE;
554 else { 515 else {
555 if (netif_msg_tx_err(priv)) 516 if (netif_msg_tx_err(priv))
556 mlx4_warn(mdev, "Non-linear headers\n"); 517 en_warn(priv, "Non-linear headers\n");
557 dev_kfree_skb_any(skb); 518 dev_kfree_skb_any(skb);
558 return 0; 519 return 0;
559 } 520 }
560 } 521 }
561 if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) { 522 if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
562 if (netif_msg_tx_err(priv)) 523 if (netif_msg_tx_err(priv))
563 mlx4_warn(mdev, "LSO header size too big\n"); 524 en_warn(priv, "LSO header size too big\n");
564 dev_kfree_skb_any(skb); 525 dev_kfree_skb_any(skb);
565 return 0; 526 return 0;
566 } 527 }
@@ -617,21 +578,20 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
617 tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; 578 tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
618} 579}
619 580
620static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb, 581u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
621 u16 *vlan_tag)
622{ 582{
623 int tx_ind; 583 struct mlx4_en_priv *priv = netdev_priv(dev);
584 u16 vlan_tag = 0;
624 585
625 /* Obtain VLAN information if present */ 586 /* If we support per priority flow control and the packet contains
626 if (priv->vlgrp && vlan_tx_tag_present(skb)) { 587 * a vlan tag, send the packet to the TX ring assigned to that priority
627 *vlan_tag = vlan_tx_tag_get(skb); 588 */
628 /* Set the Tx ring to use according to vlan priority */ 589 if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
629 tx_ind = priv->tx_prio_map[*vlan_tag >> 13]; 590 vlan_tag = vlan_tx_tag_get(skb);
630 } else { 591 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
631 *vlan_tag = 0;
632 tx_ind = 0;
633 } 592 }
634 return tx_ind; 593
594 return skb_tx_hash(dev, skb);
635} 595}
636 596
637int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) 597int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
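The new mlx4_en_select_queue() above routes tagged traffic to a per-priority ring when per-priority flow control is enabled: the top three bits of the VLAN tag (the 802.1p priority) index one of the MLX4_EN_NUM_PPP_RINGS rings placed after the MLX4_EN_NUM_TX_RINGS regular rings, and everything else falls back to skb_tx_hash(). A minimal standalone sketch of the same arithmetic, with plain constants and a stand-in hash in place of the kernel helpers:

#include <stdio.h>
#include <stdint.h>

#define NUM_TX_RINGS  8   /* regular rings, mirrors MLX4_EN_NUM_TX_RINGS */
#define NUM_PPP_RINGS 8   /* one extra ring per 802.1p priority */

/* Tagged traffic goes to a per-priority ring; untagged traffic is spread
 * over the regular rings by a stand-in hash (the driver uses skb_tx_hash). */
static unsigned select_queue(int has_vlan, uint16_t vlan_tag, uint32_t flow_hash)
{
        if (has_vlan)
                return NUM_TX_RINGS + (vlan_tag >> 13);
        return flow_hash % NUM_TX_RINGS;
}

int main(void)
{
        uint16_t tag = (5 << 13) | 100; /* priority 5, VLAN ID 100 */

        printf("tagged   -> ring %u\n", select_queue(1, tag, 0));
        printf("untagged -> ring %u\n", select_queue(0, 0, 0x12345678));
        return 0;
}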
@@ -651,7 +611,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
651 dma_addr_t dma; 611 dma_addr_t dma;
652 u32 index; 612 u32 index;
653 __be32 op_own; 613 __be32 op_own;
654 u16 vlan_tag; 614 u16 vlan_tag = 0;
655 int i; 615 int i;
656 int lso_header_size; 616 int lso_header_size;
657 void *fragptr; 617 void *fragptr;
@@ -669,20 +629,21 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
669 nr_txbb = desc_size / TXBB_SIZE; 629 nr_txbb = desc_size / TXBB_SIZE;
670 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { 630 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
671 if (netif_msg_tx_err(priv)) 631 if (netif_msg_tx_err(priv))
672 mlx4_warn(mdev, "Oversized header or SG list\n"); 632 en_warn(priv, "Oversized header or SG list\n");
673 dev_kfree_skb_any(skb); 633 dev_kfree_skb_any(skb);
674 return NETDEV_TX_OK; 634 return NETDEV_TX_OK;
675 } 635 }
676 636
677 tx_ind = get_vlan_info(priv, skb, &vlan_tag); 637 tx_ind = skb->queue_mapping;
678 ring = &priv->tx_ring[tx_ind]; 638 ring = &priv->tx_ring[tx_ind];
639 if (priv->vlgrp && vlan_tx_tag_present(skb))
640 vlan_tag = vlan_tx_tag_get(skb);
679 641
680 /* Check available TXBBs And 2K spare for prefetch */ 642 /* Check available TXBBs And 2K spare for prefetch */
681 if (unlikely(((int)(ring->prod - ring->cons)) > 643 if (unlikely(((int)(ring->prod - ring->cons)) >
682 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 644 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
683 /* every full Tx ring stops queue. 645 /* every full Tx ring stops queue */
684 * TODO: implement multi-queue support (per-queue stop) */ 646 netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
685 netif_stop_queue(dev);
686 ring->blocked = 1; 647 ring->blocked = 1;
687 priv->port_stats.queue_stopped++; 648 priv->port_stats.queue_stopped++;
688 649
@@ -695,7 +656,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
695 /* Now that we know what Tx ring to use */ 656 /* Now that we know what Tx ring to use */
696 if (unlikely(!priv->port_up)) { 657 if (unlikely(!priv->port_up)) {
697 if (netif_msg_tx_err(priv)) 658 if (netif_msg_tx_err(priv))
698 mlx4_warn(mdev, "xmit: port down!\n"); 659 en_warn(priv, "xmit: port down!\n");
699 dev_kfree_skb_any(skb); 660 dev_kfree_skb_any(skb);
700 return NETDEV_TX_OK; 661 return NETDEV_TX_OK;
701 } 662 }
@@ -819,7 +780,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
819 /* Ring doorbell! */ 780 /* Ring doorbell! */
820 wmb(); 781 wmb();
821 writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); 782 writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
822 dev->trans_start = jiffies;
823 783
824 /* Poll CQ here */ 784 /* Poll CQ here */
825 mlx4_en_xmit_poll(priv, tx_ind); 785 mlx4_en_xmit_poll(priv, tx_ind);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8830dcb92ec8..dee188761a3c 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -623,8 +623,10 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
623 err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE, 623 err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
624 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, 624 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
625 &priv->eq_table.eq[i]); 625 &priv->eq_table.eq[i]);
626 if (err) 626 if (err) {
627 --i;
627 goto err_out_unmap; 628 goto err_out_unmap;
629 }
628 } 630 }
629 631
630 err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, 632 err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
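The eq.c hunk fixes the error unwind: when mlx4_create_eq() fails for index i, that entry was never created, so the index is decremented before jumping to the cleanup path and only the successfully created EQs are freed. A minimal standalone sketch of that create/unwind pattern, with dummy create/free helpers standing in for the mlx4 calls:

#include <stdio.h>

#define NUM_EQ 4

static int create_eq(int i)  { printf("create %d\n", i); return i == 2 ? -1 : 0; }
static void free_eq(int i)   { printf("free   %d\n", i); }

static int init_eq_table(void)
{
        int err = 0, i;

        for (i = 0; i < NUM_EQ; ++i) {
                err = create_eq(i);
                if (err) {
                        --i;            /* entry i was never created */
                        goto err_out;
                }
        }
        return 0;

err_out:
        while (i >= 0)
                free_eq(i--);           /* unwind only what succeeded */
        return err;
}

int main(void)
{
        return init_eq_table() ? 1 : 0;
}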
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index ef840abbcd39..d43a9e4c2aea 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -49,26 +49,42 @@
49#include "en_port.h" 49#include "en_port.h"
50 50
51#define DRV_NAME "mlx4_en" 51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.4.0" 52#define DRV_VERSION "1.4.1.1"
53#define DRV_RELDATE "Sep 2008" 53#define DRV_RELDATE "June 2009"
54 54
55 55
56#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 56#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
57 57
58#define mlx4_dbg(mlevel, priv, format, arg...) \ 58#define en_print(level, priv, format, arg...) \
59 if (NETIF_MSG_##mlevel & priv->msg_enable) \ 59 { \
60 printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\ 60 if ((priv)->registered) \
61 (dev_name(&priv->mdev->pdev->dev)) , ## arg) 61 printk(level "%s: %s: " format, DRV_NAME, \
62 (priv->dev)->name, ## arg); \
63 else \
64 printk(level "%s: %s: Port %d: " format, \
65 DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
66 (priv)->port, ## arg); \
67 }
68
69#define en_dbg(mlevel, priv, format, arg...) \
70 { \
71 if (NETIF_MSG_##mlevel & priv->msg_enable) \
72 en_print(KERN_DEBUG, priv, format, ## arg) \
73 }
74#define en_warn(priv, format, arg...) \
75 en_print(KERN_WARNING, priv, format, ## arg)
76#define en_err(priv, format, arg...) \
77 en_print(KERN_ERR, priv, format, ## arg)
62 78
63#define mlx4_err(mdev, format, arg...) \ 79#define mlx4_err(mdev, format, arg...) \
64 printk(KERN_ERR "%s %s: " format , DRV_NAME ,\ 80 printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
65 (dev_name(&mdev->pdev->dev)) , ## arg) 81 dev_name(&mdev->pdev->dev) , ## arg)
66#define mlx4_info(mdev, format, arg...) \ 82#define mlx4_info(mdev, format, arg...) \
67 printk(KERN_INFO "%s %s: " format , DRV_NAME ,\ 83 printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
68 (dev_name(&mdev->pdev->dev)) , ## arg) 84 dev_name(&mdev->pdev->dev) , ## arg)
69#define mlx4_warn(mdev, format, arg...) \ 85#define mlx4_warn(mdev, format, arg...) \
70 printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\ 86 printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
71 (dev_name(&mdev->pdev->dev)) , ## arg) 87 dev_name(&mdev->pdev->dev) , ## arg)
72 88
73/* 89/*
74 * Device constants 90 * Device constants
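The new en_print() macro picks its prefix from the state of the interface: once the netdev is registered it prints the interface name, and before that it falls back to the PCI device name plus the port number. A rough userspace sketch of the same idea, using a hypothetical struct and printf() in place of printk() (GNU-style varargs, as in the kernel macro):

#include <stdio.h>

struct en_priv {
        int registered;
        int port;
        const char *netdev_name;        /* valid once registered */
        const char *pci_name;           /* always valid */
};

#define en_print(level, priv, fmt, ...)                                 \
        do {                                                            \
                if ((priv)->registered)                                 \
                        printf("%s mlx4_en: %s: " fmt, level,           \
                               (priv)->netdev_name, ##__VA_ARGS__);     \
                else                                                    \
                        printf("%s mlx4_en: %s: Port %d: " fmt, level,  \
                               (priv)->pci_name, (priv)->port,          \
                               ##__VA_ARGS__);                          \
        } while (0)

#define en_warn(priv, fmt, ...) en_print("<warn>", priv, fmt, ##__VA_ARGS__)

int main(void)
{
        struct en_priv priv = { 0, 1, "eth0", "0000:07:00.0" };

        en_warn(&priv, "link is down\n");       /* PCI name + port */
        priv.registered = 1;
        en_warn(&priv, "link is up\n");         /* netdev name */
        return 0;
}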
@@ -123,12 +139,14 @@ enum {
123#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES) 139#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
124#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) 140#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
125 141
126#define MLX4_EN_TX_RING_NUM 9 142#define MLX4_EN_SMALL_PKT_SIZE 64
127#define MLX4_EN_DEF_TX_RING_SIZE 1024 143#define MLX4_EN_NUM_TX_RINGS 8
144#define MLX4_EN_NUM_PPP_RINGS 8
145#define MLX4_EN_DEF_TX_RING_SIZE 512
128#define MLX4_EN_DEF_RX_RING_SIZE 1024 146#define MLX4_EN_DEF_RX_RING_SIZE 1024
129 147
130/* Target number of bytes to coalesce with interrupt moderation */ 148/* Target number of packets to coalesce with interrupt moderation */
131#define MLX4_EN_RX_COAL_TARGET 0x20000 149#define MLX4_EN_RX_COAL_TARGET 44
132#define MLX4_EN_RX_COAL_TIME 0x10 150#define MLX4_EN_RX_COAL_TIME 0x10
133 151
134#define MLX4_EN_TX_COAL_PKTS 5 152#define MLX4_EN_TX_COAL_PKTS 5
@@ -462,7 +480,6 @@ struct mlx4_en_priv {
462 int base_qpn; 480 int base_qpn;
463 481
464 struct mlx4_en_rss_map rss_map; 482 struct mlx4_en_rss_map rss_map;
465 u16 tx_prio_map[8];
466 u32 flags; 483 u32 flags;
467#define MLX4_EN_FLAG_PROMISC 0x1 484#define MLX4_EN_FLAG_PROMISC 0x1
468 u32 tx_ring_num; 485 u32 tx_ring_num;
@@ -500,8 +517,6 @@ void mlx4_en_stop_port(struct net_device *dev);
500void mlx4_en_free_resources(struct mlx4_en_priv *priv); 517void mlx4_en_free_resources(struct mlx4_en_priv *priv);
501int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); 518int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
502 519
503int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
504
505int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 520int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
506 int entries, int ring, enum cq_type mode); 521 int entries, int ring, enum cq_type mode);
507void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 522void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -512,6 +527,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
512 527
513void mlx4_en_poll_tx_cq(unsigned long data); 528void mlx4_en_poll_tx_cq(unsigned long data);
514void mlx4_en_tx_irq(struct mlx4_cq *mcq); 529void mlx4_en_tx_irq(struct mlx4_cq *mcq);
530u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
515int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 531int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
516 532
517int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, 533int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
@@ -546,7 +562,6 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
546void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv, 562void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
547 struct mlx4_en_rss_map *rss_map, 563 struct mlx4_en_rss_map *rss_map,
548 int num_entries, int num_rings); 564 int num_entries, int num_rings);
549void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
550int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); 565int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
551void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); 566void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
552int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); 567int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0caf74cae8bc..0a467785f065 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -402,7 +402,8 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
402 for (i = 0; i < npages; ++i) 402 for (i = 0; i < npages; ++i)
403 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 403 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
404 404
405 dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE); 405 dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
406 npages * sizeof (u64), DMA_TO_DEVICE);
406 407
407 return 0; 408 return 0;
408} 409}
@@ -549,8 +550,8 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
549 for (i = 0; i < npages; ++i) 550 for (i = 0; i < npages; ++i)
550 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 551 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
551 552
552 dma_sync_single(&dev->pdev->dev, fmr->dma_handle, 553 dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
553 npages * sizeof(u64), DMA_TO_DEVICE); 554 npages * sizeof(u64), DMA_TO_DEVICE);
554 555
555 fmr->mpt->key = cpu_to_be32(key); 556 fmr->mpt->key = cpu_to_be32(key);
556 fmr->mpt->lkey = cpu_to_be32(key); 557 fmr->mpt->lkey = cpu_to_be32(key);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 1361ddc8d31f..b4e18a58cb1b 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,6 +55,7 @@
55#include <linux/types.h> 55#include <linux/types.h>
56#include <linux/inet_lro.h> 56#include <linux/inet_lro.h>
57#include <asm/system.h> 57#include <asm/system.h>
58#include <linux/list.h>
58 59
59static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 60static char mv643xx_eth_driver_name[] = "mv643xx_eth";
60static char mv643xx_eth_driver_version[] = "1.4"; 61static char mv643xx_eth_driver_version[] = "1.4";
@@ -1721,20 +1722,20 @@ static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
1721 1722
1722static u32 uc_addr_filter_mask(struct net_device *dev) 1723static u32 uc_addr_filter_mask(struct net_device *dev)
1723{ 1724{
1724 struct dev_addr_list *uc_ptr; 1725 struct netdev_hw_addr *ha;
1725 u32 nibbles; 1726 u32 nibbles;
1726 1727
1727 if (dev->flags & IFF_PROMISC) 1728 if (dev->flags & IFF_PROMISC)
1728 return 0; 1729 return 0;
1729 1730
1730 nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1731 nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1731 for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) { 1732 list_for_each_entry(ha, &dev->uc_list, list) {
1732 if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5)) 1733 if (memcmp(dev->dev_addr, ha->addr, 5))
1733 return 0; 1734 return 0;
1734 if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0) 1735 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
1735 return 0; 1736 return 0;
1736 1737
1737 nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f); 1738 nibbles |= 1 << (ha->addr[5] & 0x0f);
1738 } 1739 }
1739 1740
1740 return nibbles; 1741 return nibbles;
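uc_addr_filter_mask() now walks the netdev_hw_addr list, but the filtering rule is unchanged: the hardware can only match on the low nibble of the last address byte, so the function builds a bitmap of those nibbles and bails out with 0 if any secondary unicast address differs from dev_addr anywhere else. A standalone sketch of the same check over a plain array of addresses:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Returns the nibble bitmap for the hardware unicast filter, or 0 if the
 * extra addresses differ from dev_addr outside the low nibble of byte 5. */
static uint32_t uc_filter_mask(const uint8_t dev_addr[6],
                               const uint8_t (*uc)[6], int n)
{
        uint32_t nibbles = 1u << (dev_addr[5] & 0x0f);
        int i;

        for (i = 0; i < n; i++) {
                if (memcmp(dev_addr, uc[i], 5))
                        return 0;
                if ((dev_addr[5] ^ uc[i][5]) & 0xf0)
                        return 0;
                nibbles |= 1u << (uc[i][5] & 0x0f);
        }
        return nibbles;
}

int main(void)
{
        uint8_t base[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x50 };
        uint8_t extra[2][6] = {
                { 0x00, 0x11, 0x22, 0x33, 0x44, 0x53 },
                { 0x00, 0x11, 0x22, 0x33, 0x44, 0x57 },
        };

        printf("mask = 0x%04x\n", uc_filter_mask(base, extra, 2));
        return 0;
}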
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 7e28b4610122..c9a30d3a66fb 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2892,7 +2892,6 @@ again:
2892 tx->stop_queue++; 2892 tx->stop_queue++;
2893 netif_tx_stop_queue(netdev_queue); 2893 netif_tx_stop_queue(netdev_queue);
2894 } 2894 }
2895 dev->trans_start = jiffies;
2896 return 0; 2895 return 0;
2897 2896
2898abort_linearize: 2897abort_linearize:
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 4a51c31330da..6f77ad58e3b3 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -178,10 +178,8 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
178 178
179 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 179 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
180 rds_ring = &recv_ctx->rds_rings[ring]; 180 rds_ring = &recv_ctx->rds_rings[ring];
181 if (rds_ring->rx_buf_arr) { 181 vfree(rds_ring->rx_buf_arr);
182 vfree(rds_ring->rx_buf_arr); 182 rds_ring->rx_buf_arr = NULL;
183 rds_ring->rx_buf_arr = NULL;
184 }
185 } 183 }
186 kfree(recv_ctx->rds_rings); 184 kfree(recv_ctx->rds_rings);
187 185
@@ -190,8 +188,7 @@ skip_rds:
190 return; 188 return;
191 189
192 tx_ring = adapter->tx_ring; 190 tx_ring = adapter->tx_ring;
193 if (tx_ring->cmd_buf_arr) 191 vfree(tx_ring->cmd_buf_arr);
194 vfree(tx_ring->cmd_buf_arr);
195} 192}
196 193
197int netxen_alloc_sw_resources(struct netxen_adapter *adapter) 194int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 50477f5c3ecb..98737ef72936 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1496,7 +1496,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1496 netxen_nic_update_cmd_producer(adapter, tx_ring, producer); 1496 netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
1497 1497
1498 adapter->stats.xmitcalled++; 1498 adapter->stats.xmitcalled++;
1499 netdev->trans_start = jiffies;
1500 1499
1501 return NETDEV_TX_OK; 1500 return NETDEV_TX_OK;
1502 1501
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 0d9de5ac4130..fa61a12c5e15 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -22,6 +22,7 @@
22#include <linux/log2.h> 22#include <linux/log2.h>
23#include <linux/jiffies.h> 23#include <linux/jiffies.h>
24#include <linux/crc32.h> 24#include <linux/crc32.h>
25#include <linux/list.h>
25 26
26#include <linux/io.h> 27#include <linux/io.h>
27 28
@@ -6362,6 +6363,7 @@ static void niu_set_rx_mode(struct net_device *dev)
6362 struct niu *np = netdev_priv(dev); 6363 struct niu *np = netdev_priv(dev);
6363 int i, alt_cnt, err; 6364 int i, alt_cnt, err;
6364 struct dev_addr_list *addr; 6365 struct dev_addr_list *addr;
6366 struct netdev_hw_addr *ha;
6365 unsigned long flags; 6367 unsigned long flags;
6366 u16 hash[16] = { 0, }; 6368 u16 hash[16] = { 0, };
6367 6369
@@ -6383,9 +6385,8 @@ static void niu_set_rx_mode(struct net_device *dev)
6383 if (alt_cnt) { 6385 if (alt_cnt) {
6384 int index = 0; 6386 int index = 0;
6385 6387
6386 for (addr = dev->uc_list; addr; addr = addr->next) { 6388 list_for_each_entry(ha, &dev->uc_list, list) {
6387 err = niu_set_alt_mac(np, index, 6389 err = niu_set_alt_mac(np, index, ha->addr);
6388 addr->da_addr);
6389 if (err) 6390 if (err)
6390 printk(KERN_WARNING PFX "%s: Error %d " 6391 printk(KERN_WARNING PFX "%s: Error %d "
6391 "adding alt mac %d\n", 6392 "adding alt mac %d\n",
@@ -6777,8 +6778,6 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
6777 netif_tx_wake_queue(txq); 6778 netif_tx_wake_queue(txq);
6778 } 6779 }
6779 6780
6780 dev->trans_start = jiffies;
6781
6782out: 6781out:
6783 return NETDEV_TX_OK; 6782 return NETDEV_TX_OK;
6784 6783
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index d531614a90b5..940962ae8f23 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1204,9 +1204,7 @@ again:
1204 if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev)) 1204 if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
1205 netif_start_queue(ndev); 1205 netif_start_queue(ndev);
1206 1206
1207 /* set the transmit start time to catch transmit timeouts */ 1207 return NETDEV_TX_OK;
1208 ndev->trans_start = jiffies;
1209 return 0;
1210} 1208}
1211 1209
1212static void ns83820_update_stats(struct ns83820 *dev) 1210static void ns83820_update_stats(struct ns83820 *dev)
@@ -1626,7 +1624,7 @@ static void ns83820_tx_watch(unsigned long data)
1626 ); 1624 );
1627#endif 1625#endif
1628 1626
1629 if (time_after(jiffies, ndev->trans_start + 1*HZ) && 1627 if (time_after(jiffies, dev_trans_start(ndev) + 1*HZ) &&
1630 dev->tx_done_idx != dev->tx_free_idx) { 1628 dev->tx_done_idx != dev->tx_free_idx) {
1631 printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n", 1629 printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n",
1632 ndev->name, 1630 ndev->name,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 7a3ec9d39a9a..dd6f54d1b495 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -243,6 +243,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
243 243
244 temp &= ~(MII_M1111_HWCFG_MODE_MASK); 244 temp &= ~(MII_M1111_HWCFG_MODE_MASK);
245 temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK; 245 temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK;
246 temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
246 247
247 err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); 248 err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
248 if (err < 0) 249 if (err < 0)
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5981debcde5e..e7935d09c896 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -433,8 +433,7 @@ static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct s
433 * to the inner packet either 433 * to the inner packet either
434 */ 434 */
435 secpath_reset(skb); 435 secpath_reset(skb);
436 dst_release(skb->dst); 436 skb_dst_drop(skb);
437 skb->dst = NULL;
438 nf_reset(skb); 437 nf_reset(skb);
439 438
440 po = pppox_sk(session_sock); 439 po = pppox_sk(session_sock);
@@ -976,7 +975,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
976 /* Calculate UDP checksum if configured to do so */ 975 /* Calculate UDP checksum if configured to do so */
977 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT) 976 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
978 skb->ip_summed = CHECKSUM_NONE; 977 skb->ip_summed = CHECKSUM_NONE;
979 else if (!(skb->dst->dev->features & NETIF_F_V4_CSUM)) { 978 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
980 skb->ip_summed = CHECKSUM_COMPLETE; 979 skb->ip_summed = CHECKSUM_COMPLETE;
981 csum = skb_checksum(skb, 0, udp_len, 0); 980 csum = skb_checksum(skb, 0, udp_len, 0);
982 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr, 981 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
@@ -1172,14 +1171,14 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1172 nf_reset(skb); 1171 nf_reset(skb);
1173 1172
1174 /* Get routing info from the tunnel socket */ 1173 /* Get routing info from the tunnel socket */
1175 dst_release(skb->dst); 1174 skb_dst_drop(skb);
1176 skb->dst = dst_clone(__sk_dst_get(sk_tun)); 1175 skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun)));
1177 pppol2tp_skb_set_owner_w(skb, sk_tun); 1176 pppol2tp_skb_set_owner_w(skb, sk_tun);
1178 1177
1179 /* Calculate UDP checksum if configured to do so */ 1178 /* Calculate UDP checksum if configured to do so */
1180 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT) 1179 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
1181 skb->ip_summed = CHECKSUM_NONE; 1180 skb->ip_summed = CHECKSUM_NONE;
1182 else if (!(skb->dst->dev->features & NETIF_F_V4_CSUM)) { 1181 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
1183 skb->ip_summed = CHECKSUM_COMPLETE; 1182 skb->ip_summed = CHECKSUM_COMPLETE;
1184 csum = skb_checksum(skb, 0, udp_len, 0); 1183 csum = skb_checksum(skb, 0, udp_len, 0);
1185 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr, 1184 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index cadc32c94c1e..8a823ecc99a9 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2617,7 +2617,6 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2617 &port_regs->CommonRegs.reqQProducerIndex, 2617 &port_regs->CommonRegs.reqQProducerIndex,
2618 qdev->req_producer_index); 2618 qdev->req_producer_index);
2619 2619
2620 ndev->trans_start = jiffies;
2621 if (netif_msg_tx_queued(qdev)) 2620 if (netif_msg_tx_queued(qdev))
2622 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", 2621 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2623 ndev->name, qdev->req_producer_index, skb->len); 2622 ndev->name, qdev->req_producer_index, skb->len);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index fcb159e4df54..156e02e8905d 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -27,6 +27,8 @@
27 "%s: " fmt, __func__, ##args); \ 27 "%s: " fmt, __func__, ##args); \
28 } while (0) 28 } while (0)
29 29
30#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
31
30#define QLGE_VENDOR_ID 0x1077 32#define QLGE_VENDOR_ID 0x1077
31#define QLGE_DEVICE_ID_8012 0x8012 33#define QLGE_DEVICE_ID_8012 0x8012
32#define QLGE_DEVICE_ID_8000 0x8000 34#define QLGE_DEVICE_ID_8000 0x8000
@@ -39,7 +41,18 @@
39 41
40#define NUM_SMALL_BUFFERS 512 42#define NUM_SMALL_BUFFERS 512
41#define NUM_LARGE_BUFFERS 512 43#define NUM_LARGE_BUFFERS 512
44#define DB_PAGE_SIZE 4096
45
46/* Calculate the number of (4k) pages required to
47 * contain a buffer queue of the given length.
48 */
49#define MAX_DB_PAGES_PER_BQ(x) \
50 (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
51 (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
42 52
53#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
54 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
55 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
43#define SMALL_BUFFER_SIZE 256 56#define SMALL_BUFFER_SIZE 256
44#define LARGE_BUFFER_SIZE PAGE_SIZE 57#define LARGE_BUFFER_SIZE PAGE_SIZE
45#define MAX_SPLIT_SIZE 1023 58#define MAX_SPLIT_SIZE 1023
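MAX_DB_PAGES_PER_BQ() is a ceiling division: every buffer-queue entry needs a 64-bit address, so a queue of x entries occupies ceil(x * 8 / DB_PAGE_SIZE) doorbell pages, and RX_RING_SHADOW_SPACE reserves one u64 for the completion-queue producer-index shadow plus one page-pointer slot per page of each queue. A quick standalone check of that arithmetic with the 512-entry queues defined here:

#include <stdio.h>
#include <stdint.h>

#define DB_PAGE_SIZE 4096
#define MAX_DB_PAGES_PER_BQ(x) \
        (((x) * sizeof(uint64_t)) / DB_PAGE_SIZE + \
         (((x) * sizeof(uint64_t)) % DB_PAGE_SIZE ? 1 : 0))

int main(void)
{
        unsigned small = 512, large = 512;
        size_t shadow = sizeof(uint64_t) +
                        MAX_DB_PAGES_PER_BQ(small) * sizeof(uint64_t) +
                        MAX_DB_PAGES_PER_BQ(large) * sizeof(uint64_t);

        printf("pages per 512-entry queue: %zu\n", MAX_DB_PAGES_PER_BQ(small));
        printf("shadow space per rx ring : %zu bytes\n", shadow);
        return 0;
}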
@@ -50,7 +63,7 @@
50#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */ 63#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
51#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2) 64#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
52#define UDELAY_COUNT 3 65#define UDELAY_COUNT 3
53#define UDELAY_DELAY 10 66#define UDELAY_DELAY 100
54 67
55 68
56#define TX_DESC_PER_IOCB 8 69#define TX_DESC_PER_IOCB 8
@@ -63,7 +76,16 @@
63#define TX_DESC_PER_OAL 0 76#define TX_DESC_PER_OAL 0
64#endif 77#endif
65 78
66#define DB_PAGE_SIZE 4096 79/* MPI test register definitions. This register
80 * is used for determining alternate NIC function's
81 * PCI->func number.
82 */
83enum {
84 MPI_TEST_FUNC_PORT_CFG = 0x1002,
85 MPI_TEST_NIC1_FUNC_SHIFT = 1,
86 MPI_TEST_NIC2_FUNC_SHIFT = 5,
87 MPI_TEST_NIC_FUNC_MASK = 0x00000007,
88};
67 89
68/* 90/*
69 * Processor Address Register (PROC_ADDR) bit definitions. 91 * Processor Address Register (PROC_ADDR) bit definitions.
@@ -1430,7 +1452,10 @@ struct ql_adapter {
1430 1452
1431 /* Hardware information */ 1453 /* Hardware information */
1432 u32 chip_rev_id; 1454 u32 chip_rev_id;
1455 u32 fw_rev_id;
1433 u32 func; /* PCI function for this adapter */ 1456 u32 func; /* PCI function for this adapter */
1457 u32 alt_func; /* PCI function for alternate adapter */
1458 u32 port; /* Port number this adapter */
1434 1459
1435 spinlock_t adapter_lock; 1460 spinlock_t adapter_lock;
1436 spinlock_t hw_lock; 1461 spinlock_t hw_lock;
@@ -1580,6 +1605,8 @@ void ql_mpi_idc_work(struct work_struct *work);
1580void ql_mpi_port_cfg_work(struct work_struct *work); 1605void ql_mpi_port_cfg_work(struct work_struct *work);
1581int ql_mb_get_fw_state(struct ql_adapter *qdev); 1606int ql_mb_get_fw_state(struct ql_adapter *qdev);
1582int ql_cam_route_initialize(struct ql_adapter *qdev); 1607int ql_cam_route_initialize(struct ql_adapter *qdev);
1608int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
1609int ql_mb_about_fw(struct ql_adapter *qdev);
1583 1610
1584#if 1 1611#if 1
1585#define QL_ALL_DUMP 1612#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 913b2a5fafc9..37c99fe79770 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -293,7 +293,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
293 struct ql_adapter *qdev = netdev_priv(ndev); 293 struct ql_adapter *qdev = netdev_priv(ndev);
294 strncpy(drvinfo->driver, qlge_driver_name, 32); 294 strncpy(drvinfo->driver, qlge_driver_name, 32);
295 strncpy(drvinfo->version, qlge_driver_version, 32); 295 strncpy(drvinfo->version, qlge_driver_version, 32);
296 strncpy(drvinfo->fw_version, "N/A", 32); 296 snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
297 (qdev->fw_rev_id & 0x00ff0000) >> 16,
298 (qdev->fw_rev_id & 0x0000ff00) >> 8,
299 (qdev->fw_rev_id & 0x000000ff));
297 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); 300 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
298 drvinfo->n_stats = 0; 301 drvinfo->n_stats = 0;
299 drvinfo->testinfo_len = 0; 302 drvinfo->testinfo_len = 0;
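ql_get_drvinfo() now reports the MPI firmware revision that ql_mb_about_fw() stores in qdev->fw_rev_id, packed as one byte each for major (bits 23:16), minor (15:8) and sub-minor (7:0). A standalone decode of that packing, using a made-up revision value:

#include <stdio.h>
#include <stdint.h>

static void format_fw_version(uint32_t fw_rev_id, char *buf, size_t len)
{
        snprintf(buf, len, "v%u.%u.%u",
                 (unsigned)((fw_rev_id & 0x00ff0000) >> 16),
                 (unsigned)((fw_rev_id & 0x0000ff00) >> 8),
                 (unsigned)(fw_rev_id & 0x000000ff));
}

int main(void)
{
        char ver[32];

        format_fw_version(0x00010203, ver, sizeof(ver)); /* made-up revision */
        printf("%s\n", ver);                             /* prints v1.2.3 */
        return 0;
}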
@@ -401,6 +404,7 @@ const struct ethtool_ops qlge_ethtool_ops = {
401 .get_rx_csum = ql_get_rx_csum, 404 .get_rx_csum = ql_get_rx_csum,
402 .set_rx_csum = ql_set_rx_csum, 405 .set_rx_csum = ql_set_rx_csum,
403 .get_tx_csum = ethtool_op_get_tx_csum, 406 .get_tx_csum = ethtool_op_get_tx_csum,
407 .set_tx_csum = ethtool_op_set_tx_csum,
404 .get_sg = ethtool_op_get_sg, 408 .get_sg = ethtool_op_get_sg,
405 .set_sg = ethtool_op_set_sg, 409 .set_sg = ethtool_op_set_sg,
406 .get_tso = ethtool_op_get_tso, 410 .get_tso = ethtool_op_get_tso,
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c92ced247947..b9a5f59d6c9b 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -675,11 +675,12 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
675 int status; 675 int status;
676 __le32 *p = (__le32 *)&qdev->flash; 676 __le32 *p = (__le32 *)&qdev->flash;
677 u32 offset; 677 u32 offset;
678 u8 mac_addr[6];
678 679
679 /* Get flash offset for function and adjust 680 /* Get flash offset for function and adjust
680 * for dword access. 681 * for dword access.
681 */ 682 */
682 if (!qdev->func) 683 if (!qdev->port)
683 offset = FUNC0_FLASH_OFFSET / sizeof(u32); 684 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
684 else 685 else
685 offset = FUNC1_FLASH_OFFSET / sizeof(u32); 686 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
@@ -705,14 +706,26 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
705 goto exit; 706 goto exit;
706 } 707 }
707 708
708 if (!is_valid_ether_addr(qdev->flash.flash_params_8000.mac_addr)) { 709 /* Extract either manufacturer or BOFM modified
710 * MAC address.
711 */
712 if (qdev->flash.flash_params_8000.data_type1 == 2)
713 memcpy(mac_addr,
714 qdev->flash.flash_params_8000.mac_addr1,
715 qdev->ndev->addr_len);
716 else
717 memcpy(mac_addr,
718 qdev->flash.flash_params_8000.mac_addr,
719 qdev->ndev->addr_len);
720
721 if (!is_valid_ether_addr(mac_addr)) {
709 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n"); 722 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
710 status = -EINVAL; 723 status = -EINVAL;
711 goto exit; 724 goto exit;
712 } 725 }
713 726
714 memcpy(qdev->ndev->dev_addr, 727 memcpy(qdev->ndev->dev_addr,
715 qdev->flash.flash_params_8000.mac_addr, 728 mac_addr,
716 qdev->ndev->addr_len); 729 qdev->ndev->addr_len);
717 730
718exit: 731exit:
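The flash-parsing change prefers the BOFM-modified address (mac_addr1) when data_type1 equals 2 and otherwise keeps the manufacturer MAC, then rejects the result unless it is usable. A standalone sketch of that selection, where the validity test mirrors what is_valid_ether_addr() checks (not all-zero, not a multicast address) and the struct is a simplified stand-in for the driver's flash layout:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int valid_ether_addr(const uint8_t a[6])
{
        static const uint8_t zero[6];

        return memcmp(a, zero, 6) != 0 && !(a[0] & 0x01);
}

struct flash_params {
        uint8_t data_type1;     /* 2 means a BOFM-overridden address is present */
        uint8_t mac_addr[6];    /* manufacturer MAC */
        uint8_t mac_addr1[6];   /* BOFM-modified MAC */
};

static int pick_mac(const struct flash_params *fp, uint8_t out[6])
{
        memcpy(out, fp->data_type1 == 2 ? fp->mac_addr1 : fp->mac_addr, 6);
        return valid_ether_addr(out) ? 0 : -1;
}

int main(void)
{
        struct flash_params fp = {
                .data_type1 = 2,
                .mac_addr   = { 0x00, 0xc0, 0xdd, 0x00, 0x00, 0x01 },
                .mac_addr1  = { 0x00, 0xc0, 0xdd, 0x12, 0x34, 0x56 },
        };
        uint8_t mac[6];

        if (pick_mac(&fp, mac) == 0)
                printf("using %02x:%02x:%02x:%02x:%02x:%02x\n",
                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}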
@@ -731,7 +744,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
731 /* Second function's parameters follow the first 744 /* Second function's parameters follow the first
732 * function's. 745 * function's.
733 */ 746 */
734 if (qdev->func) 747 if (qdev->port)
735 offset = size; 748 offset = size;
736 749
737 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) 750 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
@@ -837,6 +850,13 @@ exit:
837static int ql_8000_port_initialize(struct ql_adapter *qdev) 850static int ql_8000_port_initialize(struct ql_adapter *qdev)
838{ 851{
839 int status; 852 int status;
853 /*
854 * Get MPI firmware version for driver banner
 855 * and ethtool info.
856 */
857 status = ql_mb_about_fw(qdev);
858 if (status)
859 goto exit;
840 status = ql_mb_get_fw_state(qdev); 860 status = ql_mb_get_fw_state(qdev);
841 if (status) 861 if (status)
842 goto exit; 862 goto exit;
@@ -1518,6 +1538,22 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1518 return; 1538 return;
1519 } 1539 }
1520 1540
1541 /* Frame error, so drop the packet. */
1542 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1543 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1544 ib_mac_rsp->flags2);
1545 dev_kfree_skb_any(skb);
1546 return;
1547 }
1548
1549 /* The max framesize filter on this chip is set higher than
1550 * MTU since FCoE uses 2k frames.
1551 */
1552 if (skb->len > ndev->mtu + ETH_HLEN) {
1553 dev_kfree_skb_any(skb);
1554 return;
1555 }
1556
1521 prefetch(skb->data); 1557 prefetch(skb->data);
1522 skb->dev = ndev; 1558 skb->dev = ndev;
1523 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { 1559 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
@@ -1540,7 +1576,6 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1540 * csum or frame errors. 1576 * csum or frame errors.
1541 */ 1577 */
1542 if (qdev->rx_csum && 1578 if (qdev->rx_csum &&
1543 !(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) &&
1544 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 1579 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1545 /* TCP frame. */ 1580 /* TCP frame. */
1546 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { 1581 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -2108,7 +2143,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
2108 wmb(); 2143 wmb();
2109 2144
2110 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); 2145 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2111 ndev->trans_start = jiffies;
2112 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n", 2146 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2113 tx_ring->prod_idx, skb->len); 2147 tx_ring->prod_idx, skb->len);
2114 2148
@@ -2203,7 +2237,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2203 &tx_ring->wq_base_dma); 2237 &tx_ring->wq_base_dma);
2204 2238
2205 if ((tx_ring->wq_base == NULL) 2239 if ((tx_ring->wq_base == NULL)
2206 || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) { 2240 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2207 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n"); 2241 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2208 return -ENOMEM; 2242 return -ENOMEM;
2209 } 2243 }
@@ -2518,14 +2552,16 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2518{ 2552{
2519 struct cqicb *cqicb = &rx_ring->cqicb; 2553 struct cqicb *cqicb = &rx_ring->cqicb;
2520 void *shadow_reg = qdev->rx_ring_shadow_reg_area + 2554 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2521 (rx_ring->cq_id * sizeof(u64) * 4); 2555 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2522 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + 2556 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2523 (rx_ring->cq_id * sizeof(u64) * 4); 2557 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2524 void __iomem *doorbell_area = 2558 void __iomem *doorbell_area =
2525 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); 2559 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2526 int err = 0; 2560 int err = 0;
2527 u16 bq_len; 2561 u16 bq_len;
2528 u64 tmp; 2562 u64 tmp;
2563 __le64 *base_indirect_ptr;
2564 int page_entries;
2529 2565
2530 /* Set up the shadow registers for this ring. */ 2566 /* Set up the shadow registers for this ring. */
2531 rx_ring->prod_idx_sh_reg = shadow_reg; 2567 rx_ring->prod_idx_sh_reg = shadow_reg;
@@ -2534,8 +2570,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2534 shadow_reg_dma += sizeof(u64); 2570 shadow_reg_dma += sizeof(u64);
2535 rx_ring->lbq_base_indirect = shadow_reg; 2571 rx_ring->lbq_base_indirect = shadow_reg;
2536 rx_ring->lbq_base_indirect_dma = shadow_reg_dma; 2572 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2537 shadow_reg += sizeof(u64); 2573 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2538 shadow_reg_dma += sizeof(u64); 2574 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2539 rx_ring->sbq_base_indirect = shadow_reg; 2575 rx_ring->sbq_base_indirect = shadow_reg;
2540 rx_ring->sbq_base_indirect_dma = shadow_reg_dma; 2576 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2541 2577
@@ -2572,7 +2608,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2572 if (rx_ring->lbq_len) { 2608 if (rx_ring->lbq_len) {
2573 cqicb->flags |= FLAGS_LL; /* Load lbq values */ 2609 cqicb->flags |= FLAGS_LL; /* Load lbq values */
2574 tmp = (u64)rx_ring->lbq_base_dma;; 2610 tmp = (u64)rx_ring->lbq_base_dma;;
2575 *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(tmp); 2611 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2612 page_entries = 0;
2613 do {
2614 *base_indirect_ptr = cpu_to_le64(tmp);
2615 tmp += DB_PAGE_SIZE;
2616 base_indirect_ptr++;
2617 page_entries++;
2618 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2576 cqicb->lbq_addr = 2619 cqicb->lbq_addr =
2577 cpu_to_le64(rx_ring->lbq_base_indirect_dma); 2620 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
2578 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : 2621 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
@@ -2589,7 +2632,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2589 if (rx_ring->sbq_len) { 2632 if (rx_ring->sbq_len) {
2590 cqicb->flags |= FLAGS_LS; /* Load sbq values */ 2633 cqicb->flags |= FLAGS_LS; /* Load sbq values */
2591 tmp = (u64)rx_ring->sbq_base_dma;; 2634 tmp = (u64)rx_ring->sbq_base_dma;;
2592 *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(tmp); 2635 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2636 page_entries = 0;
2637 do {
2638 *base_indirect_ptr = cpu_to_le64(tmp);
2639 tmp += DB_PAGE_SIZE;
2640 base_indirect_ptr++;
2641 page_entries++;
2642 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
2593 cqicb->sbq_addr = 2643 cqicb->sbq_addr =
2594 cpu_to_le64(rx_ring->sbq_base_indirect_dma); 2644 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2595 cqicb->sbq_buf_size = 2645 cqicb->sbq_buf_size =
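Because a buffer queue can now span several doorbell pages, the ring setup writes one page address per DB_PAGE_SIZE chunk of the queue into the indirect table instead of a single entry. A standalone sketch of that fill loop, host-endian and with a hypothetical base address where the driver stores cpu_to_le64() values:

#include <stdio.h>
#include <stdint.h>

#define DB_PAGE_SIZE 4096
#define PAGES_PER_BQ(entries) \
        (((entries) * sizeof(uint64_t) + DB_PAGE_SIZE - 1) / DB_PAGE_SIZE)

int main(void)
{
        uint64_t base_dma = 0x80000000ull;      /* hypothetical queue base */
        unsigned entries = 2048;                /* 2048 * 8 bytes = 4 pages */
        uint64_t indirect[PAGES_PER_BQ(2048)];
        unsigned page;

        for (page = 0; page < PAGES_PER_BQ(entries); page++) {
                indirect[page] = base_dma;      /* driver stores cpu_to_le64() */
                base_dma += DB_PAGE_SIZE;
        }

        for (page = 0; page < PAGES_PER_BQ(entries); page++)
                printf("page %u -> 0x%llx\n", page,
                       (unsigned long long)indirect[page]);
        return 0;
}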
@@ -3186,9 +3236,10 @@ static void ql_display_dev_info(struct net_device *ndev)
3186 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 3236 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3187 3237
3188 QPRINTK(qdev, PROBE, INFO, 3238 QPRINTK(qdev, PROBE, INFO,
3189 "Function #%d, NIC Roll %d, NIC Rev = %d, " 3239 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3190 "XG Roll = %d, XG Rev = %d.\n", 3240 "XG Roll = %d, XG Rev = %d.\n",
3191 qdev->func, 3241 qdev->func,
3242 qdev->port,
3192 qdev->chip_rev_id & 0x0000000f, 3243 qdev->chip_rev_id & 0x0000000f,
3193 qdev->chip_rev_id >> 4 & 0x0000000f, 3244 qdev->chip_rev_id >> 4 & 0x0000000f,
3194 qdev->chip_rev_id >> 8 & 0x0000000f, 3245 qdev->chip_rev_id >> 8 & 0x0000000f,
@@ -3264,7 +3315,6 @@ static int ql_adapter_up(struct ql_adapter *qdev)
3264 err = ql_adapter_initialize(qdev); 3315 err = ql_adapter_initialize(qdev);
3265 if (err) { 3316 if (err) {
3266 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n"); 3317 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3267 spin_unlock(&qdev->hw_lock);
3268 goto err_init; 3318 goto err_init;
3269 } 3319 }
3270 set_bit(QL_ADAPTER_UP, &qdev->flags); 3320 set_bit(QL_ADAPTER_UP, &qdev->flags);
@@ -3361,7 +3411,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3361 * completion handler rx_rings. 3411 * completion handler rx_rings.
3362 */ 3412 */
3363 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1; 3413 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3364 netif_set_gso_max_size(qdev->ndev, 65536);
3365 3414
3366 for (i = 0; i < qdev->tx_ring_count; i++) { 3415 for (i = 0; i < qdev->tx_ring_count; i++) {
3367 tx_ring = &qdev->tx_ring[i]; 3416 tx_ring = &qdev->tx_ring[i];
@@ -3644,12 +3693,53 @@ static struct nic_operations qla8000_nic_ops = {
3644 .port_initialize = ql_8000_port_initialize, 3693 .port_initialize = ql_8000_port_initialize,
3645}; 3694};
3646 3695
3696/* Find the pcie function number for the other NIC
3697 * on this chip. Since both NIC functions share a
3698 * common firmware we have the lowest enabled function
3699 * do any common work. Examples would be resetting
3700 * after a fatal firmware error, or doing a firmware
3701 * coredump.
3702 */
3703static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
3704{
3705 int status = 0;
3706 u32 temp;
3707 u32 nic_func1, nic_func2;
3708
3709 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3710 &temp);
3711 if (status)
3712 return status;
3713
3714 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3715 MPI_TEST_NIC_FUNC_MASK);
3716 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3717 MPI_TEST_NIC_FUNC_MASK);
3718
3719 if (qdev->func == nic_func1)
3720 qdev->alt_func = nic_func2;
3721 else if (qdev->func == nic_func2)
3722 qdev->alt_func = nic_func1;
3723 else
3724 status = -EIO;
3725
3726 return status;
3727}
3647 3728
3648static void ql_get_board_info(struct ql_adapter *qdev) 3729static int ql_get_board_info(struct ql_adapter *qdev)
3649{ 3730{
3731 int status;
3650 qdev->func = 3732 qdev->func =
3651 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; 3733 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3652 if (qdev->func) { 3734 if (qdev->func > 3)
3735 return -EIO;
3736
3737 status = ql_get_alt_pcie_func(qdev);
3738 if (status)
3739 return status;
3740
3741 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3742 if (qdev->port) {
3653 qdev->xg_sem_mask = SEM_XGMAC1_MASK; 3743 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3654 qdev->port_link_up = STS_PL1; 3744 qdev->port_link_up = STS_PL1;
3655 qdev->port_init = STS_PI1; 3745 qdev->port_init = STS_PI1;
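ql_get_alt_pcie_func() pulls both NIC function numbers out of MPI_TEST_FUNC_PORT_CFG with the shifts and mask defined earlier; whichever number is not this adapter's func becomes alt_func, and ql_get_board_info() then treats the lower function of the pair as port 0. A standalone sketch of the bit extraction and port decision, using a hypothetical register value:

#include <stdio.h>

#define MPI_TEST_NIC1_FUNC_SHIFT 1
#define MPI_TEST_NIC2_FUNC_SHIFT 5
#define MPI_TEST_NIC_FUNC_MASK   0x00000007

int main(void)
{
        unsigned reg = 0x00000062; /* hypothetical: NIC1 = func 1, NIC2 = func 3 */
        unsigned func = 3;         /* this adapter's PCI function */
        unsigned nic_func1, nic_func2, alt_func, port;

        nic_func1 = (reg >> MPI_TEST_NIC1_FUNC_SHIFT) & MPI_TEST_NIC_FUNC_MASK;
        nic_func2 = (reg >> MPI_TEST_NIC2_FUNC_SHIFT) & MPI_TEST_NIC_FUNC_MASK;

        if (func == nic_func1)
                alt_func = nic_func2;
        else if (func == nic_func2)
                alt_func = nic_func1;
        else
                return 1;          /* the register does not describe this func */

        port = (func < alt_func) ? 0 : 1;
        printf("func %u, alt_func %u, port %u\n", func, alt_func, port);
        return 0;
}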
@@ -3668,6 +3758,7 @@ static void ql_get_board_info(struct ql_adapter *qdev)
3668 qdev->nic_ops = &qla8012_nic_ops; 3758 qdev->nic_ops = &qla8012_nic_ops;
3669 else if (qdev->device_id == QLGE_DEVICE_ID_8000) 3759 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3670 qdev->nic_ops = &qla8000_nic_ops; 3760 qdev->nic_ops = &qla8000_nic_ops;
3761 return status;
3671} 3762}
3672 3763
3673static void ql_release_all(struct pci_dev *pdev) 3764static void ql_release_all(struct pci_dev *pdev)
@@ -3762,7 +3853,12 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3762 3853
3763 qdev->ndev = ndev; 3854 qdev->ndev = ndev;
3764 qdev->pdev = pdev; 3855 qdev->pdev = pdev;
3765 ql_get_board_info(qdev); 3856 err = ql_get_board_info(qdev);
3857 if (err) {
3858 dev_err(&pdev->dev, "Register access failed.\n");
3859 err = -EIO;
3860 goto err_out;
3861 }
3766 qdev->msg_enable = netif_msg_init(debug, default_msg); 3862 qdev->msg_enable = netif_msg_init(debug, default_msg);
3767 spin_lock_init(&qdev->hw_lock); 3863 spin_lock_init(&qdev->hw_lock);
3768 spin_lock_init(&qdev->stats_lock); 3864 spin_lock_init(&qdev->stats_lock);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 9f81b797f10b..a67c14a7befd 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -90,14 +90,14 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
90 */ 90 */
91static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev) 91static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
92{ 92{
93 int count = 50; /* TODO: arbitrary for now. */ 93 int count = 100;
94 u32 value; 94 u32 value;
95 95
96 do { 96 do {
97 value = ql_read32(qdev, STS); 97 value = ql_read32(qdev, STS);
98 if (value & STS_PI) 98 if (value & STS_PI)
99 return 0; 99 return 0;
100 udelay(UDELAY_DELAY); /* 10us */ 100 mdelay(UDELAY_DELAY); /* 100ms */
101 } while (--count); 101 } while (--count);
102 return -ETIMEDOUT; 102 return -ETIMEDOUT;
103} 103}
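With UDELAY_DELAY raised to 100 and the wait switched to mdelay(), ql_wait_mbx_cmd_cmplt() now polls up to 100 times with a 100 ms busy-wait between reads, a worst case of roughly 10 seconds before returning -ETIMEDOUT, where the old loop gave up after 50 reads of 10 us each. A tiny standalone version of the same bounded-poll pattern, with a stub in place of the STS register read:

#include <stdio.h>

#define POLL_COUNT    100
#define POLL_DELAY_MS 100

static int sts_has_pi(void)
{
        static int reads;
        return ++reads == 7;    /* pretend the bit appears on the 7th read */
}

static int wait_mbx_cmd_cmplt(void)
{
        int count = POLL_COUNT;

        do {
                if (sts_has_pi())
                        return 0;
                /* the driver calls mdelay(POLL_DELAY_MS) here */
        } while (--count);

        return -1;      /* timed out after about POLL_COUNT * POLL_DELAY_MS ms */
}

int main(void)
{
        printf("result: %d\n", wait_mbx_cmd_cmplt());
        return 0;
}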
@@ -453,6 +453,13 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
453 } 453 }
454end: 454end:
455 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); 455 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
456 /* Restore the original mailbox count to
457 * what the caller asked for. This can get
458 * changed when a mailbox command is waiting
459 * for a response and an AEN arrives and
460 * is handled.
 461 */
462 mbcp->out_count = orig_count;
456 return status; 463 return status;
457} 464}
458 465
@@ -540,6 +547,40 @@ end:
540 return status; 547 return status;
541} 548}
542 549
550
551/* Get MPI firmware version. This will be used for
552 * driver banner and for ethtool info.
553 * Returns zero on success.
554 */
555int ql_mb_about_fw(struct ql_adapter *qdev)
556{
557 struct mbox_params mbc;
558 struct mbox_params *mbcp = &mbc;
559 int status = 0;
560
561 memset(mbcp, 0, sizeof(struct mbox_params));
562
563 mbcp->in_count = 1;
564 mbcp->out_count = 3;
565
566 mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
567
568 status = ql_mailbox_command(qdev, mbcp);
569 if (status)
570 return status;
571
572 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
573 QPRINTK(qdev, DRV, ERR,
574 "Failed about firmware command\n");
575 status = -EIO;
576 }
577
578 /* Store the firmware version */
579 qdev->fw_rev_id = mbcp->mbox_out[1];
580
581 return status;
582}
583
543/* Get functional state for MPI firmware. 584/* Get functional state for MPI firmware.
544 * Returns zero on success. 585 * Returns zero on success.
545 */ 586 */
@@ -754,7 +795,6 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
754{ 795{
755 struct ql_adapter *qdev = 796 struct ql_adapter *qdev =
756 container_of(work, struct ql_adapter, mpi_port_cfg_work.work); 797 container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
757 struct net_device *ndev = qdev->ndev;
758 int status; 798 int status;
759 799
760 status = ql_mb_get_port_cfg(qdev); 800 status = ql_mb_get_port_cfg(qdev);
@@ -764,9 +804,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
764 goto err; 804 goto err;
765 } 805 }
766 806
767 if (ndev->mtu <= 2500) 807 if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
768 goto end;
769 else if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
770 qdev->max_frame_size == 808 qdev->max_frame_size ==
771 CFG_DEFAULT_MAX_FRAME_SIZE) 809 CFG_DEFAULT_MAX_FRAME_SIZE)
772 goto end; 810 goto end;
@@ -831,13 +869,19 @@ void ql_mpi_work(struct work_struct *work)
831 container_of(work, struct ql_adapter, mpi_work.work); 869 container_of(work, struct ql_adapter, mpi_work.work);
832 struct mbox_params mbc; 870 struct mbox_params mbc;
833 struct mbox_params *mbcp = &mbc; 871 struct mbox_params *mbcp = &mbc;
872 int err = 0;
834 873
835 mutex_lock(&qdev->mpi_mutex); 874 mutex_lock(&qdev->mpi_mutex);
836 875
837 while (ql_read32(qdev, STS) & STS_PI) { 876 while (ql_read32(qdev, STS) & STS_PI) {
838 memset(mbcp, 0, sizeof(struct mbox_params)); 877 memset(mbcp, 0, sizeof(struct mbox_params));
839 mbcp->out_count = 1; 878 mbcp->out_count = 1;
840 ql_mpi_handler(qdev, mbcp); 879 /* Don't continue if an async event
880 * did not complete properly.
881 */
882 err = ql_mpi_handler(qdev, mbcp);
883 if (err)
884 break;
841 } 885 }
842 886
843 mutex_unlock(&qdev->mpi_mutex); 887 mutex_unlock(&qdev->mpi_mutex);
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 1508b124e3d8..ed63d23a6452 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -401,6 +401,9 @@ static void r6040_init_mac_regs(struct net_device *dev)
401 * we may got called by r6040_tx_timeout which has left 401 * we may got called by r6040_tx_timeout which has left
402 * some unsent tx buffers */ 402 * some unsent tx buffers */
403 iowrite16(0x01, ioaddr + MTPR); 403 iowrite16(0x01, ioaddr + MTPR);
404
405 /* Check media */
406 mii_check_media(&lp->mii_if, 1, 1);
404} 407}
405 408
406static void r6040_tx_timeout(struct net_device *dev) 409static void r6040_tx_timeout(struct net_device *dev)
@@ -528,6 +531,8 @@ static int r6040_phy_mode_chk(struct net_device *dev)
528 phy_dat = 0x0000; 531 phy_dat = 0x0000;
529 } 532 }
530 533
534 mii_check_media(&lp->mii_if, 0, 1);
535
531 return phy_dat; 536 return phy_dat;
532}; 537};
533 538
@@ -810,7 +815,6 @@ static void r6040_timer(unsigned long data)
810 lp->phy_mode = phy_mode; 815 lp->phy_mode = phy_mode;
811 lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode; 816 lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode;
812 iowrite16(lp->mcr0, ioaddr); 817 iowrite16(lp->mcr0, ioaddr);
813 printk(KERN_INFO "Link Change %x \n", ioread16(ioaddr));
814 } 818 }
815 819
816 /* Timer active again */ 820 /* Timer active again */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0ec0605bcebd..007c881896d2 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3279,8 +3279,6 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
3279 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); 3279 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
3280 txd->opts1 = cpu_to_le32(status); 3280 txd->opts1 = cpu_to_le32(status);
3281 3281
3282 dev->trans_start = jiffies;
3283
3284 tp->cur_tx += frags + 1; 3282 tp->cur_tx += frags + 1;
3285 3283
3286 smp_wmb(); 3284 smp_wmb();
@@ -3381,7 +3379,7 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
3381 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry); 3379 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
3382 3380
3383 if (status & LastFrag) { 3381 if (status & LastFrag) {
3384 dev_kfree_skb_irq(tx_skb->skb); 3382 dev_kfree_skb(tx_skb->skb);
3385 tx_skb->skb = NULL; 3383 tx_skb->skb = NULL;
3386 } 3384 }
3387 dirty_tx++; 3385 dirty_tx++;
@@ -3563,54 +3561,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
3563 int handled = 0; 3561 int handled = 0;
3564 int status; 3562 int status;
3565 3563
3564 /* loop handling interrupts until we have no new ones or
 3565 * we hit an invalid/hotplug case.
3566 */
3566 status = RTL_R16(IntrStatus); 3567 status = RTL_R16(IntrStatus);
3568 while (status && status != 0xffff) {
3569 handled = 1;
3567 3570
3568 /* hotplug/major error/no more work/shared irq */ 3571 /* Handle all of the error cases first. These will reset
3569 if ((status == 0xffff) || !status) 3572 * the chip, so just exit the loop.
3570 goto out; 3573 */
3571 3574 if (unlikely(!netif_running(dev))) {
3572 handled = 1; 3575 rtl8169_asic_down(ioaddr);
3576 break;
3577 }
3573 3578
3574 if (unlikely(!netif_running(dev))) { 3579 /* Work around for rx fifo overflow */
3575 rtl8169_asic_down(ioaddr); 3580 if (unlikely(status & RxFIFOOver) &&
3576 goto out; 3581 (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
3577 } 3582 netif_stop_queue(dev);
3583 rtl8169_tx_timeout(dev);
3584 break;
3585 }
3578 3586
3579 status &= tp->intr_mask; 3587 if (unlikely(status & SYSErr)) {
3580 RTL_W16(IntrStatus, 3588 rtl8169_pcierr_interrupt(dev);
3581 (status & RxFIFOOver) ? (status | RxOverflow) : status); 3589 break;
3590 }
3582 3591
3583 if (!(status & tp->intr_event)) 3592 if (status & LinkChg)
3584 goto out; 3593 rtl8169_check_link_status(dev, tp, ioaddr);
3585 3594
 3586 /* Work around for rx fifo overflow */ 3595 /* We need to see the latest version of tp->intr_mask to
3587 if (unlikely(status & RxFIFOOver) && 3596 * avoid ignoring an MSI interrupt and having to wait for
3588 (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 3597 * another event which may never come.
3589 netif_stop_queue(dev); 3598 */
3590 rtl8169_tx_timeout(dev); 3599 smp_rmb();
3591 goto out; 3600 if (status & tp->intr_mask & tp->napi_event) {
3592 } 3601 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
3602 tp->intr_mask = ~tp->napi_event;
3603
3604 if (likely(napi_schedule_prep(&tp->napi)))
3605 __napi_schedule(&tp->napi);
3606 else if (netif_msg_intr(tp)) {
3607 printk(KERN_INFO "%s: interrupt %04x in poll\n",
3608 dev->name, status);
3609 }
3610 }
3593 3611
3594 if (unlikely(status & SYSErr)) { 3612 /* We only get a new MSI interrupt when all active irq
3595 rtl8169_pcierr_interrupt(dev); 3613 * sources on the chip have been acknowledged. So, ack
3596 goto out; 3614 * everything we've seen and check if new sources have become
3615 * active to avoid blocking all interrupts from the chip.
3616 */
3617 RTL_W16(IntrStatus,
3618 (status & RxFIFOOver) ? (status | RxOverflow) : status);
3619 status = RTL_R16(IntrStatus);
3597 } 3620 }
3598 3621
3599 if (status & LinkChg)
3600 rtl8169_check_link_status(dev, tp, ioaddr);
3601
3602 if (status & tp->napi_event) {
3603 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
3604 tp->intr_mask = ~tp->napi_event;
3605
3606 if (likely(napi_schedule_prep(&tp->napi)))
3607 __napi_schedule(&tp->napi);
3608 else if (netif_msg_intr(tp)) {
3609 printk(KERN_INFO "%s: interrupt %04x in poll\n",
3610 dev->name, status);
3611 }
3612 }
3613out:
3614 return IRQ_RETVAL(handled); 3622 return IRQ_RETVAL(handled);
3615} 3623}
3616 3624
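The rewritten rtl8169_interrupt above keeps servicing the chip while IntrStatus is non-zero and not 0xffff (all ones means the device has been unplugged), acknowledges everything it saw, and re-reads the status before deciding whether to loop again. As the added comment explains, this matters for MSI: a new message is only generated once every pending source has been acknowledged, so leaving with unacked sources can stall interrupts indefinitely. A minimal sketch of that loop shape, with read_status(), ack_status() and handle_events() as hypothetical stand-ins for the driver's RTL_R16/RTL_W16 accessors and its event handling:

#include <linux/interrupt.h>
#include <linux/types.h>

struct example_priv { void __iomem *ioaddr; };

static u16 read_status(struct example_priv *p)           { return 0; /* read IntrStatus          */ }
static void ack_status(struct example_priv *p, u16 s)    { /* write s back to IntrStatus        */ }
static int handle_events(struct example_priv *p, u16 s)  { return 0; /* rx/tx/link/error work    */ }

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;
	int handled = 0;
	u16 status = read_status(priv);

	while (status && status != 0xffff) {	/* 0xffff: device gone/hotplugged */
		handled = 1;

		if (handle_events(priv, status) < 0)
			break;			/* fatal cases reset the chip and stop */

		/* Ack everything we have seen, then look again: with MSI a new
		 * edge only fires once all pending sources are acknowledged.
		 */
		ack_status(priv, status);
		status = read_status(priv);
	}

	return IRQ_RETVAL(handled);
}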
@@ -3626,13 +3634,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
3626 3634
3627 if (work_done < budget) { 3635 if (work_done < budget) {
3628 napi_complete(napi); 3636 napi_complete(napi);
3629 tp->intr_mask = 0xffff; 3637
3630 /* 3638 /* We need to force the visibility of tp->intr_mask
3631 * 20040426: the barrier is not strictly required but the 3639 * for other CPUs, as we can lose an MSI interrupt
3632 * behavior of the irq handler could be less predictable 3640 * and potentially wait for a retransmit timeout if we don't.
3633 * without it. Btw, the lack of flush for the posted pci 3641 * The posted write to IntrMask is safe, as it will
3634 * write is safe - FR 3642 * eventually make it to the chip and we won't lose anything
3643 * until it does.
3635 */ 3644 */
3645 tp->intr_mask = 0xffff;
3636 smp_wmb(); 3646 smp_wmb();
3637 RTL_W16(IntrMask, tp->intr_event); 3647 RTL_W16(IntrMask, tp->intr_event);
3638 } 3648 }
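The matching change in rtl8169_poll restores tp->intr_mask before re-enabling the hardware mask, with an smp_wmb() in between; the smp_rmb() added to the interrupt handler pairs with it, so an ISR running on another CPU cannot observe interrupts re-enabled while still reading the stale, masked value of intr_mask and silently drop an MSI event. A minimal sketch of the pairing, assuming a driver-private sw_mask field (mirroring tp->intr_mask) and a hypothetical hw_unmask() register write:

#include <linux/netdevice.h>

struct nic {
	u16 sw_mask;			/* software copy of the interrupt mask */
	struct napi_struct napi;
};

static void hw_unmask(struct nic *nic) { /* stand-in for the IntrMask register write */ }

static void nic_poll_done(struct nic *nic)
{
	nic->sw_mask = 0xffff;		/* publish the unmasked software state ... */
	smp_wmb();			/* ... before the chip can interrupt again  */
	hw_unmask(nic);
}

static void nic_isr_event(struct nic *nic, u16 status)
{
	smp_rmb();			/* pairs with smp_wmb() in nic_poll_done()  */
	if (status & nic->sw_mask)
		napi_schedule(&nic->napi);
}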
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 80562ea77de3..458daa06ed41 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1764,7 +1764,7 @@ static int init_nic(struct s2io_nic *nic)
1764 * by then we return error. 1764 * by then we return error.
1765 */ 1765 */
1766 time = 0; 1766 time = 0;
1767 while (TRUE) { 1767 while (true) {
1768 val64 = readq(&bar0->rti_command_mem); 1768 val64 = readq(&bar0->rti_command_mem);
1769 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) 1769 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1770 break; 1770 break;
@@ -2137,7 +2137,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2137 2137
2138 herc = (sp->device_type == XFRAME_II_DEVICE); 2138 herc = (sp->device_type == XFRAME_II_DEVICE);
2139 2139
2140 if (flag == FALSE) { 2140 if (flag == false) {
2141 if ((!herc && (sp->pdev->revision >= 4)) || herc) { 2141 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2142 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE)) 2142 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2143 ret = 1; 2143 ret = 1;
@@ -3587,7 +3587,7 @@ static void s2io_reset(struct s2io_nic * sp)
3587 writeq(val64, &bar0->pcc_err_reg); 3587 writeq(val64, &bar0->pcc_err_reg);
3588 } 3588 }
3589 3589
3590 sp->device_enabled_once = FALSE; 3590 sp->device_enabled_once = false;
3591} 3591}
3592 3592
3593/** 3593/**
@@ -4299,7 +4299,6 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4299 s2io_stop_tx_queue(sp, fifo->fifo_no); 4299 s2io_stop_tx_queue(sp, fifo->fifo_no);
4300 } 4300 }
4301 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize; 4301 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4302 dev->trans_start = jiffies;
4303 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4302 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4304 4303
4305 if (sp->config.intr_type == MSI_X) 4304 if (sp->config.intr_type == MSI_X)
@@ -5573,10 +5572,10 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
5573 5572
5574 val64 = readq(&bar0->rmac_pause_cfg); 5573 val64 = readq(&bar0->rmac_pause_cfg);
5575 if (val64 & RMAC_PAUSE_GEN_ENABLE) 5574 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5576 ep->tx_pause = TRUE; 5575 ep->tx_pause = true;
5577 if (val64 & RMAC_PAUSE_RX_ENABLE) 5576 if (val64 & RMAC_PAUSE_RX_ENABLE)
5578 ep->rx_pause = TRUE; 5577 ep->rx_pause = true;
5579 ep->autoneg = FALSE; 5578 ep->autoneg = false;
5580} 5579}
5581 5580
5582/** 5581/**
@@ -6807,7 +6806,7 @@ static void s2io_set_link(struct work_struct *work)
6807 val64 |= ADAPTER_LED_ON; 6806 val64 |= ADAPTER_LED_ON;
6808 writeq(val64, &bar0->adapter_control); 6807 writeq(val64, &bar0->adapter_control);
6809 } 6808 }
6810 nic->device_enabled_once = TRUE; 6809 nic->device_enabled_once = true;
6811 } else { 6810 } else {
6812 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name); 6811 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6813 DBG_PRINT(ERR_DBG, "device is not Quiescent\n"); 6812 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
@@ -7755,7 +7754,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7755 struct s2io_nic *sp; 7754 struct s2io_nic *sp;
7756 struct net_device *dev; 7755 struct net_device *dev;
7757 int i, j, ret; 7756 int i, j, ret;
7758 int dma_flag = FALSE; 7757 int dma_flag = false;
7759 u32 mac_up, mac_down; 7758 u32 mac_up, mac_down;
7760 u64 val64 = 0, tmp64 = 0; 7759 u64 val64 = 0, tmp64 = 0;
7761 struct XENA_dev_config __iomem *bar0 = NULL; 7760 struct XENA_dev_config __iomem *bar0 = NULL;
@@ -7778,7 +7777,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7778 7777
7779 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 7778 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7780 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n"); 7779 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7781 dma_flag = TRUE; 7780 dma_flag = true;
7782 if (pci_set_consistent_dma_mask 7781 if (pci_set_consistent_dma_mask
7783 (pdev, DMA_BIT_MASK(64))) { 7782 (pdev, DMA_BIT_MASK(64))) {
7784 DBG_PRINT(ERR_DBG, 7783 DBG_PRINT(ERR_DBG,
@@ -7819,7 +7818,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7819 sp->dev = dev; 7818 sp->dev = dev;
7820 sp->pdev = pdev; 7819 sp->pdev = pdev;
7821 sp->high_dma_flag = dma_flag; 7820 sp->high_dma_flag = dma_flag;
7822 sp->device_enabled_once = FALSE; 7821 sp->device_enabled_once = false;
7823 if (rx_ring_mode == 1) 7822 if (rx_ring_mode == 1)
7824 sp->rxd_mode = RXD_MODE_1; 7823 sp->rxd_mode = RXD_MODE_1;
7825 if (rx_ring_mode == 2) 7824 if (rx_ring_mode == 2)
@@ -7965,7 +7964,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7965 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 7964 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7966 7965
7967 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 7966 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7968 if (sp->high_dma_flag == TRUE) 7967 if (sp->high_dma_flag == true)
7969 dev->features |= NETIF_F_HIGHDMA; 7968 dev->features |= NETIF_F_HIGHDMA;
7970 dev->features |= NETIF_F_TSO; 7969 dev->features |= NETIF_F_TSO;
7971 dev->features |= NETIF_F_TSO6; 7970 dev->features |= NETIF_F_TSO6;
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 55cb943f23f8..d5c5be6c07b9 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -18,15 +18,6 @@
18#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz)) 18#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz))
19#define INV(d) ((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8)| ((d>>24)&0xff) 19#define INV(d) ((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8)| ((d>>24)&0xff)
20 20
21#ifndef BOOL
22#define BOOL int
23#endif
24
25#ifndef TRUE
26#define TRUE 1
27#define FALSE 0
28#endif
29
30#undef SUCCESS 21#undef SUCCESS
31#define SUCCESS 0 22#define SUCCESS 0
32#define FAILURE -1 23#define FAILURE -1
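The s2io hunks above, together with this s2io.h hunk, are a mechanical switch from the driver's private BOOL/TRUE/FALSE macros to the kernel-wide bool/true/false (available through linux/types.h), which is why the whole macro block can simply be deleted from the header. A one-function illustration of the resulting style:

#include <linux/types.h>

static bool link_is_up(u64 status, u64 link_bit)
{
	return (status & link_bit) != 0;	/* plain bool, no driver-local TRUE/FALSE */
}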
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 043795715955..b67ccca3fc1a 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -438,6 +438,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
438 kfree_skb(skb); 438 kfree_skb(skb);
439 return -EPIPE; 439 return -EPIPE;
440 } 440 }
441 efx->net_dev->trans_start = jiffies;
441 } 442 }
442 443
443 return 0; 444 return 0;
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index db723c58f6f1..f4d509015f75 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -63,6 +63,7 @@
63 63
64/* extended status register */ 64/* extended status register */
65#define PMA_PMD_XSTATUS_REG 49153 65#define PMA_PMD_XSTATUS_REG 49153
66#define PMA_PMD_XSTAT_MDIX_LBN 14
66#define PMA_PMD_XSTAT_FLP_LBN (12) 67#define PMA_PMD_XSTAT_FLP_LBN (12)
67 68
68/* LED control register */ 69/* LED control register */
@@ -741,9 +742,17 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
741 742
742 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); 743 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
743 744
744 if (efx->phy_type != PHY_TYPE_SFX7101) 745 if (efx->phy_type != PHY_TYPE_SFX7101) {
745 ecmd->supported |= (SUPPORTED_100baseT_Full | 746 ecmd->supported |= (SUPPORTED_100baseT_Full |
746 SUPPORTED_1000baseT_Full); 747 SUPPORTED_1000baseT_Full);
748 if (ecmd->speed != SPEED_10000) {
749 ecmd->eth_tp_mdix =
750 (efx_mdio_read(efx, MDIO_MMD_PMAPMD,
751 PMA_PMD_XSTATUS_REG) &
752 (1 << PMA_PMD_XSTAT_MDIX_LBN))
753 ? ETH_TP_MDI_X : ETH_TP_MDI;
754 }
755 }
747 756
748 /* In loopback, the PHY automatically brings up the correct interface, 757 /* In loopback, the PHY automatically brings up the correct interface,
749 * but doesn't advertise the correct speed. So override it */ 758 * but doesn't advertise the correct speed. So override it */
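The tenxpress change exposes cable crossover (MDI vs. MDI-X) through ethtool by sampling bit 14 of the PMA/PMD extended status register, and only below 10G where the distinction is reported this way. A small sketch of the mapping, using the PMA_PMD_XSTAT_MDIX_LBN value introduced in the hunk and the standard ETH_TP_MDI constants:

#include <linux/ethtool.h>

#define PMA_PMD_XSTAT_MDIX_LBN 14	/* as defined in the hunk above */

static u8 mdix_from_xstatus(u16 xstat)
{
	return (xstat & (1 << PMA_PMD_XSTAT_MDIX_LBN)) ? ETH_TP_MDI_X : ETH_TP_MDI;
}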
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index d6681edb7014..14a14788566c 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -360,13 +360,6 @@ inline int efx_xmit(struct efx_nic *efx,
360 360
361 /* Map fragments for DMA and add to TX queue */ 361 /* Map fragments for DMA and add to TX queue */
362 rc = efx_enqueue_skb(tx_queue, skb); 362 rc = efx_enqueue_skb(tx_queue, skb);
363 if (unlikely(rc != NETDEV_TX_OK))
364 goto out;
365
366 /* Update last TX timer */
367 efx->net_dev->trans_start = jiffies;
368
369 out:
370 return rc; 363 return rc;
371} 364}
372 365
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 55ccd51d247e..e2247669a495 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -47,7 +47,7 @@
47#define PHY_ID_ANY 0x1f 47#define PHY_ID_ANY 0x1f
48#define MII_REG_ANY 0x1f 48#define MII_REG_ANY 0x1f
49 49
50#define DRV_VERSION "1.2" 50#define DRV_VERSION "1.3"
51#define DRV_NAME "sis190" 51#define DRV_NAME "sis190"
52#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION 52#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53#define PFX DRV_NAME ": " 53#define PFX DRV_NAME ": "
@@ -317,6 +317,7 @@ static struct mii_chip_info {
317 unsigned int type; 317 unsigned int type;
318 u32 feature; 318 u32 feature;
319} mii_chip_table[] = { 319} mii_chip_table[] = {
320 { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 },
320 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 }, 321 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
321 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 }, 322 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
322 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 }, 323 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
@@ -347,7 +348,7 @@ static struct {
347 u32 msg_enable; 348 u32 msg_enable;
348} debug = { -1 }; 349} debug = { -1 };
349 350
350MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver"); 351MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
351module_param(rx_copybreak, int, 0); 352module_param(rx_copybreak, int, 0);
352MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 353MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
353module_param_named(debug, debug.msg_enable, int, 0); 354module_param_named(debug, debug.msg_enable, int, 0);
@@ -539,8 +540,8 @@ static bool sis190_try_rx_copy(struct sis190_private *tp,
539 if (!skb) 540 if (!skb)
540 goto out; 541 goto out;
541 542
542 pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size, 543 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
543 PCI_DMA_FROMDEVICE); 544 PCI_DMA_FROMDEVICE);
544 skb_reserve(skb, 2); 545 skb_reserve(skb, 2);
545 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); 546 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
546 *sk_buff = skb; 547 *sk_buff = skb;
@@ -942,9 +943,9 @@ static void sis190_phy_task(struct work_struct *work)
942 u32 ctl; 943 u32 ctl;
943 const char *msg; 944 const char *msg;
944 } reg31[] = { 945 } reg31[] = {
945 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000, 946 { LPA_1000FULL, 0x07000c00 | 0x00001000,
946 "1000 Mbps Full Duplex" }, 947 "1000 Mbps Full Duplex" },
947 { LPA_1000XHALF | LPA_SLCT, 0x07000c00, 948 { LPA_1000HALF, 0x07000c00,
948 "1000 Mbps Half Duplex" }, 949 "1000 Mbps Half Duplex" },
949 { LPA_100FULL, 0x04000800 | 0x00001000, 950 { LPA_100FULL, 0x04000800 | 0x00001000,
950 "100 Mbps Full Duplex" }, 951 "100 Mbps Full Duplex" },
@@ -955,22 +956,35 @@ static void sis190_phy_task(struct work_struct *work)
955 { LPA_10HALF, 0x04000400, 956 { LPA_10HALF, 0x04000400,
956 "10 Mbps Half Duplex" }, 957 "10 Mbps Half Duplex" },
957 { 0, 0x04000400, "unknown" } 958 { 0, 0x04000400, "unknown" }
958 }, *p; 959 }, *p = NULL;
959 u16 adv; 960 u16 adv, autoexp, gigadv, gigrec;
960 961
961 val = mdio_read(ioaddr, phy_id, 0x1f); 962 val = mdio_read(ioaddr, phy_id, 0x1f);
962 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val); 963 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
963 964
964 val = mdio_read(ioaddr, phy_id, MII_LPA); 965 val = mdio_read(ioaddr, phy_id, MII_LPA);
965 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE); 966 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
966 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n", 967 autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
967 dev->name, val, adv); 968 net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n",
968 969 dev->name, val, adv, autoexp);
969 val &= adv; 970
971 if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
972 /* check for gigabit speed */
973 gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
974 gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
975 val = (gigadv & (gigrec >> 2));
976 if (val & ADVERTISE_1000FULL)
977 p = reg31;
978 else if (val & ADVERTISE_1000HALF)
979 p = reg31 + 1;
980 }
981 if (!p) {
982 val &= adv;
970 983
971 for (p = reg31; p->val; p++) { 984 for (p = reg31; p->val; p++) {
972 if ((val & p->val) == p->val) 985 if ((val & p->val) == p->val)
973 break; 986 break;
987 }
974 } 988 }
975 989
976 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00; 990 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
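The sis190 rework stops overloading the base-page LPA with LPA_1000XFULL/LPA_1000XHALF (those bit positions describe 1000BASE-X and alias the 10/100 ability bits on a copper PHY) and instead resolves gigabit from the 1000BASE-T registers: the local advertisement in MII_CTRL1000 ANDed with the partner ability from MII_STAT1000, whose bits sit two positions higher, hence the gigrec >> 2. A self-contained sketch of that resolution using only the standard mii.h definitions:

#include <linux/mii.h>

/* Returns 2 for 1000FD, 1 for 1000HD, 0 if gigabit was not negotiated. */
static int resolve_gige(u16 ctrl1000, u16 stat1000)
{
	u16 common = ctrl1000 & (stat1000 >> 2);	/* align LPA_1000* onto ADVERTISE_1000* */

	if (common & ADVERTISE_1000FULL)
		return 2;
	if (common & ADVERTISE_1000HALF)
		return 1;
	return 0;
}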
@@ -1204,8 +1218,6 @@ static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1204 1218
1205 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb); 1219 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1206 1220
1207 dev->trans_start = jiffies;
1208
1209 dirty_tx = tp->dirty_tx; 1221 dirty_tx = tp->dirty_tx;
1210 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) { 1222 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1211 netif_stop_queue(dev); 1223 netif_stop_queue(dev);
@@ -1315,12 +1327,15 @@ static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1315 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ? 1327 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1316 LAN : HOME) : p->type; 1328 LAN : HOME) : p->type;
1317 tp->features |= p->feature; 1329 tp->features |= p->feature;
1318 } else 1330 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1331 pci_name(tp->pci_dev), p->name, phy_id);
1332 } else {
1319 phy->type = UNKNOWN; 1333 phy->type = UNKNOWN;
1320 1334 net_probe(tp, KERN_INFO
1321 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n", 1335 "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1322 pci_name(tp->pci_dev), 1336 pci_name(tp->pci_dev),
1323 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id); 1337 phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1338 }
1324} 1339}
1325 1340
1326static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp) 1341static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index c11cdd08ec57..60d502eef4fc 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2837,8 +2837,6 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2837 netif_stop_queue(dev); 2837 netif_stop_queue(dev);
2838 } 2838 }
2839 2839
2840 dev->trans_start = jiffies;
2841
2842 return NETDEV_TX_OK; 2840 return NETDEV_TX_OK;
2843} 2841}
2844 2842
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index a2ff9cb1e7ac..6b5946fe8ae2 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1690,7 +1690,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1690 1690
1691 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); 1691 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1692 1692
1693 dev->trans_start = jiffies;
1694 return NETDEV_TX_OK; 1693 return NETDEV_TX_OK;
1695 1694
1696mapping_unwind: 1695mapping_unwind:
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 3cff84078a9e..b60639bd181b 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2155,7 +2155,7 @@ static int smsc911x_resume(struct platform_device *pdev)
2155 2155
2156static struct platform_driver smsc911x_driver = { 2156static struct platform_driver smsc911x_driver = {
2157 .probe = smsc911x_drv_probe, 2157 .probe = smsc911x_drv_probe,
2158 .remove = smsc911x_drv_remove, 2158 .remove = __devexit_p(smsc911x_drv_remove),
2159 .driver = { 2159 .driver = {
2160 .name = SMSC_CHIPNAME, 2160 .name = SMSC_CHIPNAME,
2161 }, 2161 },
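The smsc911x one-liner wraps the remove callback in __devexit_p() so that, on kernels built without hotplug support where __devexit functions are discarded, the pointer compiles to NULL instead of referencing discarded code. As I recall, the macro of this era is roughly:

/* Approximate definition from linux/init.h of this period, for context only. */
#ifdef CONFIG_HOTPLUG
#define __devexit_p(x)	x
#else
#define __devexit_p(x)	NULL
#endif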
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index c399b1955c1e..545f81b34ad7 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -369,7 +369,6 @@ struct netdev_private {
369 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 369 struct sk_buff* tx_skbuff[TX_RING_SIZE];
370 dma_addr_t tx_ring_dma; 370 dma_addr_t tx_ring_dma;
371 dma_addr_t rx_ring_dma; 371 dma_addr_t rx_ring_dma;
372 struct net_device_stats stats;
373 struct timer_list timer; /* Media monitoring timer. */ 372 struct timer_list timer; /* Media monitoring timer. */
374 /* Frequently used values: keep some adjacent for cache effect. */ 373 /* Frequently used values: keep some adjacent for cache effect. */
375 spinlock_t lock; 374 spinlock_t lock;
@@ -975,7 +974,7 @@ static void tx_timeout(struct net_device *dev)
975 dev->if_port = 0; 974 dev->if_port = 0;
976 975
977 dev->trans_start = jiffies; 976 dev->trans_start = jiffies;
978 np->stats.tx_errors++; 977 dev->stats.tx_errors++;
979 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { 978 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
980 netif_wake_queue(dev); 979 netif_wake_queue(dev);
981 } 980 }
@@ -1123,7 +1122,7 @@ reset_tx (struct net_device *dev)
1123 else 1122 else
1124 dev_kfree_skb (skb); 1123 dev_kfree_skb (skb);
1125 np->tx_skbuff[i] = NULL; 1124 np->tx_skbuff[i] = NULL;
1126 np->stats.tx_dropped++; 1125 dev->stats.tx_dropped++;
1127 } 1126 }
1128 } 1127 }
1129 np->cur_tx = np->dirty_tx = 0; 1128 np->cur_tx = np->dirty_tx = 0;
@@ -1181,15 +1180,15 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1181 if (netif_msg_tx_err(np)) 1180 if (netif_msg_tx_err(np))
1182 printk("%s: Transmit error status %4.4x.\n", 1181 printk("%s: Transmit error status %4.4x.\n",
1183 dev->name, tx_status); 1182 dev->name, tx_status);
1184 np->stats.tx_errors++; 1183 dev->stats.tx_errors++;
1185 if (tx_status & 0x10) 1184 if (tx_status & 0x10)
1186 np->stats.tx_fifo_errors++; 1185 dev->stats.tx_fifo_errors++;
1187 if (tx_status & 0x08) 1186 if (tx_status & 0x08)
1188 np->stats.collisions++; 1187 dev->stats.collisions++;
1189 if (tx_status & 0x04) 1188 if (tx_status & 0x04)
1190 np->stats.tx_fifo_errors++; 1189 dev->stats.tx_fifo_errors++;
1191 if (tx_status & 0x02) 1190 if (tx_status & 0x02)
1192 np->stats.tx_window_errors++; 1191 dev->stats.tx_window_errors++;
1193 1192
1194 /* 1193 /*
1195 ** This reset has been verified on 1194 ** This reset has been verified on
@@ -1313,11 +1312,15 @@ static void rx_poll(unsigned long data)
1313 if (netif_msg_rx_err(np)) 1312 if (netif_msg_rx_err(np))
1314 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n", 1313 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1315 frame_status); 1314 frame_status);
1316 np->stats.rx_errors++; 1315 dev->stats.rx_errors++;
1317 if (frame_status & 0x00100000) np->stats.rx_length_errors++; 1316 if (frame_status & 0x00100000)
1318 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++; 1317 dev->stats.rx_length_errors++;
1319 if (frame_status & 0x00060000) np->stats.rx_frame_errors++; 1318 if (frame_status & 0x00010000)
1320 if (frame_status & 0x00080000) np->stats.rx_crc_errors++; 1319 dev->stats.rx_fifo_errors++;
1320 if (frame_status & 0x00060000)
1321 dev->stats.rx_frame_errors++;
1322 if (frame_status & 0x00080000)
1323 dev->stats.rx_crc_errors++;
1321 if (frame_status & 0x00100000) { 1324 if (frame_status & 0x00100000) {
1322 printk(KERN_WARNING "%s: Oversized Ethernet frame," 1325 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1323 " status %8.8x.\n", 1326 " status %8.8x.\n",
@@ -1485,22 +1488,22 @@ static struct net_device_stats *get_stats(struct net_device *dev)
1485 the vulnerability window is very small and statistics are 1488 the vulnerability window is very small and statistics are
1486 non-critical. */ 1489 non-critical. */
1487 /* The chip only needs to report frames silently dropped. */ 1490 /* The chip only needs to report frames silently dropped. */
1488 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); 1491 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1489 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK); 1492 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1490 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK); 1493 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1491 np->stats.collisions += ioread8(ioaddr + StatsLateColl); 1494 dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
1492 np->stats.collisions += ioread8(ioaddr + StatsMultiColl); 1495 dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1493 np->stats.collisions += ioread8(ioaddr + StatsOneColl); 1496 dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
1494 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); 1497 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1495 ioread8(ioaddr + StatsTxDefer); 1498 ioread8(ioaddr + StatsTxDefer);
1496 for (i = StatsTxDefer; i <= StatsMcastRx; i++) 1499 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1497 ioread8(ioaddr + i); 1500 ioread8(ioaddr + i);
1498 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); 1501 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1499 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; 1502 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1500 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); 1503 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1501 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16; 1504 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1502 1505
1503 return &np->stats; 1506 return &dev->stats;
1504} 1507}
1505 1508
1506static void set_rx_mode(struct net_device *dev) 1509static void set_rx_mode(struct net_device *dev)
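The sundance conversion (and the via-rhine and via-velocity conversions later in this diff) drops the driver-private struct net_device_stats in favour of the copy embedded in struct net_device, so get_stats() just returns &dev->stats and every counter update loses one level of indirection. A minimal sketch of the resulting pattern; read_hw_counter() and the counter ids are hypothetical stand-ins for the ioread calls:

#include <linux/netdevice.h>

enum { HW_RX_MISSED, HW_LATE_COLL };

static unsigned long read_hw_counter(struct net_device *dev, int id)
{
	return 0;	/* stand-in for ioread8()/ioread16() of a hardware counter */
}

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	dev->stats.rx_missed_errors += read_hw_counter(dev, HW_RX_MISSED);
	dev->stats.collisions       += read_hw_counter(dev, HW_LATE_COLL);

	return &dev->stats;	/* no driver-private net_device_stats copy */
}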
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 7f4a9683ba1e..3c2679cd196b 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -948,8 +948,7 @@ static void print_rxfd(struct rxf_desc *rxfd);
948 948
949static void bdx_rxdb_destroy(struct rxdb *db) 949static void bdx_rxdb_destroy(struct rxdb *db)
950{ 950{
951 if (db) 951 vfree(db);
952 vfree(db);
953} 952}
954 953
955static struct rxdb *bdx_rxdb_create(int nelem) 954static struct rxdb *bdx_rxdb_create(int nelem)
@@ -1482,10 +1481,8 @@ static void bdx_tx_db_close(struct txdb *d)
1482{ 1481{
1483 BDX_ASSERT(d == NULL); 1482 BDX_ASSERT(d == NULL);
1484 1483
1485 if (d->start) { 1484 vfree(d->start);
1486 vfree(d->start); 1485 d->start = NULL;
1487 d->start = NULL;
1488 }
1489} 1486}
1490 1487
1491/************************************************************************* 1488/*************************************************************************
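Both tehuti cleanups above rely on vfree() accepting a NULL pointer as a no-op, just like kfree(), so the surrounding NULL checks are redundant. In sketch form:

#include <linux/vmalloc.h>

static void example_db_destroy(void *db)
{
	vfree(db);	/* vfree(NULL) does nothing, so no "if (db)" is needed */
}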
@@ -1718,8 +1715,9 @@ static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev)
1718 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); 1715 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1719 1716
1720#endif 1717#endif
1721 ndev->trans_start = jiffies; 1718#ifdef BDX_LLTX
1722 1719 ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1720#endif
1723 priv->net_stats.tx_packets++; 1721 priv->net_stats.tx_packets++;
1724 priv->net_stats.tx_bytes += skb->len; 1722 priv->net_stats.tx_bytes += skb->len;
1725 1723
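The many dev->trans_start = jiffies removals in this diff (r8169, s2io, sis190, skge, sky2, tg3, the sfc paths, ...) work because the core now stamps the transmit queue itself around ndo_start_xmit for drivers that take the normal xmit lock; only NETIF_F_LLTX drivers such as tehuti still have to do it by hand, which is exactly what the #ifdef BDX_LLTX hunk above keeps. A rough, renamed sketch of the core-side helper as I recall it from this era (not a verbatim copy):

#include <linux/jiffies.h>
#include <linux/netdevice.h>

static inline void example_txq_trans_update(struct netdev_queue *txq)
{
	/* LLTX drivers never take the xmit lock, so the owner stays -1 and
	 * they must keep updating trans_start themselves, as tehuti does.
	 */
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}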
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index eb65e25989f3..46a3f86125be 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5021,7 +5021,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5021 /* New SKB is guaranteed to be linear. */ 5021 /* New SKB is guaranteed to be linear. */
5022 entry = *start; 5022 entry = *start;
5023 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE); 5023 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5024 new_addr = skb_shinfo(new_skb)->dma_maps[0]; 5024 new_addr = skb_shinfo(new_skb)->dma_head;
5025 5025
5026 /* Make sure new skb does not cross any 4G boundaries. 5026 /* Make sure new skb does not cross any 4G boundaries.
5027 * Drop the packet if it does. 5027 * Drop the packet if it does.
@@ -5155,7 +5155,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5155 5155
5156 sp = skb_shinfo(skb); 5156 sp = skb_shinfo(skb);
5157 5157
5158 mapping = sp->dma_maps[0]; 5158 mapping = sp->dma_head;
5159 5159
5160 tp->tx_buffers[entry].skb = skb; 5160 tp->tx_buffers[entry].skb = skb;
5161 5161
@@ -5173,7 +5173,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5173 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5173 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5174 5174
5175 len = frag->size; 5175 len = frag->size;
5176 mapping = sp->dma_maps[i + 1]; 5176 mapping = sp->dma_maps[i];
5177 tp->tx_buffers[entry].skb = NULL; 5177 tp->tx_buffers[entry].skb = NULL;
5178 5178
5179 tg3_set_txd(tp, entry, mapping, len, 5179 tg3_set_txd(tp, entry, mapping, len,
@@ -5194,9 +5194,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5194 } 5194 }
5195 5195
5196out_unlock: 5196out_unlock:
5197 mmiowb(); 5197 mmiowb();
5198
5199 dev->trans_start = jiffies;
5200 5198
5201 return NETDEV_TX_OK; 5199 return NETDEV_TX_OK;
5202} 5200}
@@ -5333,7 +5331,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5333 5331
5334 sp = skb_shinfo(skb); 5332 sp = skb_shinfo(skb);
5335 5333
5336 mapping = sp->dma_maps[0]; 5334 mapping = sp->dma_head;
5337 5335
5338 tp->tx_buffers[entry].skb = skb; 5336 tp->tx_buffers[entry].skb = skb;
5339 5337
@@ -5358,7 +5356,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5358 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5356 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5359 5357
5360 len = frag->size; 5358 len = frag->size;
5361 mapping = sp->dma_maps[i + 1]; 5359 mapping = sp->dma_maps[i];
5362 5360
5363 tp->tx_buffers[entry].skb = NULL; 5361 tp->tx_buffers[entry].skb = NULL;
5364 5362
@@ -5407,9 +5405,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5407 } 5405 }
5408 5406
5409out_unlock: 5407out_unlock:
5410 mmiowb(); 5408 mmiowb();
5411
5412 dev->trans_start = jiffies;
5413 5409
5414 return NETDEV_TX_OK; 5410 return NETDEV_TX_OK;
5415} 5411}
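The tg3 hunks follow a change in the skb_dma_map() bookkeeping: the DMA address of the linear head now lives in skb_shinfo(skb)->dma_head, and dma_maps[] holds only the fragment mappings indexed from 0, which is why dma_maps[i + 1] becomes dma_maps[i]. A hedged sketch of walking an skb mapped that way; use_mapping() is a hypothetical stand-in for programming a TX descriptor:

#include <linux/skbuff.h>

static void use_mapping(dma_addr_t addr, unsigned int len)
{
	/* stand-in for writing (addr, len) into a TX descriptor */
}

static void walk_tx_mappings(struct sk_buff *skb)
{
	struct skb_shared_info *sp = skb_shinfo(skb);
	int i;

	use_mapping(sp->dma_head, skb_headlen(skb));		/* linear part */

	for (i = 0; i < sp->nr_frags; i++)			/* fragments   */
		use_mapping(sp->dma_maps[i], sp->frags[i].size);
}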
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index d913405bc393..1cc8cf4425d1 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -27,6 +27,18 @@ config DE2104X
27 To compile this driver as a module, choose M here. The module will 27 To compile this driver as a module, choose M here. The module will
28 be called de2104x. 28 be called de2104x.
29 29
30config DE2104X_DSL
31 int "Descriptor Skip Length in 32 bit longwords"
32 depends on DE2104X
33 range 0 31
34 default 0
35 help
36 Setting this value allows aligning ring buffer descriptors into their
37 own cache lines. A value of 4 corresponds to the typical 32 byte line
38 (the descriptor is 16 bytes). This is necessary on systems that lack
39 cache coherence, for example the PowerMac 5500. Otherwise 0 is safe.
40 Default is 0, and range is 0 to 31.
41
30config TULIP 42config TULIP
31 tristate "DECchip Tulip (dc2114x) PCI support" 43 tristate "DECchip Tulip (dc2114x) PCI support"
32 depends on PCI 44 depends on PCI
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4c5ecc51f77..e7609a05032d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -82,6 +82,13 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi
82 NETIF_MSG_RX_ERR | \ 82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR) 83 NETIF_MSG_TX_ERR)
84 84
85/* Descriptor skip length in 32 bit longwords. */
86#ifndef CONFIG_DE2104X_DSL
87#define DSL 0
88#else
89#define DSL CONFIG_DE2104X_DSL
90#endif
91
85#define DE_RX_RING_SIZE 64 92#define DE_RX_RING_SIZE 64
86#define DE_TX_RING_SIZE 64 93#define DE_TX_RING_SIZE 64
87#define DE_RING_BYTES \ 94#define DE_RING_BYTES \
@@ -153,6 +160,7 @@ enum {
153 CmdReset = (1 << 0), 160 CmdReset = (1 << 0),
154 CacheAlign16 = 0x00008000, 161 CacheAlign16 = 0x00008000,
155 BurstLen4 = 0x00000400, 162 BurstLen4 = 0x00000400,
163 DescSkipLen = (DSL << 2),
156 164
157 /* Rx/TxPoll bits */ 165 /* Rx/TxPoll bits */
158 NormalTxPoll = (1 << 0), 166 NormalTxPoll = (1 << 0),
@@ -246,7 +254,7 @@ static const u32 de_intr_mask =
246 * Set the programmable burst length to 4 longwords for all: 254 * Set the programmable burst length to 4 longwords for all:
247 * DMA errors result without these values. Cache align 16 long. 255 * DMA errors result without these values. Cache align 16 long.
248 */ 256 */
249static const u32 de_bus_mode = CacheAlign16 | BurstLen4; 257static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
250 258
251struct de_srom_media_block { 259struct de_srom_media_block {
252 u8 opts; 260 u8 opts;
@@ -266,6 +274,9 @@ struct de_desc {
266 __le32 opts2; 274 __le32 opts2;
267 __le32 addr1; 275 __le32 addr1;
268 __le32 addr2; 276 __le32 addr2;
277#if DSL
278 __le32 skip[DSL];
279#endif
269}; 280};
270 281
271struct media_info { 282struct media_info {
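The new DE2104X_DSL option pads every descriptor with DSL extra 32-bit words and programs the same count into the bus-mode register's descriptor-skip-length field (the DSL << 2 shift above), so each 16-byte descriptor can occupy its own cache line on systems without cache-coherent DMA; the default of 0 leaves the layout unchanged. For example, DSL = 4 yields 16 + 4*4 = 32 bytes per descriptor:

#include <linux/types.h>

#define DSL 4				/* would come from CONFIG_DE2104X_DSL */

struct de_desc_example {
	__le32 opts1, opts2, addr1, addr2;	/* the 16 bytes the chip uses  */
#if DSL
	__le32 skip[DSL];			/* padding the chip skips over */
#endif
};
/* sizeof(struct de_desc_example) == 16 + 4 * DSL == 32 when DSL == 4 */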
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4cda69b6b28c..811d3517fce0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -565,9 +565,13 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
565 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) 565 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
566 return -EFAULT; 566 return -EFAULT;
567 567
568 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
569 gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
570 gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
571
568 if (gso.hdr_len > len) 572 if (gso.hdr_len > len)
569 return -EINVAL; 573 return -EINVAL;
570 offset += sizeof(pi); 574 offset += sizeof(gso);
571 } 575 }
572 576
573 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { 577 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
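The tun_get_user change does two things: it advances the copy offset by sizeof(gso) rather than sizeof(pi), and it clamps gso.hdr_len so that whenever the guest requests checksum offload the header length always covers csum_start + csum_offset + 2, preventing a malformed virtio-net header from pointing the checksum field outside the copied header. The clamp in isolation, using the structure from linux/virtio_net.h:

#include <linux/virtio_net.h>

/* Returns the sanitized header length, mirroring the clamp added above. */
static u16 sanitize_hdr_len(struct virtio_net_hdr *gso)
{
	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    gso->csum_start + gso->csum_offset + 2 > gso->hdr_len)
		gso->hdr_len = gso->csum_start + gso->csum_offset + 2;

	return gso->hdr_len;
}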
@@ -844,12 +848,12 @@ static void tun_sock_write_space(struct sock *sk)
844 if (!sock_writeable(sk)) 848 if (!sock_writeable(sk))
845 return; 849 return;
846 850
847 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
848 wake_up_interruptible_sync(sk->sk_sleep);
849
850 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 851 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
851 return; 852 return;
852 853
854 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
855 wake_up_interruptible_sync(sk->sk_sleep);
856
853 tun = container_of(sk, struct tun_sock, sk)->tun; 857 tun = container_of(sk, struct tun_sock, sk)->tun;
854 kill_fasync(&tun->fasync, SIGIO, POLL_OUT); 858 kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
855} 859}
@@ -1318,21 +1322,22 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1318static int tun_chr_close(struct inode *inode, struct file *file) 1322static int tun_chr_close(struct inode *inode, struct file *file)
1319{ 1323{
1320 struct tun_file *tfile = file->private_data; 1324 struct tun_file *tfile = file->private_data;
1321 struct tun_struct *tun = __tun_get(tfile); 1325 struct tun_struct *tun;
1322 1326
1323 1327
1328 rtnl_lock();
1329 tun = __tun_get(tfile);
1324 if (tun) { 1330 if (tun) {
1325 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name); 1331 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1326 1332
1327 rtnl_lock();
1328 __tun_detach(tun); 1333 __tun_detach(tun);
1329 1334
1330 /* If desirable, unregister the netdevice. */ 1335 /* If desirable, unregister the netdevice. */
1331 if (!(tun->flags & TUN_PERSIST)) 1336 if (!(tun->flags & TUN_PERSIST))
1332 unregister_netdevice(tun->dev); 1337 unregister_netdevice(tun->dev);
1333 1338
1334 rtnl_unlock();
1335 } 1339 }
1340 rtnl_unlock();
1336 1341
1337 tun = tfile->tun; 1342 tun = tfile->tun;
1338 if (tun) 1343 if (tun)
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 0cf22c4f123b..fd6140bd9aae 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2007 Freescale Semicondutor, Inc. All rights reserved. 2 * Copyright (C) 2006-2009 Freescale Semicondutor, Inc. All rights reserved.
3 * 3 *
4 * Author: Shlomi Gridish <gridish@freescale.com> 4 * Author: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com> 5 * Li Yang <leoli@freescale.com>
@@ -65,6 +65,8 @@
65 65
66static DEFINE_SPINLOCK(ugeth_lock); 66static DEFINE_SPINLOCK(ugeth_lock);
67 67
68static void uec_configure_serdes(struct net_device *dev);
69
68static struct { 70static struct {
69 u32 msg_enable; 71 u32 msg_enable;
70} debug = { -1 }; 72} debug = { -1 };
@@ -1410,6 +1412,9 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1410 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1412 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1411 upsmr |= UCC_GETH_UPSMR_TBIM; 1413 upsmr |= UCC_GETH_UPSMR_TBIM;
1412 } 1414 }
1415 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
1416 upsmr |= UCC_GETH_UPSMR_SGMM;
1417
1413 out_be32(&uf_regs->upsmr, upsmr); 1418 out_be32(&uf_regs->upsmr, upsmr);
1414 1419
1415 /* Disable autonegotiation in tbi mode, because by default it 1420 /* Disable autonegotiation in tbi mode, because by default it
@@ -1554,6 +1559,9 @@ static int init_phy(struct net_device *dev)
1554 return -ENODEV; 1559 return -ENODEV;
1555 } 1560 }
1556 1561
1562 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
1563 uec_configure_serdes(dev);
1564
1557 phydev->supported &= (ADVERTISED_10baseT_Half | 1565 phydev->supported &= (ADVERTISED_10baseT_Half |
1558 ADVERTISED_10baseT_Full | 1566 ADVERTISED_10baseT_Full |
1559 ADVERTISED_100baseT_Half | 1567 ADVERTISED_100baseT_Half |
@@ -1569,7 +1577,41 @@ static int init_phy(struct net_device *dev)
1569 return 0; 1577 return 0;
1570} 1578}
1571 1579
1580/* Initialize TBI PHY interface for communicating with the
1581 * SERDES lynx PHY on the chip. We communicate with this PHY
1582 * through the MDIO bus on each controller, treating it as a
1583 * "normal" PHY at the address found in the UTBIPA register. We assume
1584 * that the UTBIPA register is valid. Either the MDIO bus code will set
1585 * it to a value that doesn't conflict with other PHYs on the bus, or the
1586 * value doesn't matter, as there are no other PHYs on the bus.
1587 */
1588static void uec_configure_serdes(struct net_device *dev)
1589{
1590 struct ucc_geth_private *ugeth = netdev_priv(dev);
1591
1592 if (!ugeth->tbiphy) {
1593 printk(KERN_WARNING "SGMII mode requires that the device "
1594 "tree specify a tbi-handle\n");
1595 return;
1596 }
1597
1598 /*
1599 * If the link is already up, we must already be ok, and don't need to
1600 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1601 * everything for us? Resetting it takes the link down and requires
1602 * several seconds for it to come back.
1603 */
1604 if (phy_read(ugeth->tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
1605 return;
1606
1607 /* Single clk mode, mii mode off(for serdes communication) */
1608 phy_write(ugeth->tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
1572 1609
1610 phy_write(ugeth->tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
1611
1612 phy_write(ugeth->tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
1613
1614}
1573 1615
1574static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) 1616static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1575{ 1617{
@@ -3523,6 +3565,8 @@ static phy_interface_t to_phy_interface(const char *phy_connection_type)
3523 return PHY_INTERFACE_MODE_RGMII_RXID; 3565 return PHY_INTERFACE_MODE_RGMII_RXID;
3524 if (strcasecmp(phy_connection_type, "rtbi") == 0) 3566 if (strcasecmp(phy_connection_type, "rtbi") == 0)
3525 return PHY_INTERFACE_MODE_RTBI; 3567 return PHY_INTERFACE_MODE_RTBI;
3568 if (strcasecmp(phy_connection_type, "sgmii") == 0)
3569 return PHY_INTERFACE_MODE_SGMII;
3526 3570
3527 return PHY_INTERFACE_MODE_MII; 3571 return PHY_INTERFACE_MODE_MII;
3528} 3572}
@@ -3567,6 +3611,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3567 PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, 3611 PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
3568 PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, 3612 PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
3569 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, 3613 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
3614 PHY_INTERFACE_MODE_SGMII,
3570 }; 3615 };
3571 3616
3572 ugeth_vdbg("%s: IN", __func__); 3617 ugeth_vdbg("%s: IN", __func__);
@@ -3682,6 +3727,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3682 case PHY_INTERFACE_MODE_RGMII_TXID: 3727 case PHY_INTERFACE_MODE_RGMII_TXID:
3683 case PHY_INTERFACE_MODE_TBI: 3728 case PHY_INTERFACE_MODE_TBI:
3684 case PHY_INTERFACE_MODE_RTBI: 3729 case PHY_INTERFACE_MODE_RTBI:
3730 case PHY_INTERFACE_MODE_SGMII:
3685 max_speed = SPEED_1000; 3731 max_speed = SPEED_1000;
3686 break; 3732 break;
3687 default: 3733 default:
@@ -3756,6 +3802,37 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3756 ugeth->ndev = dev; 3802 ugeth->ndev = dev;
3757 ugeth->node = np; 3803 ugeth->node = np;
3758 3804
3805 /* Find the TBI PHY. If it's not there, we don't support SGMII */
3806 ph = of_get_property(np, "tbi-handle", NULL);
3807 if (ph) {
3808 struct device_node *tbi = of_find_node_by_phandle(*ph);
3809 struct of_device *ofdev;
3810 struct mii_bus *bus;
3811 const unsigned int *id;
3812
3813 if (!tbi)
3814 return 0;
3815
3816 mdio = of_get_parent(tbi);
3817 if (!mdio)
3818 return 0;
3819
3820 ofdev = of_find_device_by_node(mdio);
3821
3822 of_node_put(mdio);
3823
3824 id = of_get_property(tbi, "reg", NULL);
3825 if (!id)
3826 return 0;
3827 of_node_put(tbi);
3828
3829 bus = dev_get_drvdata(&ofdev->dev);
3830 if (!bus)
3831 return 0;
3832
3833 ugeth->tbiphy = bus->phy_map[*id];
3834 }
3835
3759 return 0; 3836 return 0;
3760} 3837}
3761 3838
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index dca628a922ba..deb962bb68ef 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. 2 * Copyright (C) Freescale Semicondutor, Inc. 2006-2009. All rights reserved.
3 * 3 *
4 * Author: Shlomi Gridish <gridish@freescale.com> 4 * Author: Shlomi Gridish <gridish@freescale.com>
5 * 5 *
@@ -193,6 +193,31 @@ struct ucc_geth {
193#define ENET_TBI_MII_JD 0x10 /* Jitter diagnostics */ 193#define ENET_TBI_MII_JD 0x10 /* Jitter diagnostics */
194#define ENET_TBI_MII_TBICON 0x11 /* TBI control */ 194#define ENET_TBI_MII_TBICON 0x11 /* TBI control */
195 195
196/* TBI MDIO register bit fields*/
197#define TBISR_LSTATUS 0x0004
198#define TBICON_CLK_SELECT 0x0020
199#define TBIANA_ASYMMETRIC_PAUSE 0x0100
200#define TBIANA_SYMMETRIC_PAUSE 0x0080
201#define TBIANA_HALF_DUPLEX 0x0040
202#define TBIANA_FULL_DUPLEX 0x0020
203#define TBICR_PHY_RESET 0x8000
204#define TBICR_ANEG_ENABLE 0x1000
205#define TBICR_RESTART_ANEG 0x0200
206#define TBICR_FULL_DUPLEX 0x0100
207#define TBICR_SPEED1_SET 0x0040
208
209#define TBIANA_SETTINGS ( \
210 TBIANA_ASYMMETRIC_PAUSE \
211 | TBIANA_SYMMETRIC_PAUSE \
212 | TBIANA_FULL_DUPLEX \
213 )
214#define TBICR_SETTINGS ( \
215 TBICR_PHY_RESET \
216 | TBICR_ANEG_ENABLE \
217 | TBICR_FULL_DUPLEX \
218 | TBICR_SPEED1_SET \
219 )
220
196/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ 221/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
197#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control 222#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
198 Rx */ 223 Rx */
@@ -1188,6 +1213,7 @@ struct ucc_geth_private {
1188 1213
1189 struct ugeth_mii_info *mii_info; 1214 struct ugeth_mii_info *mii_info;
1190 struct phy_device *phydev; 1215 struct phy_device *phydev;
1216 struct phy_device *tbiphy;
1191 phy_interface_t phy_interface; 1217 phy_interface_t phy_interface;
1192 int max_speed; 1218 int max_speed;
1193 uint32_t msg_enable; 1219 uint32_t msg_enable;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 837135f0390a..e3580f42c899 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -899,15 +899,14 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
899 continue; 899 continue;
900 } 900 }
901 /* Allocate an sk_buff */ 901 /* Allocate an sk_buff */
902 odev->skb_rx_buf = dev_alloc_skb(frame_len); 902 odev->skb_rx_buf = netdev_alloc_skb(odev->net,
903 frame_len);
903 if (!odev->skb_rx_buf) { 904 if (!odev->skb_rx_buf) {
904 /* We got no receive buffer. */ 905 /* We got no receive buffer. */
905 D1("could not allocate memory"); 906 D1("could not allocate memory");
906 odev->rx_parse_state = WAIT_SYNC; 907 odev->rx_parse_state = WAIT_SYNC;
907 return; 908 return;
908 } 909 }
909 /* Here's where it came from */
910 odev->skb_rx_buf->dev = odev->net;
911 910
912 /* Copy what we got so far. make room for iphdr 911 /* Copy what we got so far. make room for iphdr
913 * after tail. */ 912 * after tail. */
@@ -2481,10 +2480,10 @@ static int add_net_device(struct hso_device *hso_dev)
2481 return 0; 2480 return 0;
2482} 2481}
2483 2482
2484static int hso_radio_toggle(void *data, enum rfkill_state state) 2483static int hso_rfkill_set_block(void *data, bool blocked)
2485{ 2484{
2486 struct hso_device *hso_dev = data; 2485 struct hso_device *hso_dev = data;
2487 int enabled = (state == RFKILL_STATE_UNBLOCKED); 2486 int enabled = !blocked;
2488 int rv; 2487 int rv;
2489 2488
2490 mutex_lock(&hso_dev->mutex); 2489 mutex_lock(&hso_dev->mutex);
@@ -2498,6 +2497,10 @@ static int hso_radio_toggle(void *data, enum rfkill_state state)
2498 return rv; 2497 return rv;
2499} 2498}
2500 2499
2500static const struct rfkill_ops hso_rfkill_ops = {
2501 .set_block = hso_rfkill_set_block,
2502};
2503
2501/* Creates and sets up everything for rfkill */ 2504/* Creates and sets up everything for rfkill */
2502static void hso_create_rfkill(struct hso_device *hso_dev, 2505static void hso_create_rfkill(struct hso_device *hso_dev,
2503 struct usb_interface *interface) 2506 struct usb_interface *interface)
@@ -2506,29 +2509,25 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
2506 struct device *dev = &hso_net->net->dev; 2509 struct device *dev = &hso_net->net->dev;
2507 char *rfkn; 2510 char *rfkn;
2508 2511
2509 hso_net->rfkill = rfkill_allocate(&interface_to_usbdev(interface)->dev,
2510 RFKILL_TYPE_WWAN);
2511 if (!hso_net->rfkill) {
2512 dev_err(dev, "%s - Out of memory\n", __func__);
2513 return;
2514 }
2515 rfkn = kzalloc(20, GFP_KERNEL); 2512 rfkn = kzalloc(20, GFP_KERNEL);
2516 if (!rfkn) { 2513 if (!rfkn)
2517 rfkill_free(hso_net->rfkill);
2518 hso_net->rfkill = NULL;
2519 dev_err(dev, "%s - Out of memory\n", __func__); 2514 dev_err(dev, "%s - Out of memory\n", __func__);
2520 return; 2515
2521 }
2522 snprintf(rfkn, 20, "hso-%d", 2516 snprintf(rfkn, 20, "hso-%d",
2523 interface->altsetting->desc.bInterfaceNumber); 2517 interface->altsetting->desc.bInterfaceNumber);
2524 hso_net->rfkill->name = rfkn; 2518
2525 hso_net->rfkill->state = RFKILL_STATE_UNBLOCKED; 2519 hso_net->rfkill = rfkill_alloc(rfkn,
2526 hso_net->rfkill->data = hso_dev; 2520 &interface_to_usbdev(interface)->dev,
2527 hso_net->rfkill->toggle_radio = hso_radio_toggle; 2521 RFKILL_TYPE_WWAN,
2522 &hso_rfkill_ops, hso_dev);
2523 if (!hso_net->rfkill) {
2524 dev_err(dev, "%s - Out of memory\n", __func__);
2525 kfree(rfkn);
2526 return;
2527 }
2528 if (rfkill_register(hso_net->rfkill) < 0) { 2528 if (rfkill_register(hso_net->rfkill) < 0) {
2529 rfkill_destroy(hso_net->rfkill);
2529 kfree(rfkn); 2530 kfree(rfkn);
2530 hso_net->rfkill->name = NULL;
2531 rfkill_free(hso_net->rfkill);
2532 hso_net->rfkill = NULL; 2531 hso_net->rfkill = NULL;
2533 dev_err(dev, "%s - Failed to register rfkill\n", __func__); 2532 dev_err(dev, "%s - Failed to register rfkill\n", __func__);
2534 return; 2533 return;
@@ -3165,8 +3164,10 @@ static void hso_free_interface(struct usb_interface *interface)
3165 hso_stop_net_device(network_table[i]); 3164 hso_stop_net_device(network_table[i]);
3166 cancel_work_sync(&network_table[i]->async_put_intf); 3165 cancel_work_sync(&network_table[i]->async_put_intf);
3167 cancel_work_sync(&network_table[i]->async_get_intf); 3166 cancel_work_sync(&network_table[i]->async_get_intf);
3168 if (rfk) 3167 if (rfk) {
3169 rfkill_unregister(rfk); 3168 rfkill_unregister(rfk);
3169 rfkill_destroy(rfk);
3170 }
3170 hso_free_net_device(network_table[i]); 3171 hso_free_net_device(network_table[i]);
3171 } 3172 }
3172 } 3173 }
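The hso changes port the driver to the rfkill core's ops-based API: a const struct rfkill_ops supplies a set_block() callback, rfkill_alloc(name, parent, type, ops, data) replaces rfkill_allocate() plus the open-coded field assignments, and teardown now needs rfkill_destroy() after rfkill_unregister() (rfkill_free() is gone). A condensed sketch of the lifecycle; radio_power() is a hypothetical stand-in for the USB control message that actually powers the radio:

#include <linux/rfkill.h>

static int radio_power(void *data, int on)
{
	return 0;	/* stand-in for the hardware on/off path */
}

static int example_set_block(void *data, bool blocked)
{
	return radio_power(data, !blocked);
}

static const struct rfkill_ops example_rfkill_ops = {
	.set_block = example_set_block,
};

static struct rfkill *example_rfkill_setup(struct device *parent, void *priv)
{
	struct rfkill *rfk = rfkill_alloc("example-wwan", parent, RFKILL_TYPE_WWAN,
					  &example_rfkill_ops, priv);

	if (rfk && rfkill_register(rfk)) {
		rfkill_destroy(rfk);		/* registration failed: free it */
		rfk = NULL;
	}
	return rfk;
}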
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index f9fb454ffa8b..fcc6fa0905d1 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -221,7 +221,8 @@ static void ctrl_callback(struct urb *urb)
221 case -ENOENT: 221 case -ENOENT:
222 break; 222 break;
223 default: 223 default:
224 dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status); 224 if (printk_ratelimit())
225 dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status);
225 } 226 }
226 dev = urb->context; 227 dev = urb->context;
227 clear_bit(RX_REG_SET, &dev->flags); 228 clear_bit(RX_REG_SET, &dev->flags);
@@ -442,10 +443,12 @@ static void read_bulk_callback(struct urb *urb)
442 case -ENOENT: 443 case -ENOENT:
443 return; /* the urb is in unlink state */ 444 return; /* the urb is in unlink state */
444 case -ETIME: 445 case -ETIME:
445 dev_warn(&urb->dev->dev, "may be reset is needed?..\n"); 446 if (printk_ratelimit())
447 dev_warn(&urb->dev->dev, "may be reset is needed?..\n");
446 goto goon; 448 goto goon;
447 default: 449 default:
448 dev_warn(&urb->dev->dev, "Rx status %d\n", status); 450 if (printk_ratelimit())
451 dev_warn(&urb->dev->dev, "Rx status %d\n", status);
449 goto goon; 452 goto goon;
450 } 453 }
451 454
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8e56fcf0a0e3..87197dd9c788 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -176,8 +176,6 @@ static int veth_xmit(struct sk_buff *skb, struct net_device *dev)
176 if (dev->features & NETIF_F_NO_CSUM) 176 if (dev->features & NETIF_F_NO_CSUM)
177 skb->ip_summed = rcv_priv->ip_summed; 177 skb->ip_summed = rcv_priv->ip_summed;
178 178
179 dst_release(skb->dst);
180 skb->dst = NULL;
181 skb->mark = 0; 179 skb->mark = 0;
182 secpath_reset(skb); 180 secpath_reset(skb);
183 nf_reset(skb); 181 nf_reset(skb);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 45daba726b66..d3489a3c4c03 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -388,7 +388,6 @@ struct rhine_private {
388 long pioaddr; 388 long pioaddr;
389 struct net_device *dev; 389 struct net_device *dev;
390 struct napi_struct napi; 390 struct napi_struct napi;
391 struct net_device_stats stats;
392 spinlock_t lock; 391 spinlock_t lock;
393 392
394 /* Frequently used values: keep some adjacent for cache effect. */ 393 /* Frequently used values: keep some adjacent for cache effect. */
@@ -1209,7 +1208,7 @@ static void rhine_tx_timeout(struct net_device *dev)
1209 enable_irq(rp->pdev->irq); 1208 enable_irq(rp->pdev->irq);
1210 1209
1211 dev->trans_start = jiffies; 1210 dev->trans_start = jiffies;
1212 rp->stats.tx_errors++; 1211 dev->stats.tx_errors++;
1213 netif_wake_queue(dev); 1212 netif_wake_queue(dev);
1214} 1213}
1215 1214
@@ -1237,7 +1236,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1237 /* packet too long, drop it */ 1236 /* packet too long, drop it */
1238 dev_kfree_skb(skb); 1237 dev_kfree_skb(skb);
1239 rp->tx_skbuff[entry] = NULL; 1238 rp->tx_skbuff[entry] = NULL;
1240 rp->stats.tx_dropped++; 1239 dev->stats.tx_dropped++;
1241 return 0; 1240 return 0;
1242 } 1241 }
1243 1242
@@ -1378,29 +1377,33 @@ static void rhine_tx(struct net_device *dev)
1378 printk(KERN_DEBUG "%s: Transmit error, " 1377 printk(KERN_DEBUG "%s: Transmit error, "
1379 "Tx status %8.8x.\n", 1378 "Tx status %8.8x.\n",
1380 dev->name, txstatus); 1379 dev->name, txstatus);
1381 rp->stats.tx_errors++; 1380 dev->stats.tx_errors++;
1382 if (txstatus & 0x0400) rp->stats.tx_carrier_errors++; 1381 if (txstatus & 0x0400)
1383 if (txstatus & 0x0200) rp->stats.tx_window_errors++; 1382 dev->stats.tx_carrier_errors++;
1384 if (txstatus & 0x0100) rp->stats.tx_aborted_errors++; 1383 if (txstatus & 0x0200)
1385 if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++; 1384 dev->stats.tx_window_errors++;
1385 if (txstatus & 0x0100)
1386 dev->stats.tx_aborted_errors++;
1387 if (txstatus & 0x0080)
1388 dev->stats.tx_heartbeat_errors++;
1386 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || 1389 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1387 (txstatus & 0x0800) || (txstatus & 0x1000)) { 1390 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1388 rp->stats.tx_fifo_errors++; 1391 dev->stats.tx_fifo_errors++;
1389 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); 1392 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1390 break; /* Keep the skb - we try again */ 1393 break; /* Keep the skb - we try again */
1391 } 1394 }
1392 /* Transmitter restarted in 'abnormal' handler. */ 1395 /* Transmitter restarted in 'abnormal' handler. */
1393 } else { 1396 } else {
1394 if (rp->quirks & rqRhineI) 1397 if (rp->quirks & rqRhineI)
1395 rp->stats.collisions += (txstatus >> 3) & 0x0F; 1398 dev->stats.collisions += (txstatus >> 3) & 0x0F;
1396 else 1399 else
1397 rp->stats.collisions += txstatus & 0x0F; 1400 dev->stats.collisions += txstatus & 0x0F;
1398 if (debug > 6) 1401 if (debug > 6)
1399 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n", 1402 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1400 (txstatus >> 3) & 0xF, 1403 (txstatus >> 3) & 0xF,
1401 txstatus & 0xF); 1404 txstatus & 0xF);
1402 rp->stats.tx_bytes += rp->tx_skbuff[entry]->len; 1405 dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1403 rp->stats.tx_packets++; 1406 dev->stats.tx_packets++;
1404 } 1407 }
1405 /* Free the original skb. */ 1408 /* Free the original skb. */
1406 if (rp->tx_skbuff_dma[entry]) { 1409 if (rp->tx_skbuff_dma[entry]) {
@@ -1455,21 +1458,24 @@ static int rhine_rx(struct net_device *dev, int limit)
1455 printk(KERN_WARNING "%s: Oversized Ethernet " 1458 printk(KERN_WARNING "%s: Oversized Ethernet "
1456 "frame %p vs %p.\n", dev->name, 1459 "frame %p vs %p.\n", dev->name,
1457 rp->rx_head_desc, &rp->rx_ring[entry]); 1460 rp->rx_head_desc, &rp->rx_ring[entry]);
1458 rp->stats.rx_length_errors++; 1461 dev->stats.rx_length_errors++;
1459 } else if (desc_status & RxErr) { 1462 } else if (desc_status & RxErr) {
1460 /* There was a error. */ 1463 /* There was a error. */
1461 if (debug > 2) 1464 if (debug > 2)
1462 printk(KERN_DEBUG "rhine_rx() Rx " 1465 printk(KERN_DEBUG "rhine_rx() Rx "
1463 "error was %8.8x.\n", 1466 "error was %8.8x.\n",
1464 desc_status); 1467 desc_status);
1465 rp->stats.rx_errors++; 1468 dev->stats.rx_errors++;
1466 if (desc_status & 0x0030) rp->stats.rx_length_errors++; 1469 if (desc_status & 0x0030)
1467 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++; 1470 dev->stats.rx_length_errors++;
1468 if (desc_status & 0x0004) rp->stats.rx_frame_errors++; 1471 if (desc_status & 0x0048)
1472 dev->stats.rx_fifo_errors++;
1473 if (desc_status & 0x0004)
1474 dev->stats.rx_frame_errors++;
1469 if (desc_status & 0x0002) { 1475 if (desc_status & 0x0002) {
1470 /* this can also be updated outside the interrupt handler */ 1476 /* this can also be updated outside the interrupt handler */
1471 spin_lock(&rp->lock); 1477 spin_lock(&rp->lock);
1472 rp->stats.rx_crc_errors++; 1478 dev->stats.rx_crc_errors++;
1473 spin_unlock(&rp->lock); 1479 spin_unlock(&rp->lock);
1474 } 1480 }
1475 } 1481 }
@@ -1513,8 +1519,8 @@ static int rhine_rx(struct net_device *dev, int limit)
1513 } 1519 }
1514 skb->protocol = eth_type_trans(skb, dev); 1520 skb->protocol = eth_type_trans(skb, dev);
1515 netif_receive_skb(skb); 1521 netif_receive_skb(skb);
1516 rp->stats.rx_bytes += pkt_len; 1522 dev->stats.rx_bytes += pkt_len;
1517 rp->stats.rx_packets++; 1523 dev->stats.rx_packets++;
1518 } 1524 }
1519 entry = (++rp->cur_rx) % RX_RING_SIZE; 1525 entry = (++rp->cur_rx) % RX_RING_SIZE;
1520 rp->rx_head_desc = &rp->rx_ring[entry]; 1526 rp->rx_head_desc = &rp->rx_ring[entry];
@@ -1599,8 +1605,8 @@ static void rhine_error(struct net_device *dev, int intr_status)
1599 if (intr_status & IntrLinkChange) 1605 if (intr_status & IntrLinkChange)
1600 rhine_check_media(dev, 0); 1606 rhine_check_media(dev, 0);
1601 if (intr_status & IntrStatsMax) { 1607 if (intr_status & IntrStatsMax) {
1602 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 1608 dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1603 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); 1609 dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1604 clear_tally_counters(ioaddr); 1610 clear_tally_counters(ioaddr);
1605 } 1611 }
1606 if (intr_status & IntrTxAborted) { 1612 if (intr_status & IntrTxAborted) {
@@ -1654,12 +1660,12 @@ static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1654 unsigned long flags; 1660 unsigned long flags;
1655 1661
1656 spin_lock_irqsave(&rp->lock, flags); 1662 spin_lock_irqsave(&rp->lock, flags);
1657 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 1663 dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1658 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); 1664 dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1659 clear_tally_counters(ioaddr); 1665 clear_tally_counters(ioaddr);
1660 spin_unlock_irqrestore(&rp->lock, flags); 1666 spin_unlock_irqrestore(&rp->lock, flags);
1661 1667
1662 return &rp->stats; 1668 return &dev->stats;
1663} 1669}
1664 1670
1665static void rhine_set_rx_mode(struct net_device *dev) 1671static void rhine_set_rx_mode(struct net_device *dev)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 754a4b182c1d..e2a7725e567e 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1385,7 +1385,7 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
1385 1385
1386static int velocity_rx_srv(struct velocity_info *vptr, int status) 1386static int velocity_rx_srv(struct velocity_info *vptr, int status)
1387{ 1387{
1388 struct net_device_stats *stats = &vptr->stats; 1388 struct net_device_stats *stats = &vptr->dev->stats;
1389 int rd_curr = vptr->rx.curr; 1389 int rd_curr = vptr->rx.curr;
1390 int works = 0; 1390 int works = 0;
1391 1391
@@ -1519,7 +1519,7 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
1519static int velocity_receive_frame(struct velocity_info *vptr, int idx) 1519static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1520{ 1520{
1521 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 1521 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
1522 struct net_device_stats *stats = &vptr->stats; 1522 struct net_device_stats *stats = &vptr->dev->stats;
1523 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 1523 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1524 struct rx_desc *rd = &(vptr->rx.ring[idx]); 1524 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1525 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 1525 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
@@ -1532,7 +1532,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1532 } 1532 }
1533 1533
1534 if (rd->rdesc0.RSR & RSR_MAR) 1534 if (rd->rdesc0.RSR & RSR_MAR)
1535 vptr->stats.multicast++; 1535 stats->multicast++;
1536 1536
1537 skb = rd_info->skb; 1537 skb = rd_info->skb;
1538 1538
@@ -1634,7 +1634,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1634 int idx; 1634 int idx;
1635 int works = 0; 1635 int works = 0;
1636 struct velocity_td_info *tdinfo; 1636 struct velocity_td_info *tdinfo;
1637 struct net_device_stats *stats = &vptr->stats; 1637 struct net_device_stats *stats = &vptr->dev->stats;
1638 1638
1639 for (qnum = 0; qnum < vptr->tx.numq; qnum++) { 1639 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1640 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; 1640 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
@@ -2324,22 +2324,22 @@ static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2324 2324
2325 /* If the hardware is down, don't touch MII */ 2325 /* If the hardware is down, don't touch MII */
2326 if(!netif_running(dev)) 2326 if(!netif_running(dev))
2327 return &vptr->stats; 2327 return &dev->stats;
2328 2328
2329 spin_lock_irq(&vptr->lock); 2329 spin_lock_irq(&vptr->lock);
2330 velocity_update_hw_mibs(vptr); 2330 velocity_update_hw_mibs(vptr);
2331 spin_unlock_irq(&vptr->lock); 2331 spin_unlock_irq(&vptr->lock);
2332 2332
2333 vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; 2333 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2334 vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; 2334 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2335 vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; 2335 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2336 2336
2337// unsigned long rx_dropped; /* no space in linux buffers */ 2337// unsigned long rx_dropped; /* no space in linux buffers */
2338 vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; 2338 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2339 /* detailed rx_errors: */ 2339 /* detailed rx_errors: */
2340// unsigned long rx_length_errors; 2340// unsigned long rx_length_errors;
2341// unsigned long rx_over_errors; /* receiver ring buff overflow */ 2341// unsigned long rx_over_errors; /* receiver ring buff overflow */
2342 vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; 2342 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2343// unsigned long rx_frame_errors; /* recv'd frame alignment error */ 2343// unsigned long rx_frame_errors; /* recv'd frame alignment error */
2344// unsigned long rx_fifo_errors; /* recv'r fifo overrun */ 2344// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
2345// unsigned long rx_missed_errors; /* receiver missed packet */ 2345// unsigned long rx_missed_errors; /* receiver missed packet */
@@ -2347,7 +2347,7 @@ static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2347 /* detailed tx_errors */ 2347 /* detailed tx_errors */
2348// unsigned long tx_fifo_errors; 2348// unsigned long tx_fifo_errors;
2349 2349
2350 return &vptr->stats; 2350 return &dev->stats;
2351} 2351}
2352 2352
2353 2353
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index ea43e1832afb..4cd3f6c97379 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1503,7 +1503,6 @@ struct velocity_info {
1503 1503
1504 struct pci_dev *pdev; 1504 struct pci_dev *pdev;
1505 struct net_device *dev; 1505 struct net_device *dev;
1506 struct net_device_stats stats;
1507 1506
1508 struct vlan_group *vlgrp; 1507 struct vlan_group *vlgrp;
1509 u8 ip_addr[4]; 1508 u8 ip_addr[4];
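The via-rhine and via-velocity hunks above drop each driver's private struct net_device_stats and account directly into the counters embedded in struct net_device. A minimal sketch of the resulting pattern, assuming a 2.6.31-era struct net_device; the example_* names are invented and not from either driver:

#include <linux/netdevice.h>

/* Account a received frame in the per-device counters; no
 * driver-private struct net_device_stats is needed any more. */
static void example_count_rx(struct net_device *dev, unsigned int pkt_len)
{
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += pkt_len;
}

/* get_stats can simply hand back &dev->stats; dev_get_stats()
 * falls back to the same counters when no ndo_get_stats hook is set. */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
        return &dev->stats;
}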
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6cc5bcd34fb0..09bd4410fa65 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -470,7 +470,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
470 } 470 }
471 471
472 if (skb_is_gso(skb)) { 472 if (skb_is_gso(skb)) {
473 hdr->hdr_len = skb_transport_header(skb) - skb->data; 473 hdr->hdr_len = skb_headlen(skb);
474 hdr->gso_size = skb_shinfo(skb)->gso_size; 474 hdr->gso_size = skb_shinfo(skb)->gso_size;
475 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 475 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
476 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 476 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
@@ -680,6 +680,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
680 u8 promisc, allmulti; 680 u8 promisc, allmulti;
681 struct virtio_net_ctrl_mac *mac_data; 681 struct virtio_net_ctrl_mac *mac_data;
682 struct dev_addr_list *addr; 682 struct dev_addr_list *addr;
683 struct netdev_hw_addr *ha;
683 void *buf; 684 void *buf;
684 int i; 685 int i;
685 686
@@ -718,9 +719,9 @@ static void virtnet_set_rx_mode(struct net_device *dev)
718 719
719 /* Store the unicast list and count in the front of the buffer */ 720 /* Store the unicast list and count in the front of the buffer */
720 mac_data->entries = dev->uc_count; 721 mac_data->entries = dev->uc_count;
721 addr = dev->uc_list; 722 i = 0;
722 for (i = 0; i < dev->uc_count; i++, addr = addr->next) 723 list_for_each_entry(ha, &dev->uc_list, list)
723 memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN); 724 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
724 725
725 sg_set_buf(&sg[0], mac_data, 726 sg_set_buf(&sg[0], mac_data,
726 sizeof(mac_data->entries) + (dev->uc_count * ETH_ALEN)); 727 sizeof(mac_data->entries) + (dev->uc_count * ETH_ALEN));
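The unicast-filter hunk above reflects the conversion of dev->uc_list from a hand-rolled dev_addr_list chain to a list of struct netdev_hw_addr entries. A sketch of the new walk under that assumption; example_copy_uc_list is an invented helper, not part of virtio_net:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Flatten the device's unicast address list into a MAC array, the
 * same list_for_each_entry() walk the hunk above performs when
 * filling the control-virtqueue buffer. */
static int example_copy_uc_list(struct net_device *dev, u8 *macs, int max)
{
        struct netdev_hw_addr *ha;
        int i = 0;

        list_for_each_entry(ha, &dev->uc_list, list) {
                if (i == max)
                        break;
                memcpy(macs + i * ETH_ALEN, ha->addr, ETH_ALEN);
                i++;
        }
        return i;       /* number of addresses copied */
}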
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 6b41c884a337..26cde573af43 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -1884,17 +1884,13 @@ void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1884 mempool->memblock_size, dma_object); 1884 mempool->memblock_size, dma_object);
1885 } 1885 }
1886 1886
1887 if (mempool->items_arr) 1887 vfree(mempool->items_arr);
1888 vfree(mempool->items_arr);
1889 1888
1890 if (mempool->memblocks_dma_arr) 1889 vfree(mempool->memblocks_dma_arr);
1891 vfree(mempool->memblocks_dma_arr);
1892 1890
1893 if (mempool->memblocks_priv_arr) 1891 vfree(mempool->memblocks_priv_arr);
1894 vfree(mempool->memblocks_priv_arr);
1895 1892
1896 if (mempool->memblocks_arr) 1893 vfree(mempool->memblocks_arr);
1897 vfree(mempool->memblocks_arr);
1898 1894
1899 vfree(mempool); 1895 vfree(mempool);
1900} 1896}
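The vxge-config cleanup above relies on vfree() being a no-op for a NULL pointer (the same convention as kfree()), so the "if (ptr) vfree(ptr)" guards can simply go away. A short illustration; example_teardown is not from the driver:

#include <linux/vmalloc.h>

/* vfree(), like kfree(), silently ignores NULL, so teardown paths
 * may free unconditionally. */
static void example_teardown(void *a, void *b)
{
        vfree(a);       /* safe even if a == NULL */
        vfree(b);
}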
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b7f08f3e524b..6c838b3e063a 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -677,7 +677,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
677 return VXGE_HW_OK; 677 return VXGE_HW_OK;
678} 678}
679 679
680/* select a vpath to trasmit the packet */ 680/* select a vpath to transmit the packet */
681static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb, 681static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
682 int *do_lock) 682 int *do_lock)
683{ 683{
@@ -992,7 +992,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
992 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN); 992 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
993 993
994 vxge_hw_fifo_txdl_post(fifo_hw, dtr); 994 vxge_hw_fifo_txdl_post(fifo_hw, dtr);
995 dev->trans_start = jiffies; 995#ifdef NETIF_F_LLTX
996 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
997#endif
996 spin_unlock_irqrestore(&fifo->tx_lock, flags); 998 spin_unlock_irqrestore(&fifo->tx_lock, flags);
997 999
998 VXGE_COMPLETE_VPATH_TX(fifo); 1000 VXGE_COMPLETE_VPATH_TX(fifo);
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 765a7f5d6aa4..08b1a284b690 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -731,8 +731,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
731 dma_unmap_single(&dev->dev, desc->data, 731 dma_unmap_single(&dev->dev, desc->data,
732 RX_SIZE, DMA_FROM_DEVICE); 732 RX_SIZE, DMA_FROM_DEVICE);
733#else 733#else
734 dma_sync_single(&dev->dev, desc->data, 734 dma_sync_single_for_cpu(&dev->dev, desc->data,
735 RX_SIZE, DMA_FROM_DEVICE); 735 RX_SIZE, DMA_FROM_DEVICE);
736 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], 736 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
737 ALIGN(desc->pkt_len, 4) / 4); 737 ALIGN(desc->pkt_len, 4) / 4);
738#endif 738#endif
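The ixp4xx_hss hunk replaces the old dma_sync_single() with dma_sync_single_for_cpu(); in the streaming DMA API that call is normally paired with dma_sync_single_for_device() once the CPU is done with the buffer. A sketch under that assumption; example_peek_rx is invented:

#include <linux/dma-mapping.h>

/* Give buffer ownership to the CPU, inspect it, then hand it back
 * to the device for further DMA. */
static void example_peek_rx(struct device *dev, dma_addr_t handle,
                            const void *cpu_ptr, size_t len)
{
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
        /* ... the CPU may now read cpu_ptr[0 .. len - 1] ... */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}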
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index b3cadb626fe0..bd193ae2178b 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -292,8 +292,6 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
292 292
293 d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state); 293 d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state);
294 294
295 if (unlikely(i2400m->ready == 0)) /* act if up */
296 goto out;
297 if (i2400m->state != i2400m_state) { 295 if (i2400m->state != i2400m_state) {
298 i2400m->state = i2400m_state; 296 i2400m->state = i2400m_state;
299 wake_up_all(&i2400m->state_wq); 297 wake_up_all(&i2400m->state_wq);
@@ -341,7 +339,6 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
341 i2400m->bus_reset(i2400m, I2400M_RT_WARM); 339 i2400m->bus_reset(i2400m, I2400M_RT_WARM);
342 break; 340 break;
343 }; 341 };
344out:
345 d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n", 342 d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
346 i2400m, ss, i2400m_state); 343 i2400m, ss, i2400m_state);
347} 344}
@@ -372,8 +369,6 @@ void i2400m_report_tlv_media_status(struct i2400m *i2400m,
372 369
373 d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status); 370 d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status);
374 371
375 if (unlikely(i2400m->ready == 0)) /* act if up */
376 goto out;
377 switch (status) { 372 switch (status) {
378 case I2400M_MEDIA_STATUS_LINK_UP: 373 case I2400M_MEDIA_STATUS_LINK_UP:
379 netif_carrier_on(net_dev); 374 netif_carrier_on(net_dev);
@@ -393,14 +388,59 @@ void i2400m_report_tlv_media_status(struct i2400m *i2400m,
393 dev_err(dev, "HW BUG? unknown media status %u\n", 388 dev_err(dev, "HW BUG? unknown media status %u\n",
394 status); 389 status);
395 }; 390 };
396out:
397 d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n", 391 d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
398 i2400m, ms, status); 392 i2400m, ms, status);
399} 393}
400 394
401 395
402/* 396/*
403 * Parse a 'state report' and extract carrier on/off information 397 * Process a TLV from a 'state report'
398 *
399 * @i2400m: device descriptor
400 * @tlv: pointer to the TLV header; it has been already validated for
401 * consistent size.
402 * @tag: for error messages
403 *
404 * Act on the TLVs from a 'state report'.
405 */
406static
407void i2400m_report_state_parse_tlv(struct i2400m *i2400m,
408 const struct i2400m_tlv_hdr *tlv,
409 const char *tag)
410{
411 struct device *dev = i2400m_dev(i2400m);
412 const struct i2400m_tlv_media_status *ms;
413 const struct i2400m_tlv_system_state *ss;
414 const struct i2400m_tlv_rf_switches_status *rfss;
415
416 if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, sizeof(*ss))) {
417 ss = container_of(tlv, typeof(*ss), hdr);
418 d_printf(2, dev, "%s: system state TLV "
419 "found (0x%04x), state 0x%08x\n",
420 tag, I2400M_TLV_SYSTEM_STATE,
421 le32_to_cpu(ss->state));
422 i2400m_report_tlv_system_state(i2400m, ss);
423 }
424 if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS, sizeof(*rfss))) {
425 rfss = container_of(tlv, typeof(*rfss), hdr);
426 d_printf(2, dev, "%s: RF status TLV "
427 "found (0x%04x), sw 0x%02x hw 0x%02x\n",
428 tag, I2400M_TLV_RF_STATUS,
429 le32_to_cpu(rfss->sw_rf_switch),
430 le32_to_cpu(rfss->hw_rf_switch));
431 i2400m_report_tlv_rf_switches_status(i2400m, rfss);
432 }
433 if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS, sizeof(*ms))) {
434 ms = container_of(tlv, typeof(*ms), hdr);
435 d_printf(2, dev, "%s: Media Status TLV: %u\n",
436 tag, le32_to_cpu(ms->media_status));
437 i2400m_report_tlv_media_status(i2400m, ms);
438 }
439}
440
441
442/*
443 * Parse a 'state report' and extract information
404 * 444 *
405 * @i2400m: device descriptor 445 * @i2400m: device descriptor
406 * @l3l4_hdr: pointer to message; it has been already validated for 446 * @l3l4_hdr: pointer to message; it has been already validated for
@@ -409,13 +449,7 @@ out:
409 * declaration is assumed to be congruent with @size (as in 449 * declaration is assumed to be congruent with @size (as in
410 * sizeof(*l3l4_hdr) + l3l4_hdr->length == size) 450 * sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
411 * 451 *
412 * Extract from the report state the system state TLV and infer from 452 * Walk over the TLVs in a report state and act on them.
413 * there if we have a carrier or not. Update our local state and tell
414 * netdev.
415 *
416 * When setting the carrier, it's fine to set OFF twice (for example),
417 * as netif_carrier_off() will not generate two OFF events (just on
418 * the transitions).
419 */ 453 */
420static 454static
421void i2400m_report_state_hook(struct i2400m *i2400m, 455void i2400m_report_state_hook(struct i2400m *i2400m,
@@ -424,9 +458,6 @@ void i2400m_report_state_hook(struct i2400m *i2400m,
424{ 458{
425 struct device *dev = i2400m_dev(i2400m); 459 struct device *dev = i2400m_dev(i2400m);
426 const struct i2400m_tlv_hdr *tlv; 460 const struct i2400m_tlv_hdr *tlv;
427 const struct i2400m_tlv_system_state *ss;
428 const struct i2400m_tlv_rf_switches_status *rfss;
429 const struct i2400m_tlv_media_status *ms;
430 size_t tlv_size = le16_to_cpu(l3l4_hdr->length); 461 size_t tlv_size = le16_to_cpu(l3l4_hdr->length);
431 462
432 d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n", 463 d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n",
@@ -434,34 +465,8 @@ void i2400m_report_state_hook(struct i2400m *i2400m,
434 tlv = NULL; 465 tlv = NULL;
435 466
436 while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl, 467 while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
437 tlv_size, tlv))) { 468 tlv_size, tlv)))
438 if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, 469 i2400m_report_state_parse_tlv(i2400m, tlv, tag);
439 sizeof(*ss))) {
440 ss = container_of(tlv, typeof(*ss), hdr);
441 d_printf(2, dev, "%s: system state TLV "
442 "found (0x%04x), state 0x%08x\n",
443 tag, I2400M_TLV_SYSTEM_STATE,
444 le32_to_cpu(ss->state));
445 i2400m_report_tlv_system_state(i2400m, ss);
446 }
447 if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS,
448 sizeof(*rfss))) {
449 rfss = container_of(tlv, typeof(*rfss), hdr);
450 d_printf(2, dev, "%s: RF status TLV "
451 "found (0x%04x), sw 0x%02x hw 0x%02x\n",
452 tag, I2400M_TLV_RF_STATUS,
453 le32_to_cpu(rfss->sw_rf_switch),
454 le32_to_cpu(rfss->hw_rf_switch));
455 i2400m_report_tlv_rf_switches_status(i2400m, rfss);
456 }
457 if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS,
458 sizeof(*ms))) {
459 ms = container_of(tlv, typeof(*ms), hdr);
460 d_printf(2, dev, "%s: Media Status TLV: %u\n",
461 tag, le32_to_cpu(ms->media_status));
462 i2400m_report_tlv_media_status(i2400m, ms);
463 }
464 }
465 d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n", 470 d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n",
466 i2400m, l3l4_hdr, size, tag); 471 i2400m, l3l4_hdr, size, tag);
467} 472}
@@ -721,6 +726,8 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
721 ack_timeout = HZ; 726 ack_timeout = HZ;
722 }; 727 };
723 728
729 if (unlikely(i2400m->trace_msg_from_user))
730 wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
724 /* The RX path in rx.c will put any response for this message 731 /* The RX path in rx.c will put any response for this message
725 * in i2400m->ack_skb and wake us up. If we cancel the wait, 732 * in i2400m->ack_skb and wake us up. If we cancel the wait,
726 * we need to change the value of i2400m->ack_skb to something 733 * we need to change the value of i2400m->ack_skb to something
@@ -755,6 +762,9 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
755 ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len); 762 ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len);
756 763
757 /* Check the ack and deliver it if it is ok */ 764 /* Check the ack and deliver it if it is ok */
765 if (unlikely(i2400m->trace_msg_from_user))
766 wimax_msg(&i2400m->wimax_dev, "echo",
767 ack_l3l4_hdr, ack_len, GFP_KERNEL);
758 result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len); 768 result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len);
759 if (result < 0) { 769 if (result < 0) {
760 dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n", 770 dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n",
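The control.c refactor above pulls the body of the TLV loop out of i2400m_report_state_hook() into a single parse helper. The general shape of such a walk over length-prefixed TLVs, sketched with invented names (struct example_tlv and example_parse_one are not from the driver, which uses i2400m_tlv_buffer_walk() and i2400m_report_state_parse_tlv()):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_tlv {
        __le16 type;
        __le16 length;          /* payload bytes following this header */
        u8 pl[0];
};

/* Walk a buffer of TLVs and hand each complete one to a parse helper. */
static void example_walk(const void *buf, size_t size,
                         void (*parse_one)(const struct example_tlv *))
{
        const u8 *p = buf;
        size_t left = size;

        while (left >= sizeof(struct example_tlv)) {
                const struct example_tlv *tlv = (const void *) p;
                size_t tlv_len = sizeof(*tlv) + le16_to_cpu(tlv->length);

                if (tlv_len > left)
                        break;          /* truncated TLV: stop */
                parse_one(tlv);
                p += tlv_len;
                left -= tlv_len;
        }
}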
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 07a54bad237b..ef16c573bb22 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -62,6 +62,7 @@
62 * unregister_netdev() 62 * unregister_netdev()
63 */ 63 */
64#include "i2400m.h" 64#include "i2400m.h"
65#include <linux/etherdevice.h>
65#include <linux/wimax/i2400m.h> 66#include <linux/wimax/i2400m.h>
66#include <linux/module.h> 67#include <linux/module.h>
67#include <linux/moduleparam.h> 68#include <linux/moduleparam.h>
@@ -234,9 +235,6 @@ int i2400m_op_msg_from_user(struct wimax_dev *wimax_dev,
234 result = PTR_ERR(ack_skb); 235 result = PTR_ERR(ack_skb);
235 if (IS_ERR(ack_skb)) 236 if (IS_ERR(ack_skb))
236 goto error_msg_to_dev; 237 goto error_msg_to_dev;
237 if (unlikely(i2400m->trace_msg_from_user))
238 wimax_msg(&i2400m->wimax_dev, "trace",
239 msg_buf, msg_len, GFP_KERNEL);
240 result = wimax_msg_send(&i2400m->wimax_dev, ack_skb); 238 result = wimax_msg_send(&i2400m->wimax_dev, ack_skb);
241error_msg_to_dev: 239error_msg_to_dev:
242 d_fnend(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p msg_len %zu " 240 d_fnend(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p msg_len %zu "
@@ -650,6 +648,7 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
650 result = i2400m_read_mac_addr(i2400m); 648 result = i2400m_read_mac_addr(i2400m);
651 if (result < 0) 649 if (result < 0)
652 goto error_read_mac_addr; 650 goto error_read_mac_addr;
651 random_ether_addr(i2400m->src_mac_addr);
653 652
654 result = register_netdev(net_dev); /* Okey dokey, bring it up */ 653 result = register_netdev(net_dev); /* Okey dokey, bring it up */
655 if (result < 0) { 654 if (result < 0) {
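The random_ether_addr() call added above gives the driver a stable, randomly generated source MAC for the fake ethernet headers it builds on receive (see the netdev.c hunk further down). For reference, a sketch of what the helper provides; example_pick_src_mac is invented:

#include <linux/etherdevice.h>

/* random_ether_addr() fills the buffer with random bytes, clears the
 * multicast bit and sets the locally-administered bit, so the result
 * is a valid unicast address that cannot clash with a vendor OUI. */
static void example_pick_src_mac(u8 addr[ETH_ALEN])
{
        random_ether_addr(addr);
        /* (addr[0] & 0x01) == 0  -> unicast
         * (addr[0] & 0x02) != 0  -> locally administered */
}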
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 3ae2df38b59a..434ba310c2fe 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -323,6 +323,10 @@ struct i2400m_roq;
323 * delivered. Then the driver can release them to the host. See 323 * delivered. Then the driver can release them to the host. See
324 * drivers/net/i2400m/rx.c for details. 324 * drivers/net/i2400m/rx.c for details.
325 * 325 *
326 * @src_mac_addr: MAC address used to make ethernet packets be coming
327 * from. This is generated at i2400m_setup() time and used during
328 * the life cycle of the instance. See i2400m_fake_eth_header().
329 *
326 * @init_mutex: Mutex used for serializing the device bringup 330 * @init_mutex: Mutex used for serializing the device bringup
327 * sequence; this way if the device reboots in the middle, we 331 * sequence; this way if the device reboots in the middle, we
328 * don't try to do a bringup again while we are tearing down the 332 * don't try to do a bringup again while we are tearing down the
@@ -421,6 +425,7 @@ struct i2400m {
421 unsigned rx_pl_num, rx_pl_max, rx_pl_min, 425 unsigned rx_pl_num, rx_pl_max, rx_pl_min,
422 rx_num, rx_size_acc, rx_size_min, rx_size_max; 426 rx_num, rx_size_acc, rx_size_min, rx_size_max;
423 struct i2400m_roq *rx_roq; /* not under rx_lock! */ 427 struct i2400m_roq *rx_roq; /* not under rx_lock! */
428 u8 src_mac_addr[ETH_HLEN];
424 429
425 struct mutex msg_mutex; /* serialize command execution */ 430 struct mutex msg_mutex; /* serialize command execution */
426 struct completion msg_completion; 431 struct completion msg_completion;
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 6b1fe7a81f25..9653f478b382 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -404,10 +404,12 @@ static
404void i2400m_rx_fake_eth_header(struct net_device *net_dev, 404void i2400m_rx_fake_eth_header(struct net_device *net_dev,
405 void *_eth_hdr, __be16 protocol) 405 void *_eth_hdr, __be16 protocol)
406{ 406{
407 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
407 struct ethhdr *eth_hdr = _eth_hdr; 408 struct ethhdr *eth_hdr = _eth_hdr;
408 409
409 memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest)); 410 memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
410 memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_dest)); 411 memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
412 sizeof(eth_hdr->h_source));
411 eth_hdr->h_proto = protocol; 413 eth_hdr->h_proto = protocol;
412} 414}
413 415
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index f9fc38902322..7643850a6fb8 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -177,7 +177,8 @@ void i2400m_report_hook_work(struct work_struct *ws)
177 struct i2400m_work *iw = 177 struct i2400m_work *iw =
178 container_of(ws, struct i2400m_work, ws); 178 container_of(ws, struct i2400m_work, ws);
179 struct i2400m_report_hook_args *args = (void *) iw->pl; 179 struct i2400m_report_hook_args *args = (void *) iw->pl;
180 i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size); 180 if (iw->i2400m->ready)
181 i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
181 kfree_skb(args->skb_rx); 182 kfree_skb(args->skb_rx);
182 i2400m_put(iw->i2400m); 183 i2400m_put(iw->i2400m);
183 kfree(iw); 184 kfree(iw);
@@ -309,6 +310,9 @@ void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
309 skb_get(skb_rx); 310 skb_get(skb_rx);
310 i2400m_queue_work(i2400m, i2400m_report_hook_work, 311 i2400m_queue_work(i2400m, i2400m_report_hook_work,
311 GFP_KERNEL, &args, sizeof(args)); 312 GFP_KERNEL, &args, sizeof(args));
313 if (unlikely(i2400m->trace_msg_from_user))
314 wimax_msg(&i2400m->wimax_dev, "echo",
315 l3l4_hdr, size, GFP_KERNEL);
312 result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size, 316 result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
313 GFP_KERNEL); 317 GFP_KERNEL);
314 if (result < 0) 318 if (result < 0)
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 5ac5e76701cd..777c981676fc 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -409,19 +409,19 @@ int i2400ms_probe(struct sdio_func *func,
409 i2400m->bus_fw_names = i2400ms_bus_fw_names; 409 i2400m->bus_fw_names = i2400ms_bus_fw_names;
410 i2400m->bus_bm_mac_addr_impaired = 1; 410 i2400m->bus_bm_mac_addr_impaired = 1;
411 411
412 result = i2400ms_enable_function(i2400ms->func);
413 if (result < 0) {
414 dev_err(dev, "Cannot enable SDIO function: %d\n", result);
415 goto error_func_enable;
416 }
417
418 sdio_claim_host(func); 412 sdio_claim_host(func);
419 result = sdio_set_block_size(func, I2400MS_BLK_SIZE); 413 result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
414 sdio_release_host(func);
420 if (result < 0) { 415 if (result < 0) {
421 dev_err(dev, "Failed to set block size: %d\n", result); 416 dev_err(dev, "Failed to set block size: %d\n", result);
422 goto error_set_blk_size; 417 goto error_set_blk_size;
423 } 418 }
424 sdio_release_host(func); 419
420 result = i2400ms_enable_function(i2400ms->func);
421 if (result < 0) {
422 dev_err(dev, "Cannot enable SDIO function: %d\n", result);
423 goto error_func_enable;
424 }
425 425
426 result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT); 426 result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
427 if (result < 0) { 427 if (result < 0) {
@@ -440,12 +440,12 @@ int i2400ms_probe(struct sdio_func *func,
440error_debugfs_add: 440error_debugfs_add:
441 i2400m_release(i2400m); 441 i2400m_release(i2400m);
442error_setup: 442error_setup:
443 sdio_set_drvdata(func, NULL);
444 sdio_claim_host(func); 443 sdio_claim_host(func);
445error_set_blk_size:
446 sdio_disable_func(func); 444 sdio_disable_func(func);
447 sdio_release_host(func); 445 sdio_release_host(func);
448error_func_enable: 446error_func_enable:
447error_set_blk_size:
448 sdio_set_drvdata(func, NULL);
449 free_netdev(net_dev); 449 free_netdev(net_dev);
450error_alloc_netdev: 450error_alloc_netdev:
451 return result; 451 return result;
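The sdio.c reorder above sets the block size before enabling the function and fixes the error-unwind labels to match. A compressed sketch of the resulting ordering, using the raw SDIO core calls rather than the driver's i2400ms_enable_function() wrapper; example_sdio_setup is invented:

#include <linux/mmc/sdio_func.h>

/* Both sdio_set_block_size() and sdio_enable_func() must run with the
 * host claimed; do the configuration first, then enable. */
static int example_sdio_setup(struct sdio_func *func, unsigned int blksz)
{
        int ret;

        sdio_claim_host(func);
        ret = sdio_set_block_size(func, blksz);
        if (ret == 0)
                ret = sdio_enable_func(func);
        sdio_release_host(func);

        return ret;
}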
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index ca4151a9e222..17851321b7fd 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -505,27 +505,52 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
505#ifdef CONFIG_PM 505#ifdef CONFIG_PM
506 struct usb_device *usb_dev = i2400mu->usb_dev; 506 struct usb_device *usb_dev = i2400mu->usb_dev;
507#endif 507#endif
508 unsigned is_autosuspend = 0;
508 struct i2400m *i2400m = &i2400mu->i2400m; 509 struct i2400m *i2400m = &i2400mu->i2400m;
509 510
511#ifdef CONFIG_PM
512 if (usb_dev->auto_pm > 0)
513 is_autosuspend = 1;
514#endif
515
510 d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event); 516 d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
511 if (i2400m->updown == 0) 517 if (i2400m->updown == 0)
512 goto no_firmware; 518 goto no_firmware;
513 d_printf(1, dev, "fw up, requesting standby\n"); 519 if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) {
520 /* ugh -- the device is connected and this suspend
521 * request is an autosuspend one (not a system standby
522 * / hibernate).
523 *
524 * The only way the device can go to standby is if the
525 * link with the base station is in IDLE mode; that
526 * were the case, we'd be in status
527 * I2400M_SS_CONNECTED_IDLE. But we are not.
528 *
529 * If we *tell* him to go power save now, it'll reset
530 * as a precautionary measure, so if this is an
531 * autosuspend thing, say no and it'll come back
532 * later, when the link is IDLE
533 */
534 result = -EBADF;
535 d_printf(1, dev, "fw up, link up, not-idle, autosuspend: "
536 "not entering powersave\n");
537 goto error_not_now;
538 }
539 d_printf(1, dev, "fw up: entering powersave\n");
514 atomic_dec(&i2400mu->do_autopm); 540 atomic_dec(&i2400mu->do_autopm);
515 result = i2400m_cmd_enter_powersave(i2400m); 541 result = i2400m_cmd_enter_powersave(i2400m);
516 atomic_inc(&i2400mu->do_autopm); 542 atomic_inc(&i2400mu->do_autopm);
517#ifdef CONFIG_PM 543 if (result < 0 && !is_autosuspend) {
518 if (result < 0 && usb_dev->auto_pm == 0) {
519 /* System suspend, can't fail */ 544 /* System suspend, can't fail */
520 dev_err(dev, "failed to suspend, will reset on resume\n"); 545 dev_err(dev, "failed to suspend, will reset on resume\n");
521 result = 0; 546 result = 0;
522 } 547 }
523#endif
524 if (result < 0) 548 if (result < 0)
525 goto error_enter_powersave; 549 goto error_enter_powersave;
526 i2400mu_notification_release(i2400mu); 550 i2400mu_notification_release(i2400mu);
527 d_printf(1, dev, "fw up, got standby\n"); 551 d_printf(1, dev, "powersave requested\n");
528error_enter_powersave: 552error_enter_powersave:
553error_not_now:
529no_firmware: 554no_firmware:
530 d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n", 555 d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n",
531 iface, pm_msg.event, result); 556 iface, pm_msg.event, result);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index a67d29290ba0..fb7541c28e58 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -153,7 +153,7 @@ config LIBERTAS_SDIO
153 153
154config LIBERTAS_SPI 154config LIBERTAS_SPI
155 tristate "Marvell Libertas 8686 SPI 802.11b/g cards" 155 tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
156 depends on LIBERTAS && SPI && GENERIC_GPIO 156 depends on LIBERTAS && SPI
157 ---help--- 157 ---help---
158 A driver for Marvell Libertas 8686 SPI devices. 158 A driver for Marvell Libertas 8686 SPI devices.
159 159
@@ -333,11 +333,11 @@ config USB_ZD1201
333config USB_NET_RNDIS_WLAN 333config USB_NET_RNDIS_WLAN
334 tristate "Wireless RNDIS USB support" 334 tristate "Wireless RNDIS USB support"
335 depends on USB && WLAN_80211 && EXPERIMENTAL 335 depends on USB && WLAN_80211 && EXPERIMENTAL
336 depends on CFG80211
336 select USB_USBNET 337 select USB_USBNET
337 select USB_NET_CDCETHER 338 select USB_NET_CDCETHER
338 select USB_NET_RNDIS_HOST 339 select USB_NET_RNDIS_HOST
339 select WIRELESS_EXT 340 select WIRELESS_EXT
340 select CFG80211
341 ---help--- 341 ---help---
342 This is a driver for wireless RNDIS devices. 342 This is a driver for wireless RNDIS devices.
343 These are USB based adapters found in devices such as: 343 These are USB based adapters found in devices such as:
@@ -431,6 +431,7 @@ config RTL8187
431 ASUS P5B Deluxe 431 ASUS P5B Deluxe
432 Toshiba Satellite Pro series of laptops 432 Toshiba Satellite Pro series of laptops
433 Asus Wireless Link 433 Asus Wireless Link
434 Linksys WUSB54GC-EU
434 435
435 Thanks to Realtek for their support! 436 Thanks to Realtek for their support!
436 437
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index cea7f1466c54..4efbdbe6d6bf 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1873,18 +1873,18 @@ static void at76_dwork_hw_scan(struct work_struct *work)
1873 if (ret != CMD_STATUS_COMPLETE) { 1873 if (ret != CMD_STATUS_COMPLETE) {
1874 queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan, 1874 queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan,
1875 SCAN_POLL_INTERVAL); 1875 SCAN_POLL_INTERVAL);
1876 goto exit; 1876 mutex_unlock(&priv->mtx);
1877 return;
1877 } 1878 }
1878 1879
1879 ieee80211_scan_completed(priv->hw, false);
1880
1881 if (is_valid_ether_addr(priv->bssid)) 1880 if (is_valid_ether_addr(priv->bssid))
1882 at76_join(priv); 1881 at76_join(priv);
1883 1882
1884 ieee80211_wake_queues(priv->hw);
1885
1886exit:
1887 mutex_unlock(&priv->mtx); 1883 mutex_unlock(&priv->mtx);
1884
1885 ieee80211_scan_completed(priv->hw, false);
1886
1887 ieee80211_wake_queues(priv->hw);
1888} 1888}
1889 1889
1890static int at76_hw_scan(struct ieee80211_hw *hw, 1890static int at76_hw_scan(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 17bd3eaf3e03..bb97981fb248 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -91,6 +91,7 @@ struct ar9170_led {
91 struct led_classdev l; 91 struct led_classdev l;
92 char name[32]; 92 char name[32];
93 unsigned int toggled; 93 unsigned int toggled;
94 bool last_state;
94 bool registered; 95 bool registered;
95}; 96};
96 97
@@ -101,7 +102,6 @@ enum ar9170_device_state {
101 AR9170_STOPPED, 102 AR9170_STOPPED,
102 AR9170_IDLE, 103 AR9170_IDLE,
103 AR9170_STARTED, 104 AR9170_STARTED,
104 AR9170_ASSOCIATED,
105}; 105};
106 106
107struct ar9170_rxstream_mpdu_merge { 107struct ar9170_rxstream_mpdu_merge {
@@ -109,6 +109,11 @@ struct ar9170_rxstream_mpdu_merge {
109 bool has_plcp; 109 bool has_plcp;
110}; 110};
111 111
112#define AR9170_QUEUE_TIMEOUT 64
113#define AR9170_TX_TIMEOUT 8
114#define AR9170_JANITOR_DELAY 128
115#define AR9170_TX_INVALID_RATE 0xffffffff
116
112struct ar9170 { 117struct ar9170 {
113 struct ieee80211_hw *hw; 118 struct ieee80211_hw *hw;
114 struct mutex mutex; 119 struct mutex mutex;
@@ -117,10 +122,11 @@ struct ar9170 {
117 122
118 int (*open)(struct ar9170 *); 123 int (*open)(struct ar9170 *);
119 void (*stop)(struct ar9170 *); 124 void (*stop)(struct ar9170 *);
120 int (*tx)(struct ar9170 *, struct sk_buff *, bool, unsigned int); 125 int (*tx)(struct ar9170 *, struct sk_buff *);
121 int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 , 126 int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 ,
122 void *, u32 , void *); 127 void *, u32 , void *);
123 void (*callback_cmd)(struct ar9170 *, u32 , void *); 128 void (*callback_cmd)(struct ar9170 *, u32 , void *);
129 int (*flush)(struct ar9170 *);
124 130
125 /* interface mode settings */ 131 /* interface mode settings */
126 struct ieee80211_vif *vif; 132 struct ieee80211_vif *vif;
@@ -140,7 +146,7 @@ struct ar9170 {
140 struct work_struct filter_config_work; 146 struct work_struct filter_config_work;
141 u64 cur_mc_hash, want_mc_hash; 147 u64 cur_mc_hash, want_mc_hash;
142 u32 cur_filter, want_filter; 148 u32 cur_filter, want_filter;
143 unsigned int filter_changed; 149 unsigned long filter_changed;
144 unsigned int filter_state; 150 unsigned int filter_state;
145 bool sniffer_enabled; 151 bool sniffer_enabled;
146 152
@@ -177,10 +183,10 @@ struct ar9170 {
177 struct ar9170_eeprom eeprom; 183 struct ar9170_eeprom eeprom;
178 struct ath_regulatory regulatory; 184 struct ath_regulatory regulatory;
179 185
180 /* global tx status for unregistered Stations. */ 186 /* tx queues - as seen by hw - */
181 struct sk_buff_head global_tx_status; 187 struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
182 struct sk_buff_head global_tx_status_waste; 188 struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
183 struct delayed_work tx_status_janitor; 189 struct delayed_work tx_janitor;
184 190
185 /* rxstream mpdu merge */ 191 /* rxstream mpdu merge */
186 struct ar9170_rxstream_mpdu_merge rx_mpdu; 192 struct ar9170_rxstream_mpdu_merge rx_mpdu;
@@ -189,13 +195,21 @@ struct ar9170 {
189}; 195};
190 196
191struct ar9170_sta_info { 197struct ar9170_sta_info {
192 struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
193}; 198};
194 199
195#define IS_STARTED(a) (a->state >= AR9170_STARTED) 200#define AR9170_TX_FLAG_WAIT_FOR_ACK BIT(0)
196#define IS_ACCEPTING_CMD(a) (a->state >= AR9170_IDLE) 201#define AR9170_TX_FLAG_NO_ACK BIT(1)
202#define AR9170_TX_FLAG_BLOCK_ACK BIT(2)
203
204struct ar9170_tx_info {
205 unsigned long timeout;
206 unsigned int flags;
207};
208
209#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
210#define IS_ACCEPTING_CMD(a) (((struct ar9170 *)a)->state >= AR9170_IDLE)
197 211
198#define AR9170_FILTER_CHANGED_PROMISC BIT(0) 212#define AR9170_FILTER_CHANGED_MODE BIT(0)
199#define AR9170_FILTER_CHANGED_MULTICAST BIT(1) 213#define AR9170_FILTER_CHANGED_MULTICAST BIT(1)
200#define AR9170_FILTER_CHANGED_FRAMEFILTER BIT(2) 214#define AR9170_FILTER_CHANGED_FRAMEFILTER BIT(2)
201 215
@@ -204,8 +218,9 @@ void *ar9170_alloc(size_t priv_size);
204int ar9170_register(struct ar9170 *ar, struct device *pdev); 218int ar9170_register(struct ar9170 *ar, struct device *pdev);
205void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb); 219void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb);
206void ar9170_unregister(struct ar9170 *ar); 220void ar9170_unregister(struct ar9170 *ar);
207void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb, 221void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb);
208 bool update_statistics, u16 tx_status); 222void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
223int ar9170_nag_limiter(struct ar9170 *ar);
209 224
210/* MAC */ 225/* MAC */
211int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 226int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -215,6 +230,9 @@ int ar9170_update_multicast(struct ar9170 *ar);
215int ar9170_update_frame_filter(struct ar9170 *ar); 230int ar9170_update_frame_filter(struct ar9170 *ar);
216int ar9170_set_operating_mode(struct ar9170 *ar); 231int ar9170_set_operating_mode(struct ar9170 *ar);
217int ar9170_set_beacon_timers(struct ar9170 *ar); 232int ar9170_set_beacon_timers(struct ar9170 *ar);
233int ar9170_set_dyn_sifs_ack(struct ar9170 *ar);
234int ar9170_set_slot_time(struct ar9170 *ar);
235int ar9170_set_basic_rates(struct ar9170 *ar);
218int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry); 236int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry);
219int ar9170_update_beacon(struct ar9170 *ar); 237int ar9170_update_beacon(struct ar9170 *ar);
220void ar9170_new_beacon(struct work_struct *work); 238void ar9170_new_beacon(struct work_struct *work);
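The new struct ar9170_tx_info above lives in the rate_driver_data scratch space of ieee80211_tx_info, so every queued frame carries its own deadline and ACK policy. A sketch of how a frame might be stamped before queueing, mirroring the fields defined above; example_stamp_frame itself is invented, and which timeout constant applies at which stage is the driver's policy:

#include <linux/jiffies.h>
#include <net/mac80211.h>
#include "ar9170.h"     /* struct ar9170_tx_info, AR9170_* from the hunks above */

static void example_stamp_frame(struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;

        /* expire the frame if no tx_status report arrives in time */
        arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_TX_TIMEOUT);
        arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
}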
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 3293e0fb24fb..6cbfb2f83391 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -207,7 +207,8 @@ enum ar9170_cmd {
207#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44) 207#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44)
208#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48) 208#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48)
209 209
210#define AR9170_MAC_REG_AMPDU_SET (AR9170_MAC_REG_BASE + 0xba0) 210#define AR9170_MAC_REG_AMPDU_FACTOR (AR9170_MAC_REG_BASE + 0xB9C)
211#define AR9170_MAC_REG_AMPDU_DENSITY (AR9170_MAC_REG_BASE + 0xBA0)
211 212
212#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00) 213#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00)
213#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50) 214#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50)
@@ -376,7 +377,6 @@ static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
376#define AR9170_RX_ERROR_FATAL 0x80 377#define AR9170_RX_ERROR_FATAL 0x80
377 378
378struct ar9170_cmd_tx_status { 379struct ar9170_cmd_tx_status {
379 __le16 unkn;
380 u8 dst[ETH_ALEN]; 380 u8 dst[ETH_ALEN];
381 __le32 rate; 381 __le32 rate;
382 __le16 status; 382 __le16 status;
@@ -394,6 +394,7 @@ struct ar9170_cmd_ba_failed_count {
394struct ar9170_cmd_response { 394struct ar9170_cmd_response {
395 u8 flag; 395 u8 flag;
396 u8 type; 396 u8 type;
397 __le16 padding;
397 398
398 union { 399 union {
399 struct ar9170_cmd_tx_status tx_status; 400 struct ar9170_cmd_tx_status tx_status;
@@ -419,4 +420,7 @@ enum ar9170_txq {
419 __AR9170_NUM_TXQ, 420 __AR9170_NUM_TXQ,
420}; 421};
421 422
423#define AR9170_TXQ_DEPTH 32
424#define AR9170_TX_MAX_PENDING 128
425
422#endif /* __AR9170_HW_H */ 426#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ath/ar9170/led.c b/drivers/net/wireless/ath/ar9170/led.c
index 341cead7f606..63fda6cd2101 100644
--- a/drivers/net/wireless/ath/ar9170/led.c
+++ b/drivers/net/wireless/ath/ar9170/led.c
@@ -74,7 +74,7 @@ static void ar9170_update_leds(struct work_struct *work)
74 74
75 mutex_lock(&ar->mutex); 75 mutex_lock(&ar->mutex);
76 for (i = 0; i < AR9170_NUM_LEDS; i++) 76 for (i = 0; i < AR9170_NUM_LEDS; i++)
77 if (ar->leds[i].toggled) { 77 if (ar->leds[i].registered && ar->leds[i].toggled) {
78 led_val |= 1 << i; 78 led_val |= 1 << i;
79 79
80 tmp = 70 + 200 / (ar->leds[i].toggled); 80 tmp = 70 + 200 / (ar->leds[i].toggled);
@@ -101,9 +101,15 @@ static void ar9170_led_brightness_set(struct led_classdev *led,
101 struct ar9170_led *arl = container_of(led, struct ar9170_led, l); 101 struct ar9170_led *arl = container_of(led, struct ar9170_led, l);
102 struct ar9170 *ar = arl->ar; 102 struct ar9170 *ar = arl->ar;
103 103
104 arl->toggled++; 104 if (unlikely(!arl->registered))
105 return ;
106
107 if (arl->last_state != !!brightness) {
108 arl->toggled++;
109 arl->last_state = !!brightness;
110 }
105 111
106 if (likely(IS_ACCEPTING_CMD(ar) && brightness)) 112 if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
107 queue_delayed_work(ar->hw->workqueue, &ar->led_work, HZ/10); 113 queue_delayed_work(ar->hw->workqueue, &ar->led_work, HZ/10);
108} 114}
109 115
@@ -136,13 +142,14 @@ void ar9170_unregister_leds(struct ar9170 *ar)
136{ 142{
137 int i; 143 int i;
138 144
139 cancel_delayed_work_sync(&ar->led_work);
140
141 for (i = 0; i < AR9170_NUM_LEDS; i++) 145 for (i = 0; i < AR9170_NUM_LEDS; i++)
142 if (ar->leds[i].registered) { 146 if (ar->leds[i].registered) {
143 led_classdev_unregister(&ar->leds[i].l); 147 led_classdev_unregister(&ar->leds[i].l);
144 ar->leds[i].registered = false; 148 ar->leds[i].registered = false;
149 ar->leds[i].toggled = 0;
145 } 150 }
151
152 cancel_delayed_work_sync(&ar->led_work);
146} 153}
147 154
148int ar9170_register_leds(struct ar9170 *ar) 155int ar9170_register_leds(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index 43aeb69685d3..d9f1f46de183 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -38,6 +38,55 @@
38#include "ar9170.h" 38#include "ar9170.h"
39#include "cmd.h" 39#include "cmd.h"
40 40
41int ar9170_set_dyn_sifs_ack(struct ar9170 *ar)
42{
43 u32 val;
44
45 if (conf_is_ht40(&ar->hw->conf))
46 val = 0x010a;
47 else {
48 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
49 val = 0x105;
50 else
51 val = 0x104;
52 }
53
54 return ar9170_write_reg(ar, AR9170_MAC_REG_DYNAMIC_SIFS_ACK, val);
55}
56
57int ar9170_set_slot_time(struct ar9170 *ar)
58{
59 u32 slottime = 20;
60
61 if (!ar->vif)
62 return 0;
63
64 if ((ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ) ||
65 ar->vif->bss_conf.use_short_slot)
66 slottime = 9;
67
68 return ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME, slottime << 10);
69}
70
71int ar9170_set_basic_rates(struct ar9170 *ar)
72{
73 u8 cck, ofdm;
74
75 if (!ar->vif)
76 return 0;
77
78 ofdm = ar->vif->bss_conf.basic_rates >> 4;
79
80 /* FIXME: is still necessary? */
81 if (ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
82 cck = 0;
83 else
84 cck = ar->vif->bss_conf.basic_rates & 0xf;
85
86 return ar9170_write_reg(ar, AR9170_MAC_REG_BASIC_RATE,
87 ofdm << 8 | cck);
88}
89
41int ar9170_set_qos(struct ar9170 *ar) 90int ar9170_set_qos(struct ar9170 *ar)
42{ 91{
43 ar9170_regwrite_begin(ar); 92 ar9170_regwrite_begin(ar);
@@ -84,7 +133,7 @@ static int ar9170_set_ampdu_density(struct ar9170 *ar, u8 mpdudensity)
84 val = 0x140a00 | (mpdudensity ? (mpdudensity + 1) : 0); 133 val = 0x140a00 | (mpdudensity ? (mpdudensity + 1) : 0);
85 134
86 ar9170_regwrite_begin(ar); 135 ar9170_regwrite_begin(ar);
87 ar9170_regwrite(AR9170_MAC_REG_AMPDU_SET, val); 136 ar9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, val);
88 ar9170_regwrite_finish(); 137 ar9170_regwrite_finish();
89 138
90 return ar9170_regwrite_result(); 139 return ar9170_regwrite_result();
@@ -398,10 +447,10 @@ int ar9170_update_beacon(struct ar9170 *ar)
398 /* XXX: use skb->cb info */ 447 /* XXX: use skb->cb info */
399 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) 448 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
400 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP, 449 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
401 ((skb->len + 4) << (3+16)) + 0x0400); 450 ((skb->len + 4) << (3 + 16)) + 0x0400);
402 else 451 else
403 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP, 452 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
404 ((skb->len + 4) << (3+16)) + 0x0400); 453 ((skb->len + 4) << 16) + 0x001b);
405 454
406 ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4); 455 ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4);
407 ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS); 456 ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS);
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 99df9ddae9cb..9d38cf60a0db 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -146,7 +146,6 @@ static struct ieee80211_channel ar9170_5ghz_chantable[] = {
146{ \ 146{ \
147 .ht_supported = true, \ 147 .ht_supported = true, \
148 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \ 148 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
149 IEEE80211_HT_CAP_SM_PS | \
150 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \ 149 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
151 IEEE80211_HT_CAP_SGI_40 | \ 150 IEEE80211_HT_CAP_SGI_40 | \
152 IEEE80211_HT_CAP_DSSSCCK40 | \ 151 IEEE80211_HT_CAP_DSSSCCK40 | \
@@ -174,59 +173,122 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
174 .ht_cap = AR9170_HT_CAP, 173 .ht_cap = AR9170_HT_CAP,
175}; 174};
176 175
177#ifdef AR9170_QUEUE_DEBUG 176static void ar9170_tx(struct ar9170 *ar);
178/*
179 * In case some wants works with AR9170's crazy tx_status queueing techniques.
180 * He might need this rather useful probing function.
181 *
182 * NOTE: caller must hold the queue's spinlock!
183 */
184 177
178#ifdef AR9170_QUEUE_DEBUG
185static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb) 179static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
186{ 180{
187 struct ar9170_tx_control *txc = (void *) skb->data; 181 struct ar9170_tx_control *txc = (void *) skb->data;
188 struct ieee80211_hdr *hdr = (void *)txc->frame_data; 182 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
183 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
184 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
189 185
190 printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] " 186 printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x "
191 "mac_control:%04x, phy_control:%08x]\n", 187 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
192 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb), 188 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
193 ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control), 189 ieee80211_get_DA(hdr), arinfo->flags,
194 le32_to_cpu(txc->phy_control)); 190 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
191 jiffies_to_msecs(arinfo->timeout - jiffies));
195} 192}
196 193
197static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar, 194static void __ar9170_dump_txqueue(struct ar9170 *ar,
198 struct sk_buff_head *queue) 195 struct sk_buff_head *queue)
199{ 196{
200 struct sk_buff *skb; 197 struct sk_buff *skb;
201 int i = 0; 198 int i = 0;
202 199
203 printk(KERN_DEBUG "---[ cut here ]---\n"); 200 printk(KERN_DEBUG "---[ cut here ]---\n");
204 printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n", 201 printk(KERN_DEBUG "%s: %d entries in queue.\n",
205 wiphy_name(ar->hw->wiphy), skb_queue_len(queue)); 202 wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
206 203
207 skb_queue_walk(queue, skb) { 204 skb_queue_walk(queue, skb) {
208 struct ar9170_tx_control *txc = (void *) skb->data; 205 printk(KERN_DEBUG "index:%d => \n", i++);
209 struct ieee80211_hdr *hdr = (void *)txc->frame_data;
210
211 printk(KERN_DEBUG "index:%d => \n", i);
212 ar9170_print_txheader(ar, skb); 206 ar9170_print_txheader(ar, skb);
213 } 207 }
208 if (i != skb_queue_len(queue))
209 printk(KERN_DEBUG "WARNING: queue frame counter "
210 "mismatch %d != %d\n", skb_queue_len(queue), i);
214 printk(KERN_DEBUG "---[ end ]---\n"); 211 printk(KERN_DEBUG "---[ end ]---\n");
215} 212}
216#endif /* AR9170_QUEUE_DEBUG */
217 213
218void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb, 214static void ar9170_dump_txqueue(struct ar9170 *ar,
219 bool valid_status, u16 tx_status) 215 struct sk_buff_head *queue)
216{
217 unsigned long flags;
218
219 spin_lock_irqsave(&queue->lock, flags);
220 __ar9170_dump_txqueue(ar, queue);
221 spin_unlock_irqrestore(&queue->lock, flags);
222}
223
224static void __ar9170_dump_txstats(struct ar9170 *ar)
225{
226 int i;
227
228 printk(KERN_DEBUG "%s: QoS queue stats\n",
229 wiphy_name(ar->hw->wiphy));
230
231 for (i = 0; i < __AR9170_NUM_TXQ; i++)
232 printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d\n",
233 wiphy_name(ar->hw->wiphy), i, ar->tx_stats[i].limit,
234 ar->tx_stats[i].len, skb_queue_len(&ar->tx_status[i]));
235}
236
237static void ar9170_dump_txstats(struct ar9170 *ar)
220{ 238{
221 struct ieee80211_tx_info *txinfo;
222 unsigned int retries = 0, queue = skb_get_queue_mapping(skb);
223 unsigned long flags; 239 unsigned long flags;
224 240
225 spin_lock_irqsave(&ar->tx_stats_lock, flags); 241 spin_lock_irqsave(&ar->tx_stats_lock, flags);
226 ar->tx_stats[queue].len--; 242 __ar9170_dump_txstats(ar);
227 if (ieee80211_queue_stopped(ar->hw, queue))
228 ieee80211_wake_queue(ar->hw, queue);
229 spin_unlock_irqrestore(&ar->tx_stats_lock, flags); 243 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
244}
245#endif /* AR9170_QUEUE_DEBUG */
246
247/* caller must guarantee exclusive access for _bin_ queue. */
248static void ar9170_recycle_expired(struct ar9170 *ar,
249 struct sk_buff_head *queue,
250 struct sk_buff_head *bin)
251{
252 struct sk_buff *skb, *old = NULL;
253 unsigned long flags;
254
255 spin_lock_irqsave(&queue->lock, flags);
256 while ((skb = skb_peek(queue))) {
257 struct ieee80211_tx_info *txinfo;
258 struct ar9170_tx_info *arinfo;
259
260 txinfo = IEEE80211_SKB_CB(skb);
261 arinfo = (void *) txinfo->rate_driver_data;
262
263 if (time_is_before_jiffies(arinfo->timeout)) {
264#ifdef AR9170_QUEUE_DEBUG
265 printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
266 "recycle \n", wiphy_name(ar->hw->wiphy),
267 jiffies, arinfo->timeout);
268 ar9170_print_txheader(ar, skb);
269#endif /* AR9170_QUEUE_DEBUG */
270 __skb_unlink(skb, queue);
271 __skb_queue_tail(bin, skb);
272 } else {
273 break;
274 }
275
276 if (unlikely(old == skb)) {
277 /* bail out - queue is shot. */
278
279 WARN_ON(1);
280 break;
281 }
282 old = skb;
283 }
284 spin_unlock_irqrestore(&queue->lock, flags);
285}
286
287static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
288 u16 tx_status)
289{
290 struct ieee80211_tx_info *txinfo;
291 unsigned int retries = 0;
230 292
231 txinfo = IEEE80211_SKB_CB(skb); 293 txinfo = IEEE80211_SKB_CB(skb);
232 ieee80211_tx_info_clear_status(txinfo); 294 ieee80211_tx_info_clear_status(txinfo);
@@ -248,45 +310,61 @@ void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
248 break; 310 break;
249 } 311 }
250 312
251 if (valid_status) 313 txinfo->status.rates[0].count = retries + 1;
252 txinfo->status.rates[0].count = retries + 1;
253
254 skb_pull(skb, sizeof(struct ar9170_tx_control)); 314 skb_pull(skb, sizeof(struct ar9170_tx_control));
255 ieee80211_tx_status_irqsafe(ar->hw, skb); 315 ieee80211_tx_status_irqsafe(ar->hw, skb);
256} 316}
257 317
258static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar, 318void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
259 const u8 *mac,
260 const u32 queue,
261 struct sk_buff_head *q)
262{ 319{
320 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
321 struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
322 unsigned int queue = skb_get_queue_mapping(skb);
263 unsigned long flags; 323 unsigned long flags;
264 struct sk_buff *skb;
265 324
266 spin_lock_irqsave(&q->lock, flags); 325 spin_lock_irqsave(&ar->tx_stats_lock, flags);
267 skb_queue_walk(q, skb) { 326 ar->tx_stats[queue].len--;
268 struct ar9170_tx_control *txc = (void *) skb->data;
269 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
270 u32 txc_queue = (le32_to_cpu(txc->phy_control) &
271 AR9170_TX_PHY_QOS_MASK) >>
272 AR9170_TX_PHY_QOS_SHIFT;
273 327
274 if ((queue != txc_queue) || 328 if (skb_queue_empty(&ar->tx_pending[queue])) {
275 (compare_ether_addr(ieee80211_get_DA(hdr), mac))) 329#ifdef AR9170_QUEUE_STOP_DEBUG
276 continue; 330 printk(KERN_DEBUG "%s: wake queue %d\n",
331 wiphy_name(ar->hw->wiphy), queue);
332 __ar9170_dump_txstats(ar);
333#endif /* AR9170_QUEUE_STOP_DEBUG */
334 ieee80211_wake_queue(ar->hw, queue);
335 }
336 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
277 337
278 __skb_unlink(skb, q); 338 if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
279 spin_unlock_irqrestore(&q->lock, flags); 339 dev_kfree_skb_any(skb);
280 return skb; 340 } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
341 arinfo->timeout = jiffies +
342 msecs_to_jiffies(AR9170_TX_TIMEOUT);
343
344 skb_queue_tail(&ar->tx_status[queue], skb);
345 } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
346 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
347 } else {
348#ifdef AR9170_QUEUE_DEBUG
349 printk(KERN_DEBUG "%s: unsupported frame flags!\n",
350 wiphy_name(ar->hw->wiphy));
351 ar9170_print_txheader(ar, skb);
352#endif /* AR9170_QUEUE_DEBUG */
353 dev_kfree_skb_any(skb);
354 }
355
356 if (!ar->tx_stats[queue].len &&
357 !skb_queue_empty(&ar->tx_pending[queue])) {
358 ar9170_tx(ar);
281 } 359 }
282 spin_unlock_irqrestore(&q->lock, flags);
283 return NULL;
284} 360}
285 361
286static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac, 362static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
287 const u32 queue) 363 const u8 *mac,
364 struct sk_buff_head *queue,
365 const u32 rate)
288{ 366{
289 struct ieee80211_sta *sta; 367 unsigned long flags;
290 struct sk_buff *skb; 368 struct sk_buff *skb;
291 369
292 /* 370 /*
@@ -297,85 +375,94 @@ static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
297 * the firmware provided (-> destination MAC, and phy_control) - 375 * the firmware provided (-> destination MAC, and phy_control) -
298 * and hope that we picked the right one... 376 * and hope that we picked the right one...
299 */ 377 */
300 rcu_read_lock();
301 sta = ieee80211_find_sta(ar->hw, mac);
302
303 if (likely(sta)) {
304 struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv;
305 skb = skb_dequeue(&sta_priv->tx_status[queue]);
306 rcu_read_unlock();
307 if (likely(skb))
308 return skb;
309 } else
310 rcu_read_unlock();
311
312 /* scan the waste queue for candidates */
313 skb = ar9170_find_skb_in_queue(ar, mac, queue,
314 &ar->global_tx_status_waste);
315 if (!skb) {
316 /* so it still _must_ be in the global list. */
317 skb = ar9170_find_skb_in_queue(ar, mac, queue,
318 &ar->global_tx_status);
319 }
320 378
379 spin_lock_irqsave(&queue->lock, flags);
380 skb_queue_walk(queue, skb) {
381 struct ar9170_tx_control *txc = (void *) skb->data;
382 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
383 u32 r;
384
385 if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
386#ifdef AR9170_QUEUE_DEBUG
387 printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n",
388 wiphy_name(ar->hw->wiphy), mac,
389 ieee80211_get_DA(hdr));
390 ar9170_print_txheader(ar, skb);
391#endif /* AR9170_QUEUE_DEBUG */
392 continue;
393 }
394
395 r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
396 AR9170_TX_PHY_MCS_SHIFT;
397
398 if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
321#ifdef AR9170_QUEUE_DEBUG 399#ifdef AR9170_QUEUE_DEBUG
322 if (unlikely((!skb) && net_ratelimit())) { 400 printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n",
323 printk(KERN_ERR "%s: ESS:[%pM] does not have any " 401 wiphy_name(ar->hw->wiphy), rate, r);
324 "outstanding frames in this queue (%d).\n", 402 ar9170_print_txheader(ar, skb);
325 wiphy_name(ar->hw->wiphy), mac, queue); 403#endif /* AR9170_QUEUE_DEBUG */
404 continue;
405 }
406
407 __skb_unlink(skb, queue);
408 spin_unlock_irqrestore(&queue->lock, flags);
409 return skb;
326 } 410 }
411
412#ifdef AR9170_QUEUE_DEBUG
413 printk(KERN_ERR "%s: ESS:[%pM] does not have any "
414 "outstanding frames in queue.\n",
415 wiphy_name(ar->hw->wiphy), mac);
416 __ar9170_dump_txqueue(ar, queue);
327#endif /* AR9170_QUEUE_DEBUG */ 417#endif /* AR9170_QUEUE_DEBUG */
328 return skb; 418 spin_unlock_irqrestore(&queue->lock, flags);
419
420 return NULL;
329} 421}
330 422
331/* 423/*
332 * This worker tries to keep the global tx_status queue empty. 424 * This worker tries to keep and maintain the tx_status queues.
333 * So we can guarantee that incoming tx_status reports for 425 * So we can guarantee that incoming tx_status reports are
334 * unregistered stations are always synced with the actual 426 * actually for a pending frame.
335 * frame - which we think - belongs to.
336 */ 427 */
337 428
338static void ar9170_tx_status_janitor(struct work_struct *work) 429static void ar9170_tx_janitor(struct work_struct *work)
339{ 430{
340 struct ar9170 *ar = container_of(work, struct ar9170, 431 struct ar9170 *ar = container_of(work, struct ar9170,
341 tx_status_janitor.work); 432 tx_janitor.work);
342 struct sk_buff *skb; 433 struct sk_buff_head waste;
434 unsigned int i;
435 bool resched = false;
343 436
344 if (unlikely(!IS_STARTED(ar))) 437 if (unlikely(!IS_STARTED(ar)))
345 return ; 438 return ;
346 439
347 mutex_lock(&ar->mutex); 440 skb_queue_head_init(&waste);
348 /* recycle the garbage back to mac80211... one by one. */ 441
349 while ((skb = skb_dequeue(&ar->global_tx_status_waste))) { 442 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
350#ifdef AR9170_QUEUE_DEBUG 443#ifdef AR9170_QUEUE_DEBUG
351 printk(KERN_DEBUG "%s: dispose queued frame =>\n", 444 printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n",
352 wiphy_name(ar->hw->wiphy)); 445 wiphy_name(ar->hw->wiphy), i);
353 ar9170_print_txheader(ar, skb); 446 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
447 ar9170_dump_txqueue(ar, &ar->tx_status[i]);
354#endif /* AR9170_QUEUE_DEBUG */ 448#endif /* AR9170_QUEUE_DEBUG */
355 ar9170_handle_tx_status(ar, skb, false,
356 AR9170_TX_STATUS_FAILED);
357 }
358 449
359 while ((skb = skb_dequeue(&ar->global_tx_status))) { 450 ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
360#ifdef AR9170_QUEUE_DEBUG 451 ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
361 printk(KERN_DEBUG "%s: moving frame into waste queue =>\n", 452 skb_queue_purge(&waste);
362 wiphy_name(ar->hw->wiphy));
363 453
364 ar9170_print_txheader(ar, skb); 454 if (!skb_queue_empty(&ar->tx_status[i]) ||
365#endif /* AR9170_QUEUE_DEBUG */ 455 !skb_queue_empty(&ar->tx_pending[i]))
366 skb_queue_tail(&ar->global_tx_status_waste, skb); 456 resched = true;
367 } 457 }
368 458
369 /* recall the janitor in 100ms - if there's garbage in the can. */ 459 if (resched)
370 if (skb_queue_len(&ar->global_tx_status_waste) > 0) 460 queue_delayed_work(ar->hw->workqueue,
371 queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor, 461 &ar->tx_janitor,
372 msecs_to_jiffies(100)); 462 msecs_to_jiffies(AR9170_JANITOR_DELAY));
373
374 mutex_unlock(&ar->mutex);
375} 463}
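
The janitor above delegates the actual reclaiming to ar9170_recycle_expired(), whose body is not part of this hunk. A minimal sketch of such a helper - purely illustrative, assuming it only compares the per-frame deadline that ar9170_tx_prepare()/ar9170_tx() store in arinfo->timeout against jiffies - could look like this:

static void ar9170_recycle_expired_sketch(struct ar9170 *ar,
					  struct sk_buff_head *queue,
					  struct sk_buff_head *bin)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	skb_queue_walk_safe(queue, skb, tmp) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;

		/* deadline passed => pull the frame out and park it in the bin */
		if (time_after(jiffies, arinfo->timeout)) {
			__skb_unlink(skb, queue);
			__skb_queue_tail(bin, skb);
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
}

The real helper may do more (for instance report a failed tx status before the frame is purged), so treat this only as a reading aid for the janitor loop above.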
376 464
377static void ar9170_handle_command_response(struct ar9170 *ar, 465void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
378 void *buf, u32 len)
379{ 466{
380 struct ar9170_cmd_response *cmd = (void *) buf; 467 struct ar9170_cmd_response *cmd = (void *) buf;
381 468
@@ -399,15 +486,21 @@ static void ar9170_handle_command_response(struct ar9170 *ar,
399 */ 486 */
400 487
401 struct sk_buff *skb; 488 struct sk_buff *skb;
402 u32 queue = (le32_to_cpu(cmd->tx_status.rate) & 489 u32 phy = le32_to_cpu(cmd->tx_status.rate);
403 AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT; 490 u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
491 AR9170_TX_PHY_QOS_SHIFT;
492#ifdef AR9170_QUEUE_DEBUG
493 printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n",
494 wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q);
495#endif /* AR9170_QUEUE_DEBUG */
404 496
405 skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue); 497 skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
498 &ar->tx_status[q],
499 AR9170_TX_INVALID_RATE);
406 if (unlikely(!skb)) 500 if (unlikely(!skb))
407 return ; 501 return ;
408 502
409 ar9170_handle_tx_status(ar, skb, true, 503 ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
410 le16_to_cpu(cmd->tx_status.status));
411 break; 504 break;
412 } 505 }
413 506
@@ -447,6 +540,38 @@ static void ar9170_handle_command_response(struct ar9170 *ar,
447 /* retransmission issue / SIFS/EIFS collision ?! */ 540 /* retransmission issue / SIFS/EIFS collision ?! */
448 break; 541 break;
449 542
543 /* firmware debug */
544 case 0xca:
545 printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
546 break;
547 case 0xcb:
548 len -= 4;
549
550 switch (len) {
551 case 1:
552 printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n",
 553 *((u8 *)buf + 4));
554 break;
555 case 2:
 556 printk(KERN_DEBUG "ar9170 FW: u16: %#.4x\n",
557 le16_to_cpup((__le16 *)((char *)buf + 4)));
558 break;
559 case 4:
 560 printk(KERN_DEBUG "ar9170 FW: u32: %#.8x\n",
561 le32_to_cpup((__le32 *)((char *)buf + 4)));
562 break;
563 case 8:
 564 printk(KERN_DEBUG "ar9170 FW: u64: %#.16llx\n",
 565 (unsigned long long)le64_to_cpup(
566 (__le64 *)((char *)buf + 4)));
567 break;
568 }
569 break;
570 case 0xcc:
571 print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE,
572 (char *)buf + 4, len - 4);
573 break;
574
450 default: 575 default:
451 printk(KERN_INFO "received unhandled event %x\n", cmd->type); 576 printk(KERN_INFO "received unhandled event %x\n", cmd->type);
452 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len); 577 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
@@ -460,7 +585,7 @@ static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
460 ar->rx_mpdu.has_plcp = false; 585 ar->rx_mpdu.has_plcp = false;
461} 586}
462 587
463static int ar9170_nag_limiter(struct ar9170 *ar) 588int ar9170_nag_limiter(struct ar9170 *ar)
464{ 589{
465 bool print_message; 590 bool print_message;
466 591
@@ -957,10 +1082,12 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
957 1082
958 mutex_lock(&ar->mutex); 1083 mutex_lock(&ar->mutex);
959 1084
1085 ar->filter_changed = 0;
1086
960 /* reinitialize queues statistics */ 1087 /* reinitialize queues statistics */
961 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats)); 1088 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
962 for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++) 1089 for (i = 0; i < __AR9170_NUM_TXQ; i++)
963 ar->tx_stats[i].limit = 8; 1090 ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
964 1091
965 /* reset QoS defaults */ 1092 /* reset QoS defaults */
966 AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/ 1093 AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
@@ -1006,18 +1133,17 @@ out:
1006static void ar9170_op_stop(struct ieee80211_hw *hw) 1133static void ar9170_op_stop(struct ieee80211_hw *hw)
1007{ 1134{
1008 struct ar9170 *ar = hw->priv; 1135 struct ar9170 *ar = hw->priv;
1136 unsigned int i;
1009 1137
1010 if (IS_STARTED(ar)) 1138 if (IS_STARTED(ar))
1011 ar->state = AR9170_IDLE; 1139 ar->state = AR9170_IDLE;
1012 1140
1013 flush_workqueue(ar->hw->workqueue); 1141 flush_workqueue(ar->hw->workqueue);
1014 1142
1015 mutex_lock(&ar->mutex); 1143 cancel_delayed_work_sync(&ar->tx_janitor);
1016 cancel_delayed_work_sync(&ar->tx_status_janitor);
1017 cancel_work_sync(&ar->filter_config_work); 1144 cancel_work_sync(&ar->filter_config_work);
1018 cancel_work_sync(&ar->beacon_work); 1145 cancel_work_sync(&ar->beacon_work);
1019 skb_queue_purge(&ar->global_tx_status_waste); 1146 mutex_lock(&ar->mutex);
1020 skb_queue_purge(&ar->global_tx_status);
1021 1147
1022 if (IS_ACCEPTING_CMD(ar)) { 1148 if (IS_ACCEPTING_CMD(ar)) {
1023 ar9170_set_leds_state(ar, 0); 1149 ar9170_set_leds_state(ar, 0);
@@ -1027,51 +1153,32 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
1027 ar->stop(ar); 1153 ar->stop(ar);
1028 } 1154 }
1029 1155
1156 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1157 skb_queue_purge(&ar->tx_pending[i]);
1158 skb_queue_purge(&ar->tx_status[i]);
1159 }
1030 mutex_unlock(&ar->mutex); 1160 mutex_unlock(&ar->mutex);
1031} 1161}
1032 1162
1033int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1163static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1034{ 1164{
1035 struct ar9170 *ar = hw->priv;
1036 struct ieee80211_hdr *hdr; 1165 struct ieee80211_hdr *hdr;
1037 struct ar9170_tx_control *txc; 1166 struct ar9170_tx_control *txc;
1038 struct ieee80211_tx_info *info; 1167 struct ieee80211_tx_info *info;
1039 struct ieee80211_rate *rate = NULL;
1040 struct ieee80211_tx_rate *txrate; 1168 struct ieee80211_tx_rate *txrate;
1169 struct ar9170_tx_info *arinfo;
1041 unsigned int queue = skb_get_queue_mapping(skb); 1170 unsigned int queue = skb_get_queue_mapping(skb);
1042 unsigned long flags = 0;
1043 struct ar9170_sta_info *sta_info = NULL;
1044 u32 power, chains;
1045 u16 keytype = 0; 1171 u16 keytype = 0;
1046 u16 len, icv = 0; 1172 u16 len, icv = 0;
1047 int err;
1048 bool tx_status;
1049 1173
1050 if (unlikely(!IS_STARTED(ar))) 1174 BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
1051 goto err_free;
1052 1175
1053 hdr = (void *)skb->data; 1176 hdr = (void *)skb->data;
1054 info = IEEE80211_SKB_CB(skb); 1177 info = IEEE80211_SKB_CB(skb);
1055 len = skb->len; 1178 len = skb->len;
1056 1179
1057 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1058 if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) {
1059 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1060 return NETDEV_TX_OK;
1061 }
1062
1063 ar->tx_stats[queue].len++;
1064 ar->tx_stats[queue].count++;
1065 if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len)
1066 ieee80211_stop_queue(hw, queue);
1067
1068 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1069
1070 txc = (void *)skb_push(skb, sizeof(*txc)); 1180 txc = (void *)skb_push(skb, sizeof(*txc));
1071 1181
1072 tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) ||
1073 ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0));
1074
1075 if (info->control.hw_key) { 1182 if (info->control.hw_key) {
1076 icv = info->control.hw_key->icv_len; 1183 icv = info->control.hw_key->icv_len;
1077 1184
@@ -1087,7 +1194,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1087 break; 1194 break;
1088 default: 1195 default:
1089 WARN_ON(1); 1196 WARN_ON(1);
1090 goto err_dequeue; 1197 goto err_out;
1091 } 1198 }
1092 } 1199 }
1093 1200
@@ -1104,16 +1211,65 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1104 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 1211 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1105 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK); 1212 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
1106 1213
1107 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1108 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1109
1110 txrate = &info->control.rates[0]; 1214 txrate = &info->control.rates[0];
1111
1112 if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) 1215 if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
1113 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS); 1216 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
1114 else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS) 1217 else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
1115 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS); 1218 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
1116 1219
1220 arinfo = (void *)info->rate_driver_data;
1221 arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);
1222
1223 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
1224 (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
1225 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1226 if (unlikely(!info->control.sta))
1227 goto err_out;
1228
1229 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1230 arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
1231 goto out;
1232 }
1233
1234 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1235 /*
1236 * WARNING:
1237 * Putting the QoS queue bits into an unexplored territory is
1238 * certainly not elegant.
1239 *
1240 * In my defense: This idea provides a reasonable way to
1241 * smuggle valuable information to the tx_status callback.
1242 * Also, the idea behind this bit-abuse came straight from
1243 * the original driver code.
1244 */
1245
1246 txc->phy_control |=
1247 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
1248 arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
1249 } else {
1250 arinfo->flags = AR9170_TX_FLAG_NO_ACK;
1251 }
1252
1253out:
1254 return 0;
1255
1256err_out:
1257 skb_pull(skb, sizeof(*txc));
1258 return -EINVAL;
1259}
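
For reference, the queue index smuggled into the QoS bits above is read back on the status path in exactly the way ar9170_handle_command_response() does it earlier in this patch; as a hypothetical stand-alone helper (not part of the driver) the extraction boils down to:

static unsigned int ar9170_phy_ctrl_to_queue(__le32 phy_control)
{
	u32 phy = le32_to_cpu(phy_control);

	/* the QoS field of phy_control carries the mac80211 queue index */
	return (phy & AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;
}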
1260
1261static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
1262{
1263 struct ar9170_tx_control *txc;
1264 struct ieee80211_tx_info *info;
1265 struct ieee80211_rate *rate = NULL;
1266 struct ieee80211_tx_rate *txrate;
1267 u32 power, chains;
1268
1269 txc = (void *) skb->data;
1270 info = IEEE80211_SKB_CB(skb);
1271 txrate = &info->control.rates[0];
1272
1117 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) 1273 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
1118 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD); 1274 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
1119 1275
@@ -1133,9 +1289,12 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1133 u32 r = txrate->idx; 1289 u32 r = txrate->idx;
1134 u8 *txpower; 1290 u8 *txpower;
1135 1291
1292 /* heavy clip control */
1293 txc->phy_control |= cpu_to_le32((r & 0x7) << 7);
1294
1136 r <<= AR9170_TX_PHY_MCS_SHIFT; 1295 r <<= AR9170_TX_PHY_MCS_SHIFT;
1137 if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK)) 1296 BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);
1138 goto err_dequeue; 1297
1139 txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK); 1298 txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
1140 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT); 1299 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
1141 1300
@@ -1197,53 +1356,154 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1197 chains = AR9170_TX_PHY_TXCHAIN_1; 1356 chains = AR9170_TX_PHY_TXCHAIN_1;
1198 } 1357 }
1199 txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT); 1358 txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
1359}
1200 1360
1201 if (tx_status) { 1361static void ar9170_tx(struct ar9170 *ar)
1202 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE); 1362{
1203 /* 1363 struct sk_buff *skb;
1204 * WARNING: 1364 unsigned long flags;
1205 * Putting the QoS queue bits into an unexplored territory is 1365 struct ieee80211_tx_info *info;
1206 * certainly not elegant. 1366 struct ar9170_tx_info *arinfo;
1207 * 1367 unsigned int i, frames, frames_failed, remaining_space;
1208 * In my defense: This idea provides a reasonable way to 1368 int err;
1209 * smuggle valuable information to the tx_status callback. 1369 bool schedule_garbagecollector = false;
1210 * Also, the idea behind this bit-abuse came straight from
1211 * the original driver code.
1212 */
1213 1370
1214 txc->phy_control |= 1371 BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
1215 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
1216 1372
1217 if (info->control.sta) { 1373 if (unlikely(!IS_STARTED(ar)))
1218 sta_info = (void *) info->control.sta->drv_priv; 1374 return ;
1219 skb_queue_tail(&sta_info->tx_status[queue], skb); 1375
1220 } else { 1376 remaining_space = AR9170_TX_MAX_PENDING;
1221 skb_queue_tail(&ar->global_tx_status, skb); 1377
1378 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1379 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1380 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
1381#ifdef AR9170_QUEUE_DEBUG
1382 printk(KERN_DEBUG "%s: queue %d full\n",
1383 wiphy_name(ar->hw->wiphy), i);
1222 1384
1223 queue_delayed_work(ar->hw->workqueue, 1385 __ar9170_dump_txstats(ar);
1224 &ar->tx_status_janitor, 1386 printk(KERN_DEBUG "stuck frames: ===> \n");
1225 msecs_to_jiffies(100)); 1387 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1388 ar9170_dump_txqueue(ar, &ar->tx_status[i]);
1389#endif /* AR9170_QUEUE_DEBUG */
1390 ieee80211_stop_queue(ar->hw, i);
1391 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1392 continue;
1393 }
1394
1395 frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
1396 skb_queue_len(&ar->tx_pending[i]));
1397
1398 if (remaining_space < frames) {
1399#ifdef AR9170_QUEUE_DEBUG
1400 printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
1401 "remaining slots:%d, needed:%d\n",
1402 wiphy_name(ar->hw->wiphy), i, remaining_space,
1403 frames);
1404
1405 ar9170_dump_txstats(ar);
1406#endif /* AR9170_QUEUE_DEBUG */
1407 frames = remaining_space;
1408 }
1409
1410 ar->tx_stats[i].len += frames;
1411 ar->tx_stats[i].count += frames;
1412 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1413
1414 if (!frames)
1415 continue;
1416
1417 frames_failed = 0;
1418 while (frames) {
1419 skb = skb_dequeue(&ar->tx_pending[i]);
1420 if (unlikely(!skb)) {
1421 frames_failed += frames;
1422 frames = 0;
1423 break;
1424 }
1425
1426 info = IEEE80211_SKB_CB(skb);
1427 arinfo = (void *) info->rate_driver_data;
1428
1429 /* TODO: cancel stuck frames */
1430 arinfo->timeout = jiffies +
1431 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1432
1433#ifdef AR9170_QUEUE_DEBUG
1434 printk(KERN_DEBUG "%s: send frame q:%d =>\n",
1435 wiphy_name(ar->hw->wiphy), i);
1436 ar9170_print_txheader(ar, skb);
1437#endif /* AR9170_QUEUE_DEBUG */
1438
1439 err = ar->tx(ar, skb);
1440 if (unlikely(err)) {
1441 frames_failed++;
1442 dev_kfree_skb_any(skb);
1443 } else {
1444 remaining_space--;
1445 schedule_garbagecollector = true;
1446 }
1447
1448 frames--;
1449 }
1450
1451#ifdef AR9170_QUEUE_DEBUG
1452 printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n",
1453 wiphy_name(ar->hw->wiphy), i);
1454
1455 printk(KERN_DEBUG "%s: unprocessed pending frames left:\n",
1456 wiphy_name(ar->hw->wiphy));
1457 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1458#endif /* AR9170_QUEUE_DEBUG */
1459
1460 if (unlikely(frames_failed)) {
1461#ifdef AR9170_QUEUE_DEBUG
 1462 printk(KERN_DEBUG "%s: %d frames failed =>\n",
1463 wiphy_name(ar->hw->wiphy), frames_failed);
1464#endif /* AR9170_QUEUE_DEBUG */
1465
1466 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1467 ar->tx_stats[i].len -= frames_failed;
1468 ar->tx_stats[i].count -= frames_failed;
1469 ieee80211_wake_queue(ar->hw, i);
1470 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1226 } 1471 }
1227 } 1472 }
1228 1473
1229 err = ar->tx(ar, skb, tx_status, 0); 1474 if (schedule_garbagecollector)
1230 if (unlikely(tx_status && err)) { 1475 queue_delayed_work(ar->hw->workqueue,
1231 if (info->control.sta) 1476 &ar->tx_janitor,
1232 skb_unlink(skb, &sta_info->tx_status[queue]); 1477 msecs_to_jiffies(AR9170_JANITOR_DELAY));
1233 else 1478}
1234 skb_unlink(skb, &ar->global_tx_status); 1479
1480int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1481{
1482 struct ar9170 *ar = hw->priv;
1483 struct ieee80211_tx_info *info;
1484
1485 if (unlikely(!IS_STARTED(ar)))
1486 goto err_free;
1487
1488 if (unlikely(ar9170_tx_prepare(ar, skb)))
1489 goto err_free;
1490
1491 info = IEEE80211_SKB_CB(skb);
1492 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1493 /* drop frame, we do not allow TX A-MPDU aggregation yet. */
1494 goto err_free;
1495 } else {
1496 unsigned int queue = skb_get_queue_mapping(skb);
1497
1498 ar9170_tx_prepare_phy(ar, skb);
1499 skb_queue_tail(&ar->tx_pending[queue], skb);
1235 } 1500 }
1236 1501
1502 ar9170_tx(ar);
1237 return NETDEV_TX_OK; 1503 return NETDEV_TX_OK;
1238 1504
1239err_dequeue:
1240 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1241 ar->tx_stats[queue].len--;
1242 ar->tx_stats[queue].count--;
1243 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1244
1245err_free: 1505err_free:
1246 dev_kfree_skb(skb); 1506 dev_kfree_skb_any(skb);
1247 return NETDEV_TX_OK; 1507 return NETDEV_TX_OK;
1248} 1508}
1249 1509
@@ -1306,11 +1566,6 @@ static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
1306 1566
1307 mutex_lock(&ar->mutex); 1567 mutex_lock(&ar->mutex);
1308 1568
1309 if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
1310 /* TODO */
1311 err = 0;
1312 }
1313
1314 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { 1569 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
1315 /* TODO */ 1570 /* TODO */
1316 err = 0; 1571 err = 0;
@@ -1344,15 +1599,21 @@ static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
1344 } 1599 }
1345 1600
1346 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1601 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1602
1603 /* adjust slot time for 5 GHz */
1604 err = ar9170_set_slot_time(ar);
1605 if (err)
1606 goto out;
1607
1608 err = ar9170_set_dyn_sifs_ack(ar);
1609 if (err)
1610 goto out;
1611
1347 err = ar9170_set_channel(ar, hw->conf.channel, 1612 err = ar9170_set_channel(ar, hw->conf.channel,
1348 AR9170_RFI_NONE, 1613 AR9170_RFI_NONE,
1349 nl80211_to_ar9170(hw->conf.channel_type)); 1614 nl80211_to_ar9170(hw->conf.channel_type));
1350 if (err) 1615 if (err)
1351 goto out; 1616 goto out;
1352 /* adjust slot time for 5 GHz */
1353 if (hw->conf.channel->band == IEEE80211_BAND_5GHZ)
1354 err = ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
1355 9 << 10);
1356 } 1617 }
1357 1618
1358out: 1619out:
@@ -1370,20 +1631,26 @@ static void ar9170_set_filters(struct work_struct *work)
1370 return ; 1631 return ;
1371 1632
1372 mutex_lock(&ar->mutex); 1633 mutex_lock(&ar->mutex);
1373 if (ar->filter_changed & AR9170_FILTER_CHANGED_PROMISC) { 1634 if (test_and_clear_bit(AR9170_FILTER_CHANGED_MODE,
1635 &ar->filter_changed)) {
1374 err = ar9170_set_operating_mode(ar); 1636 err = ar9170_set_operating_mode(ar);
1375 if (err) 1637 if (err)
1376 goto unlock; 1638 goto unlock;
1377 } 1639 }
1378 1640
1379 if (ar->filter_changed & AR9170_FILTER_CHANGED_MULTICAST) { 1641 if (test_and_clear_bit(AR9170_FILTER_CHANGED_MULTICAST,
1642 &ar->filter_changed)) {
1380 err = ar9170_update_multicast(ar); 1643 err = ar9170_update_multicast(ar);
1381 if (err) 1644 if (err)
1382 goto unlock; 1645 goto unlock;
1383 } 1646 }
1384 1647
1385 if (ar->filter_changed & AR9170_FILTER_CHANGED_FRAMEFILTER) 1648 if (test_and_clear_bit(AR9170_FILTER_CHANGED_FRAMEFILTER,
1649 &ar->filter_changed)) {
1386 err = ar9170_update_frame_filter(ar); 1650 err = ar9170_update_frame_filter(ar);
1651 if (err)
1652 goto unlock;
1653 }
1387 1654
1388unlock: 1655unlock:
1389 mutex_unlock(&ar->mutex); 1656 mutex_unlock(&ar->mutex);
@@ -1413,7 +1680,7 @@ static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
1413 int i; 1680 int i;
1414 1681
1415 /* always get broadcast frames */ 1682 /* always get broadcast frames */
1416 mchash = 1ULL << (0xff>>2); 1683 mchash = 1ULL << (0xff >> 2);
1417 1684
1418 for (i = 0; i < mc_count; i++) { 1685 for (i = 0; i < mc_count; i++) {
1419 if (WARN_ON(!mclist)) 1686 if (WARN_ON(!mclist))
@@ -1423,7 +1690,7 @@ static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
1423 } 1690 }
1424 ar->want_mc_hash = mchash; 1691 ar->want_mc_hash = mchash;
1425 } 1692 }
1426 ar->filter_changed |= AR9170_FILTER_CHANGED_MULTICAST; 1693 set_bit(AR9170_FILTER_CHANGED_MULTICAST, &ar->filter_changed);
1427 } 1694 }
1428 1695
1429 if (changed_flags & FIF_CONTROL) { 1696 if (changed_flags & FIF_CONTROL) {
@@ -1439,12 +1706,14 @@ static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
1439 else 1706 else
1440 ar->want_filter = ar->cur_filter & ~filter; 1707 ar->want_filter = ar->cur_filter & ~filter;
1441 1708
1442 ar->filter_changed |= AR9170_FILTER_CHANGED_FRAMEFILTER; 1709 set_bit(AR9170_FILTER_CHANGED_FRAMEFILTER,
1710 &ar->filter_changed);
1443 } 1711 }
1444 1712
1445 if (changed_flags & FIF_PROMISC_IN_BSS) { 1713 if (changed_flags & FIF_PROMISC_IN_BSS) {
1446 ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0; 1714 ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
1447 ar->filter_changed |= AR9170_FILTER_CHANGED_PROMISC; 1715 set_bit(AR9170_FILTER_CHANGED_MODE,
1716 &ar->filter_changed);
1448 } 1717 }
1449 1718
1450 if (likely(IS_STARTED(ar))) 1719 if (likely(IS_STARTED(ar)))
@@ -1464,27 +1733,32 @@ static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
1464 if (changed & BSS_CHANGED_BSSID) { 1733 if (changed & BSS_CHANGED_BSSID) {
1465 memcpy(ar->bssid, bss_conf->bssid, ETH_ALEN); 1734 memcpy(ar->bssid, bss_conf->bssid, ETH_ALEN);
1466 err = ar9170_set_operating_mode(ar); 1735 err = ar9170_set_operating_mode(ar);
1736 if (err)
1737 goto out;
1467 } 1738 }
1468 1739
1469 if (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) { 1740 if (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) {
1470 err = ar9170_update_beacon(ar); 1741 err = ar9170_update_beacon(ar);
1471 if (!err) 1742 if (err)
1472 ar9170_set_beacon_timers(ar); 1743 goto out;
1473 }
1474 1744
1475 ar9170_regwrite_begin(ar); 1745 err = ar9170_set_beacon_timers(ar);
1746 if (err)
1747 goto out;
1748 }
1476 1749
1477 if (changed & BSS_CHANGED_ASSOC) { 1750 if (changed & BSS_CHANGED_ASSOC) {
1478 ar->state = bss_conf->assoc ? AR9170_ASSOCIATED : ar->state;
1479
1480#ifndef CONFIG_AR9170_LEDS 1751#ifndef CONFIG_AR9170_LEDS
1481 /* enable assoc LED. */ 1752 /* enable assoc LED. */
1482 err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0); 1753 err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
1483#endif /* CONFIG_AR9170_LEDS */ 1754#endif /* CONFIG_AR9170_LEDS */
1484 } 1755 }
1485 1756
1486 if (changed & BSS_CHANGED_BEACON_INT) 1757 if (changed & BSS_CHANGED_BEACON_INT) {
1487 err = ar9170_set_beacon_timers(ar); 1758 err = ar9170_set_beacon_timers(ar);
1759 if (err)
1760 goto out;
1761 }
1488 1762
1489 if (changed & BSS_CHANGED_HT) { 1763 if (changed & BSS_CHANGED_HT) {
1490 /* TODO */ 1764 /* TODO */
@@ -1492,31 +1766,18 @@ static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
1492 } 1766 }
1493 1767
1494 if (changed & BSS_CHANGED_ERP_SLOT) { 1768 if (changed & BSS_CHANGED_ERP_SLOT) {
1495 u32 slottime = 20; 1769 err = ar9170_set_slot_time(ar);
1496 1770 if (err)
1497 if (bss_conf->use_short_slot) 1771 goto out;
1498 slottime = 9;
1499
1500 ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, slottime << 10);
1501 } 1772 }
1502 1773
1503 if (changed & BSS_CHANGED_BASIC_RATES) { 1774 if (changed & BSS_CHANGED_BASIC_RATES) {
1504 u32 cck, ofdm; 1775 err = ar9170_set_basic_rates(ar);
1505 1776 if (err)
1506 if (hw->conf.channel->band == IEEE80211_BAND_5GHZ) { 1777 goto out;
1507 ofdm = bss_conf->basic_rates;
1508 cck = 0;
1509 } else {
1510 /* four cck rates */
1511 cck = bss_conf->basic_rates & 0xf;
1512 ofdm = bss_conf->basic_rates >> 4;
1513 }
1514 ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE,
1515 ofdm << 8 | cck);
1516 } 1778 }
1517 1779
1518 ar9170_regwrite_finish(); 1780out:
1519 err = ar9170_regwrite_result();
1520 mutex_unlock(&ar->mutex); 1781 mutex_unlock(&ar->mutex);
1521} 1782}
1522 1783
@@ -1668,43 +1929,6 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw,
1668 enum sta_notify_cmd cmd, 1929 enum sta_notify_cmd cmd,
1669 struct ieee80211_sta *sta) 1930 struct ieee80211_sta *sta)
1670{ 1931{
1671 struct ar9170 *ar = hw->priv;
1672 struct ar9170_sta_info *info = (void *) sta->drv_priv;
1673 struct sk_buff *skb;
1674 unsigned int i;
1675
1676 switch (cmd) {
1677 case STA_NOTIFY_ADD:
1678 for (i = 0; i < ar->hw->queues; i++)
1679 skb_queue_head_init(&info->tx_status[i]);
1680 break;
1681
1682 case STA_NOTIFY_REMOVE:
1683
1684 /*
1685 * transfer all outstanding frames that need a tx_status
1686 * reports to the global tx_status queue
1687 */
1688
1689 for (i = 0; i < ar->hw->queues; i++) {
1690 while ((skb = skb_dequeue(&info->tx_status[i]))) {
1691#ifdef AR9170_QUEUE_DEBUG
1692 printk(KERN_DEBUG "%s: queueing frame in "
1693 "global tx_status queue =>\n",
1694 wiphy_name(ar->hw->wiphy));
1695
1696 ar9170_print_txheader(ar, skb);
1697#endif /* AR9170_QUEUE_DEBUG */
1698 skb_queue_tail(&ar->global_tx_status, skb);
1699 }
1700 }
1701 queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
1702 msecs_to_jiffies(100));
1703 break;
1704
1705 default:
1706 break;
1707 }
1708} 1932}
1709 1933
1710static int ar9170_get_stats(struct ieee80211_hw *hw, 1934static int ar9170_get_stats(struct ieee80211_hw *hw,
@@ -1743,7 +1967,7 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
1743 int ret; 1967 int ret;
1744 1968
1745 mutex_lock(&ar->mutex); 1969 mutex_lock(&ar->mutex);
1746 if ((param) && !(queue > ar->hw->queues)) { 1970 if ((param) && !(queue > __AR9170_NUM_TXQ)) {
1747 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]], 1971 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
1748 param, sizeof(*param)); 1972 param, sizeof(*param));
1749 1973
@@ -1819,12 +2043,14 @@ void *ar9170_alloc(size_t priv_size)
1819 mutex_init(&ar->mutex); 2043 mutex_init(&ar->mutex);
1820 spin_lock_init(&ar->cmdlock); 2044 spin_lock_init(&ar->cmdlock);
1821 spin_lock_init(&ar->tx_stats_lock); 2045 spin_lock_init(&ar->tx_stats_lock);
1822 skb_queue_head_init(&ar->global_tx_status); 2046 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1823 skb_queue_head_init(&ar->global_tx_status_waste); 2047 skb_queue_head_init(&ar->tx_status[i]);
2048 skb_queue_head_init(&ar->tx_pending[i]);
2049 }
1824 ar9170_rx_reset_rx_mpdu(ar); 2050 ar9170_rx_reset_rx_mpdu(ar);
1825 INIT_WORK(&ar->filter_config_work, ar9170_set_filters); 2051 INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
1826 INIT_WORK(&ar->beacon_work, ar9170_new_beacon); 2052 INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
1827 INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor); 2053 INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
1828 2054
1829 /* all hw supports 2.4 GHz, so set channel to 1 by default */ 2055 /* all hw supports 2.4 GHz, so set channel to 1 by default */
1830 ar->channel = &ar9170_2ghz_chantable[0]; 2056 ar->channel = &ar9170_2ghz_chantable[0];
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index 6ce20754b8e7..df86f70cd817 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -401,7 +401,7 @@ int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
401 int i, err; 401 int i, err;
402 u32 val; 402 u32 val;
403 bool is_2ghz = band == IEEE80211_BAND_2GHZ; 403 bool is_2ghz = band == IEEE80211_BAND_2GHZ;
404 bool is_40mhz = false; /* XXX: for now */ 404 bool is_40mhz = conf_is_ht40(&ar->hw->conf);
405 405
406 ar9170_regwrite_begin(ar); 406 ar9170_regwrite_begin(ar);
407 407
@@ -1200,7 +1200,7 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1200 return -ENOSYS; 1200 return -ENOSYS;
1201 } 1201 }
1202 1202
1203 if (0 /* 2 streams capable */) 1203 if (ar->eeprom.tx_mask != 1)
1204 tmp |= 0x100; 1204 tmp |= 0x100;
1205 1205
1206 err = ar9170_write_reg(ar, 0x1c5804, tmp); 1206 err = ar9170_write_reg(ar, 0x1c5804, tmp);
@@ -1214,7 +1214,7 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1214 freqpar = ar9170_get_hw_dyn_params(channel, bw); 1214 freqpar = ar9170_get_hw_dyn_params(channel, bw);
1215 1215
1216 vals[0] = cpu_to_le32(channel->center_freq * 1000); 1216 vals[0] = cpu_to_le32(channel->center_freq * 1000);
1217 vals[1] = cpu_to_le32(bw == AR9170_BW_20 ? 0 : 1); 1217 vals[1] = cpu_to_le32(conf_is_ht40(&ar->hw->conf));
1218 vals[2] = cpu_to_le32(offs << 2 | 1); 1218 vals[2] = cpu_to_le32(offs << 2 | 1);
1219 vals[3] = cpu_to_le32(freqpar->coeff_exp); 1219 vals[3] = cpu_to_le32(freqpar->coeff_exp);
1220 vals[4] = cpu_to_le32(freqpar->coeff_man); 1220 vals[4] = cpu_to_le32(freqpar->coeff_man);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index d7c13c0177ca..754b1f8d8da9 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -51,9 +51,14 @@ MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
51MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); 51MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
52MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
53MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless"); 53MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
54MODULE_FIRMWARE("ar9170.fw");
54MODULE_FIRMWARE("ar9170-1.fw"); 55MODULE_FIRMWARE("ar9170-1.fw");
55MODULE_FIRMWARE("ar9170-2.fw"); 56MODULE_FIRMWARE("ar9170-2.fw");
56 57
58enum ar9170_requirements {
59 AR9170_REQ_FW1_ONLY = 1,
60};
61
57static struct usb_device_id ar9170_usb_ids[] = { 62static struct usb_device_id ar9170_usb_ids[] = {
58 /* Atheros 9170 */ 63 /* Atheros 9170 */
59 { USB_DEVICE(0x0cf3, 0x9170) }, 64 { USB_DEVICE(0x0cf3, 0x9170) },
@@ -81,25 +86,74 @@ static struct usb_device_id ar9170_usb_ids[] = {
81 { USB_DEVICE(0x2019, 0x5304) }, 86 { USB_DEVICE(0x2019, 0x5304) },
82 /* IO-Data WNGDNUS2 */ 87 /* IO-Data WNGDNUS2 */
83 { USB_DEVICE(0x04bb, 0x093f) }, 88 { USB_DEVICE(0x04bb, 0x093f) },
89 /* AVM FRITZ!WLAN USB Stick N */
90 { USB_DEVICE(0x057C, 0x8401) },
91 /* AVM FRITZ!WLAN USB Stick N 2.4 */
92 { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
84 93
85 /* terminate */ 94 /* terminate */
86 {} 95 {}
87}; 96};
88MODULE_DEVICE_TABLE(usb, ar9170_usb_ids); 97MODULE_DEVICE_TABLE(usb, ar9170_usb_ids);
89 98
90static void ar9170_usb_tx_urb_complete_free(struct urb *urb) 99static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
100{
101 struct urb *urb;
102 unsigned long flags;
103 int err;
104
105 if (unlikely(!IS_STARTED(&aru->common)))
106 return ;
107
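	/* keep no more than AR9170_NUM_TX_URBS tx urbs in flight and
	 * feed the next queued frame from the tx_pending anchor. */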
108 spin_lock_irqsave(&aru->tx_urb_lock, flags);
109 if (aru->tx_submitted_urbs >= AR9170_NUM_TX_URBS) {
110 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
111 return ;
112 }
113 aru->tx_submitted_urbs++;
114
115 urb = usb_get_from_anchor(&aru->tx_pending);
116 if (!urb) {
117 aru->tx_submitted_urbs--;
118 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
119
120 return ;
121 }
122 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
123
124 aru->tx_pending_urbs--;
125 usb_anchor_urb(urb, &aru->tx_submitted);
126
127 err = usb_submit_urb(urb, GFP_ATOMIC);
128 if (unlikely(err)) {
129 if (ar9170_nag_limiter(&aru->common))
130 dev_err(&aru->udev->dev, "submit_urb failed (%d).\n",
131 err);
132
133 usb_unanchor_urb(urb);
134 aru->tx_submitted_urbs--;
135 ar9170_tx_callback(&aru->common, urb->context);
136 }
137
138 usb_free_urb(urb);
139}
140
141static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
91{ 142{
92 struct sk_buff *skb = urb->context; 143 struct sk_buff *skb = urb->context;
93 struct ar9170_usb *aru = (struct ar9170_usb *) 144 struct ar9170_usb *aru = (struct ar9170_usb *)
94 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); 145 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
95 146
96 if (!aru) { 147 if (unlikely(!aru)) {
97 dev_kfree_skb_irq(skb); 148 dev_kfree_skb_irq(skb);
98 return ; 149 return ;
99 } 150 }
100 151
101 ar9170_handle_tx_status(&aru->common, skb, false, 152 aru->tx_submitted_urbs--;
102 AR9170_TX_STATUS_COMPLETE); 153
154 ar9170_tx_callback(&aru->common, skb);
155
156 ar9170_usb_submit_urb(aru);
103} 157}
104 158
105static void ar9170_usb_tx_urb_complete(struct urb *urb) 159static void ar9170_usb_tx_urb_complete(struct urb *urb)
@@ -126,8 +180,8 @@ static void ar9170_usb_irq_completed(struct urb *urb)
126 goto resubmit; 180 goto resubmit;
127 } 181 }
128 182
129 print_hex_dump_bytes("ar9170 irq: ", DUMP_PREFIX_OFFSET, 183 ar9170_handle_command_response(&aru->common, urb->transfer_buffer,
130 urb->transfer_buffer, urb->actual_length); 184 urb->actual_length);
131 185
132resubmit: 186resubmit:
133 usb_anchor_urb(urb, &aru->rx_submitted); 187 usb_anchor_urb(urb, &aru->rx_submitted);
@@ -177,16 +231,15 @@ resubmit:
177 231
178 usb_anchor_urb(urb, &aru->rx_submitted); 232 usb_anchor_urb(urb, &aru->rx_submitted);
179 err = usb_submit_urb(urb, GFP_ATOMIC); 233 err = usb_submit_urb(urb, GFP_ATOMIC);
180 if (err) { 234 if (unlikely(err)) {
181 usb_unanchor_urb(urb); 235 usb_unanchor_urb(urb);
182 dev_kfree_skb_irq(skb); 236 goto free;
183 } 237 }
184 238
185 return ; 239 return ;
186 240
187free: 241free:
188 dev_kfree_skb_irq(skb); 242 dev_kfree_skb_irq(skb);
189 return;
190} 243}
191 244
192static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru, 245static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru,
@@ -282,21 +335,47 @@ err_out:
282 return err; 335 return err;
283} 336}
284 337
285static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru) 338static int ar9170_usb_flush(struct ar9170 *ar)
286{ 339{
287 int ret; 340 struct ar9170_usb *aru = (void *) ar;
341 struct urb *urb;
342 int ret, err = 0;
288 343
289 aru->common.state = AR9170_UNKNOWN_STATE; 344 if (IS_STARTED(ar))
345 aru->common.state = AR9170_IDLE;
290 346
291 usb_unlink_anchored_urbs(&aru->tx_submitted); 347 usb_wait_anchor_empty_timeout(&aru->tx_pending,
348 msecs_to_jiffies(800));
349 while ((urb = usb_get_from_anchor(&aru->tx_pending))) {
350 ar9170_tx_callback(&aru->common, (void *) urb->context);
351 usb_free_urb(urb);
352 }
292 353
293 /* give the LED OFF command and the deauth frame a chance to air. */ 354 /* wait a while until the tx queues have drained */
294 ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted, 355 ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
295 msecs_to_jiffies(100)); 356 msecs_to_jiffies(100));
296 if (ret == 0) 357 if (ret == 0)
297 dev_err(&aru->udev->dev, "kill pending tx urbs.\n"); 358 err = -ETIMEDOUT;
298 usb_poison_anchored_urbs(&aru->tx_submitted); 359
360 usb_kill_anchored_urbs(&aru->tx_submitted);
361
362 if (IS_ACCEPTING_CMD(ar))
363 aru->common.state = AR9170_STARTED;
299 364
365 return err;
366}
367
368static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
369{
370 int err;
371
372 aru->common.state = AR9170_UNKNOWN_STATE;
373
374 err = ar9170_usb_flush(&aru->common);
375 if (err)
376 dev_err(&aru->udev->dev, "stuck tx urbs!\n");
377
378 usb_poison_anchored_urbs(&aru->tx_submitted);
300 usb_poison_anchored_urbs(&aru->rx_submitted); 379 usb_poison_anchored_urbs(&aru->rx_submitted);
301} 380}
302 381
@@ -337,7 +416,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
337 416
338 usb_anchor_urb(urb, &aru->tx_submitted); 417 usb_anchor_urb(urb, &aru->tx_submitted);
339 err = usb_submit_urb(urb, GFP_ATOMIC); 418 err = usb_submit_urb(urb, GFP_ATOMIC);
340 if (err) { 419 if (unlikely(err)) {
341 usb_unanchor_urb(urb); 420 usb_unanchor_urb(urb);
342 usb_free_urb(urb); 421 usb_free_urb(urb);
343 goto err_unbuf; 422 goto err_unbuf;
@@ -380,12 +459,10 @@ err_free:
380 return err; 459 return err;
381} 460}
382 461
383static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb, 462static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
384 bool txstatus_needed, unsigned int extra_len)
385{ 463{
386 struct ar9170_usb *aru = (struct ar9170_usb *) ar; 464 struct ar9170_usb *aru = (struct ar9170_usb *) ar;
387 struct urb *urb; 465 struct urb *urb;
388 int err;
389 466
390 if (unlikely(!IS_STARTED(ar))) { 467 if (unlikely(!IS_STARTED(ar))) {
391 /* Seriously, what were you drink... err... thinking!? */ 468 /* Seriously, what were you drink... err... thinking!? */
@@ -398,18 +475,17 @@ static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
398 475
399 usb_fill_bulk_urb(urb, aru->udev, 476 usb_fill_bulk_urb(urb, aru->udev,
400 usb_sndbulkpipe(aru->udev, AR9170_EP_TX), 477 usb_sndbulkpipe(aru->udev, AR9170_EP_TX),
401 skb->data, skb->len + extra_len, (txstatus_needed ? 478 skb->data, skb->len,
402 ar9170_usb_tx_urb_complete : 479 ar9170_usb_tx_urb_complete_frame, skb);
403 ar9170_usb_tx_urb_complete_free), skb);
404 urb->transfer_flags |= URB_ZERO_PACKET; 480 urb->transfer_flags |= URB_ZERO_PACKET;
405 481
406 usb_anchor_urb(urb, &aru->tx_submitted); 482 usb_anchor_urb(urb, &aru->tx_pending);
407 err = usb_submit_urb(urb, GFP_ATOMIC); 483 aru->tx_pending_urbs++;
408 if (unlikely(err))
409 usb_unanchor_urb(urb);
410 484
411 usb_free_urb(urb); 485 usb_free_urb(urb);
412 return err; 486
487 ar9170_usb_submit_urb(aru);
488 return 0;
413} 489}
414 490
415static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer) 491static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer)
@@ -418,7 +494,7 @@ static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer)
418 unsigned long flags; 494 unsigned long flags;
419 u32 in, out; 495 u32 in, out;
420 496
421 if (!buffer) 497 if (unlikely(!buffer))
422 return ; 498 return ;
423 499
424 in = le32_to_cpup((__le32 *)buffer); 500 in = le32_to_cpup((__le32 *)buffer);
@@ -504,17 +580,29 @@ static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
504{ 580{
505 int err = 0; 581 int err = 0;
506 582
507 err = request_firmware(&aru->init_values, "ar9170-1.fw", 583 err = request_firmware(&aru->firmware, "ar9170.fw",
508 &aru->udev->dev); 584 &aru->udev->dev);
509 if (err) { 585 if (!err) {
510 dev_err(&aru->udev->dev, "file with init values not found.\n"); 586 aru->init_values = NULL;
511 return err; 587 return 0;
588 }
589
590 if (aru->req_one_stage_fw) {
591 dev_err(&aru->udev->dev, "ar9170.fw firmware file "
592 "not found and is required for this device\n");
593 return -EINVAL;
512 } 594 }
513 595
596 dev_err(&aru->udev->dev, "ar9170.fw firmware file "
597 "not found, trying old firmware...\n");
598
599 err = request_firmware(&aru->init_values, "ar9170-1.fw",
600 &aru->udev->dev);
601
514 err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev); 602 err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
515 if (err) { 603 if (err) {
516 release_firmware(aru->init_values); 604 release_firmware(aru->init_values);
517 dev_err(&aru->udev->dev, "firmware file not found.\n"); 605 dev_err(&aru->udev->dev, "file with init values not found.\n");
518 return err; 606 return err;
519 } 607 }
520 608
@@ -548,6 +636,9 @@ static int ar9170_usb_upload_firmware(struct ar9170_usb *aru)
548{ 636{
549 int err; 637 int err;
550 638
639 if (!aru->init_values)
640 goto upload_fw_start;
641
551 /* First, upload initial values to device RAM */ 642 /* First, upload initial values to device RAM */
552 err = ar9170_usb_upload(aru, aru->init_values->data, 643 err = ar9170_usb_upload(aru, aru->init_values->data,
553 aru->init_values->size, 0x102800, false); 644 aru->init_values->size, 0x102800, false);
@@ -557,6 +648,8 @@ static int ar9170_usb_upload_firmware(struct ar9170_usb *aru)
557 return err; 648 return err;
558 } 649 }
559 650
651upload_fw_start:
652
560 /* Then, upload the firmware itself and start it */ 653 /* Then, upload the firmware itself and start it */
561 return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size, 654 return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size,
562 0x200000, true); 655 0x200000, true);
@@ -592,10 +685,8 @@ static void ar9170_usb_stop(struct ar9170 *ar)
592 if (IS_ACCEPTING_CMD(ar)) 685 if (IS_ACCEPTING_CMD(ar))
593 aru->common.state = AR9170_STOPPED; 686 aru->common.state = AR9170_STOPPED;
594 687
595 /* lets wait a while until the tx - queues are dried out */ 688 ret = ar9170_usb_flush(ar);
596 ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted, 689 if (ret)
597 msecs_to_jiffies(1000));
598 if (ret == 0)
599 dev_err(&aru->udev->dev, "kill pending tx urbs.\n"); 690 dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
600 691
601 usb_poison_anchored_urbs(&aru->tx_submitted); 692 usb_poison_anchored_urbs(&aru->tx_submitted);
@@ -656,6 +747,15 @@ err_out:
656 return err; 747 return err;
657} 748}
658 749
750static bool ar9170_requires_one_stage(const struct usb_device_id *id)
751{
752 if (!id->driver_info)
753 return false;
754 if (id->driver_info == AR9170_REQ_FW1_ONLY)
755 return true;
756 return false;
757}
758
659static int ar9170_usb_probe(struct usb_interface *intf, 759static int ar9170_usb_probe(struct usb_interface *intf,
660 const struct usb_device_id *id) 760 const struct usb_device_id *id)
661{ 761{
@@ -676,14 +776,22 @@ static int ar9170_usb_probe(struct usb_interface *intf,
676 aru->intf = intf; 776 aru->intf = intf;
677 ar = &aru->common; 777 ar = &aru->common;
678 778
779 aru->req_one_stage_fw = ar9170_requires_one_stage(id);
780
679 usb_set_intfdata(intf, aru); 781 usb_set_intfdata(intf, aru);
680 SET_IEEE80211_DEV(ar->hw, &udev->dev); 782 SET_IEEE80211_DEV(ar->hw, &udev->dev);
681 783
682 init_usb_anchor(&aru->rx_submitted); 784 init_usb_anchor(&aru->rx_submitted);
785 init_usb_anchor(&aru->tx_pending);
683 init_usb_anchor(&aru->tx_submitted); 786 init_usb_anchor(&aru->tx_submitted);
684 init_completion(&aru->cmd_wait); 787 init_completion(&aru->cmd_wait);
788 spin_lock_init(&aru->tx_urb_lock);
789
790 aru->tx_pending_urbs = 0;
791 aru->tx_submitted_urbs = 0;
685 792
686 aru->common.stop = ar9170_usb_stop; 793 aru->common.stop = ar9170_usb_stop;
794 aru->common.flush = ar9170_usb_flush;
687 aru->common.open = ar9170_usb_open; 795 aru->common.open = ar9170_usb_open;
688 aru->common.tx = ar9170_usb_tx; 796 aru->common.tx = ar9170_usb_tx;
689 aru->common.exec_cmd = ar9170_usb_exec_cmd; 797 aru->common.exec_cmd = ar9170_usb_exec_cmd;
@@ -691,7 +799,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
691 799
692#ifdef CONFIG_PM 800#ifdef CONFIG_PM
693 udev->reset_resume = 1; 801 udev->reset_resume = 1;
694#endif 802#endif /* CONFIG_PM */
695 err = ar9170_usb_reset(aru); 803 err = ar9170_usb_reset(aru);
696 if (err) 804 if (err)
697 goto err_freehw; 805 goto err_freehw;
@@ -776,11 +884,6 @@ static int ar9170_resume(struct usb_interface *intf)
776 usb_unpoison_anchored_urbs(&aru->rx_submitted); 884 usb_unpoison_anchored_urbs(&aru->rx_submitted);
777 usb_unpoison_anchored_urbs(&aru->tx_submitted); 885 usb_unpoison_anchored_urbs(&aru->tx_submitted);
778 886
779 /*
780 * FIXME: firmware upload will fail on resume.
781 * but this is better than a hang!
782 */
783
784 err = ar9170_usb_init_device(aru); 887 err = ar9170_usb_init_device(aru);
785 if (err) 888 if (err)
786 goto err_unrx; 889 goto err_unrx;
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index ac42586495d8..d098f4d5d2f2 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -51,6 +51,7 @@
51#include "ar9170.h" 51#include "ar9170.h"
52 52
53#define AR9170_NUM_RX_URBS 16 53#define AR9170_NUM_RX_URBS 16
54#define AR9170_NUM_TX_URBS 8
54 55
55struct firmware; 56struct firmware;
56 57
@@ -60,9 +61,15 @@ struct ar9170_usb {
60 struct usb_interface *intf; 61 struct usb_interface *intf;
61 62
62 struct usb_anchor rx_submitted; 63 struct usb_anchor rx_submitted;
64 struct usb_anchor tx_pending;
63 struct usb_anchor tx_submitted; 65 struct usb_anchor tx_submitted;
64 66
65 spinlock_t cmdlock; 67 bool req_one_stage_fw;
68
69 spinlock_t tx_urb_lock;
70 unsigned int tx_submitted_urbs;
71 unsigned int tx_pending_urbs;
72
66 struct completion cmd_wait; 73 struct completion cmd_wait;
67 int readlen; 74 int readlen;
68 u8 *readbuf; 75 u8 *readbuf;
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 84a74c5248e5..090dc6d268a3 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -11,5 +11,6 @@ ath5k-y += reset.o
11ath5k-y += attach.o 11ath5k-y += attach.o
12ath5k-y += base.o 12ath5k-y += base.o
13ath5k-y += led.o 13ath5k-y += led.o
14ath5k-y += rfkill.o
14ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o 15ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
15obj-$(CONFIG_ATH5K) += ath5k.o 16obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 813718210338..6358233bac99 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1256,6 +1256,10 @@ extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
1256extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val); 1256extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
1257extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level); 1257extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
1258 1258
1259/* rfkill Functions */
1260extern void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
1261extern void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
1262
1259/* Misc functions */ 1263/* Misc functions */
1260int ath5k_hw_set_capabilities(struct ath5k_hw *ah); 1264int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
1261extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result); 1265extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index fb5193764afa..55f7de09d134 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2070,6 +2070,13 @@ err_unmap:
2070 return ret; 2070 return ret;
2071} 2071}
2072 2072
2073static void ath5k_beacon_disable(struct ath5k_softc *sc)
2074{
2075 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2076 ath5k_hw_set_imr(sc->ah, sc->imask);
2077 ath5k_hw_stop_tx_dma(sc->ah, sc->bhalq);
2078}
2079
2073/* 2080/*
2074 * Transmit a beacon frame at SWBA. Dynamic updates to the 2081 * Transmit a beacon frame at SWBA. Dynamic updates to the
2075 * frame contents are done as needed and the slot time is 2082 * frame contents are done as needed and the slot time is
@@ -2353,6 +2360,8 @@ ath5k_init(struct ath5k_softc *sc)
2353 if (ret) 2360 if (ret)
2354 goto done; 2361 goto done;
2355 2362
2363 ath5k_rfkill_hw_start(ah);
2364
2356 /* 2365 /*
2357 * Reset the key cache since some parts do not reset the 2366 * Reset the key cache since some parts do not reset the
2358 * contents on initial power up or resume from suspend. 2367 * contents on initial power up or resume from suspend.
@@ -2461,6 +2470,8 @@ ath5k_stop_hw(struct ath5k_softc *sc)
2461 tasklet_kill(&sc->restq); 2470 tasklet_kill(&sc->restq);
2462 tasklet_kill(&sc->beacontq); 2471 tasklet_kill(&sc->beacontq);
2463 2472
2473 ath5k_rfkill_hw_stop(sc->ah);
2474
2464 return ret; 2475 return ret;
2465} 2476}
2466 2477
@@ -2519,6 +2530,9 @@ ath5k_intr(int irq, void *dev_id)
2519 */ 2530 */
2520 ath5k_hw_update_mib_counters(ah, &sc->ll_stats); 2531 ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
2521 } 2532 }
2533 if (status & AR5K_INT_GPIO)
2534 tasklet_schedule(&sc->rf_kill.toggleq);
2535
2522 } 2536 }
2523 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); 2537 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2524 2538
@@ -2757,6 +2771,7 @@ ath5k_remove_interface(struct ieee80211_hw *hw,
2757 goto end; 2771 goto end;
2758 2772
2759 ath5k_hw_set_lladdr(sc->ah, mac); 2773 ath5k_hw_set_lladdr(sc->ah, mac);
2774 ath5k_beacon_disable(sc);
2760 sc->vif = NULL; 2775 sc->vif = NULL;
2761end: 2776end:
2762 mutex_unlock(&sc->lock); 2777 mutex_unlock(&sc->lock);
@@ -2775,11 +2790,9 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
2775 2790
2776 mutex_lock(&sc->lock); 2791 mutex_lock(&sc->lock);
2777 2792
2778 sc->bintval = conf->beacon_int;
2779
2780 ret = ath5k_chan_set(sc, conf->channel); 2793 ret = ath5k_chan_set(sc, conf->channel);
2781 if (ret < 0) 2794 if (ret < 0)
2782 return ret; 2795 goto unlock;
2783 2796
2784 if ((changed & IEEE80211_CONF_CHANGE_POWER) && 2797 if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
2785 (sc->power_level != conf->power_level)) { 2798 (sc->power_level != conf->power_level)) {
@@ -2808,8 +2821,9 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
2808 */ 2821 */
2809 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); 2822 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
2810 2823
2824unlock:
2811 mutex_unlock(&sc->lock); 2825 mutex_unlock(&sc->lock);
2812 return 0; 2826 return ret;
2813} 2827}
2814 2828
2815#define SUPPORTED_FIF_FLAGS \ 2829#define SUPPORTED_FIF_FLAGS \
@@ -3061,7 +3075,14 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
3061{ 3075{
3062 int ret; 3076 int ret;
3063 struct ath5k_softc *sc = hw->priv; 3077 struct ath5k_softc *sc = hw->priv;
3064 struct sk_buff *skb = ieee80211_beacon_get(hw, vif); 3078 struct sk_buff *skb;
3079
3080 if (WARN_ON(!vif)) {
3081 ret = -EINVAL;
3082 goto out;
3083 }
3084
3085 skb = ieee80211_beacon_get(hw, vif);
3065 3086
3066 if (!skb) { 3087 if (!skb) {
3067 ret = -ENOMEM; 3088 ret = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 852b2c189fd8..f9b7f2f819b7 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -46,6 +46,7 @@
46#include <linux/wireless.h> 46#include <linux/wireless.h>
47#include <linux/if_ether.h> 47#include <linux/if_ether.h>
48#include <linux/leds.h> 48#include <linux/leds.h>
49#include <linux/rfkill.h>
49 50
50#include "ath5k.h" 51#include "ath5k.h"
51#include "debug.h" 52#include "debug.h"
@@ -91,6 +92,15 @@ struct ath5k_led
91 struct led_classdev led_dev; /* led classdev */ 92 struct led_classdev led_dev; /* led classdev */
92}; 93};
93 94
95/* Rfkill */
96struct ath5k_rfkill {
97 /* GPIO PIN for rfkill */
98 u16 gpio;
99 /* polarity of rfkill GPIO PIN */
100 bool polarity;
101 /* RFKILL toggle tasklet */
102 struct tasklet_struct toggleq;
103};
94 104
95#if CHAN_DEBUG 105#if CHAN_DEBUG
96#define ATH_CHAN_MAX (26+26+26+200+200) 106#define ATH_CHAN_MAX (26+26+26+200+200)
@@ -167,6 +177,8 @@ struct ath5k_softc {
167 struct tasklet_struct txtq; /* tx intr tasklet */ 177 struct tasklet_struct txtq; /* tx intr tasklet */
168 struct ath5k_led tx_led; /* tx led */ 178 struct ath5k_led tx_led; /* tx led */
169 179
180 struct ath5k_rfkill rf_kill;
181
170 spinlock_t block; /* protects beacon */ 182 spinlock_t block; /* protects beacon */
171 struct tasklet_struct beacontq; /* beacon intr tasklet */ 183 struct tasklet_struct beacontq; /* beacon intr tasklet */
172 struct ath5k_buf *bbuf; /* beacon buffer */ 184 struct ath5k_buf *bbuf; /* beacon buffer */
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 66067733ddd3..bd0a97a38d34 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1304,23 +1304,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1304 if (ah->ah_version != AR5K_AR5210) 1304 if (ah->ah_version != AR5K_AR5210)
1305 ath5k_hw_set_imr(ah, ah->ah_imr); 1305 ath5k_hw_set_imr(ah, ah->ah_imr);
1306 1306
1307 /*
1308 * Setup RFKill interrupt if rfkill flag is set on eeprom.
1309 * TODO: Use gpio pin and polarity infos from eeprom
1310 * TODO: Handle this in ath5k_intr because it'll result
1311 * a nasty interrupt storm.
1312 */
1313#if 0
1314 if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
1315 ath5k_hw_set_gpio_input(ah, 0);
1316 ah->ah_gpio[0] = ath5k_hw_get_gpio(ah, 0);
1317 if (ah->ah_gpio[0] == 0)
1318 ath5k_hw_set_gpio_intr(ah, 0, 1);
1319 else
1320 ath5k_hw_set_gpio_intr(ah, 0, 0);
1321 }
1322#endif
1323
1324 /* Enable 32KHz clock function for AR5212+ chips 1307 /* Enable 32KHz clock function for AR5212+ chips
1325 * Set clocks to 32KHz operation and use an 1308 * Set clocks to 32KHz operation and use an
1326 * external 32KHz crystal when sleeping if one 1309 * external 32KHz crystal when sleeping if one
diff --git a/drivers/net/wireless/ath/ath5k/rfkill.c b/drivers/net/wireless/ath/ath5k/rfkill.c
new file mode 100644
index 000000000000..41a877b73fce
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/rfkill.c
@@ -0,0 +1,121 @@
1/*
2 * RFKILL support for ath5k
3 *
4 * Copyright (c) 2009 Tobias Doerffel <tobias.doerffel@gmail.com>
5 *
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 * redistribution must be conditioned upon including a substantially
17 * similar Disclaimer requirement for further binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * NO WARRANTY
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
26 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
27 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
28 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
31 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGES.
34 */
35
36#include "base.h"
37
38
39static inline void ath5k_rfkill_disable(struct ath5k_softc *sc)
40{
41 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
42 sc->rf_kill.gpio, sc->rf_kill.polarity);
43 ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
44 ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, !sc->rf_kill.polarity);
45}
46
47
48static inline void ath5k_rfkill_enable(struct ath5k_softc *sc)
49{
50 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
51 sc->rf_kill.gpio, sc->rf_kill.polarity);
52 ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
53 ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, sc->rf_kill.polarity);
54}
55
56static inline void ath5k_rfkill_set_intr(struct ath5k_softc *sc, bool enable)
57{
58 struct ath5k_hw *ah = sc->ah;
59 u32 curval;
60
61 ath5k_hw_set_gpio_input(ah, sc->rf_kill.gpio);
62 curval = ath5k_hw_get_gpio(ah, sc->rf_kill.gpio);
63 ath5k_hw_set_gpio_intr(ah, sc->rf_kill.gpio, enable ?
64 !!curval : !curval);
65}
66
67static bool
68ath5k_is_rfkill_set(struct ath5k_softc *sc)
69{
70 /* configuring GPIO for input for some reason disables rfkill */
71 /*ath5k_hw_set_gpio_input(sc->ah, sc->rf_kill.gpio);*/
72 return ath5k_hw_get_gpio(sc->ah, sc->rf_kill.gpio) ==
73 sc->rf_kill.polarity;
74}
75
76static void
77ath5k_tasklet_rfkill_toggle(unsigned long data)
78{
79 struct ath5k_softc *sc = (void *)data;
80 bool blocked;
81
82 blocked = ath5k_is_rfkill_set(sc);
83 wiphy_rfkill_set_hw_state(sc->hw->wiphy, blocked);
84}
85
86
87void
88ath5k_rfkill_hw_start(struct ath5k_hw *ah)
89{
90 struct ath5k_softc *sc = ah->ah_sc;
91
92 /* read rfkill GPIO configuration from EEPROM header */
93 sc->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
94 sc->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
95
96 tasklet_init(&sc->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
97 (unsigned long)sc);
98
99 ath5k_rfkill_disable(sc);
100
101 /* enable interrupt for rfkill switch */
102 if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
103 ath5k_rfkill_set_intr(sc, true);
104}
105
106
107void
108ath5k_rfkill_hw_stop(struct ath5k_hw *ah)
109{
110 struct ath5k_softc *sc = ah->ah_sc;
111
112 /* disable interrupt for rfkill switch */
113 if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
114 ath5k_rfkill_set_intr(sc, false);
115
116 tasklet_kill(&sc->rf_kill.toggleq);
117
118 /* enable RFKILL when stopping HW so Wifi LED is turned off */
119 ath5k_rfkill_enable(sc);
120}
121
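
The new rfkill.c keeps all hardware-switch handling out of interrupt context: the interrupt path only schedules rf_kill.toggleq, and the tasklet reads the GPIO and reports the result to mac80211 through wiphy_rfkill_set_hw_state(). A minimal sketch of the interrupt side, assuming the companion base.c change (not shown in this hunk) simply schedules the tasklet when the rfkill GPIO interrupt fires:

static void ath5k_example_gpio_irq(struct ath5k_softc *sc)
{
	/* Assumed hookup; the real scheduling lives in base.c, outside
	 * this hunk. The actual work (GPIO read plus
	 * wiphy_rfkill_set_hw_state()) happens in
	 * ath5k_tasklet_rfkill_toggle() above. */
	tasklet_schedule(&sc->rf_kill.toggleq);
}
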
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 796a3adffea0..515880aa2116 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -460,12 +460,9 @@ struct ath_led {
460 bool registered; 460 bool registered;
461}; 461};
462 462
463/* Rfkill */
464#define ATH_RFKILL_POLL_INTERVAL 2000 /* msecs */
465
466struct ath_rfkill { 463struct ath_rfkill {
467 struct rfkill *rfkill; 464 struct rfkill *rfkill;
468 struct delayed_work rfkill_poll; 465 struct rfkill_ops ops;
469 char rfkill_name[32]; 466 char rfkill_name[32];
470}; 467};
471 468
@@ -509,8 +506,6 @@ struct ath_rfkill {
509#define SC_OP_RXFLUSH BIT(7) 506#define SC_OP_RXFLUSH BIT(7)
510#define SC_OP_LED_ASSOCIATED BIT(8) 507#define SC_OP_LED_ASSOCIATED BIT(8)
511#define SC_OP_RFKILL_REGISTERED BIT(9) 508#define SC_OP_RFKILL_REGISTERED BIT(9)
512#define SC_OP_RFKILL_SW_BLOCKED BIT(10)
513#define SC_OP_RFKILL_HW_BLOCKED BIT(11)
514#define SC_OP_WAIT_FOR_BEACON BIT(12) 509#define SC_OP_WAIT_FOR_BEACON BIT(12)
515#define SC_OP_LED_ON BIT(13) 510#define SC_OP_LED_ON BIT(13)
516#define SC_OP_SCANNING BIT(14) 511#define SC_OP_SCANNING BIT(14)
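
struct ath_rfkill drops the 2-second polling work and the SW/HW blocked flags and instead embeds a struct rfkill_ops; blocking state is now tracked by the rfkill core rather than by driver flags. For reference, the ops structure in the rewritten rfkill core that these callbacks plug into looks roughly like this (sketch from include/linux/rfkill.h of this kernel series, quoted from memory):

struct rfkill_ops {
	void	(*poll)(struct rfkill *rfkill, void *data);
	void	(*query)(struct rfkill *rfkill, void *data);
	int	(*set_block)(void *data, bool blocked);
};

The .set_block and .poll members are what the main.c hunks further down fill in with ath_rfkill_set_block() and ath_rfkill_poll_state().
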
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index a21b21339fbc..3639a2e6987d 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -411,6 +411,7 @@ void ath_beacon_tasklet(unsigned long data)
411 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 411 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
412 DPRINTF(sc, ATH_DBG_BEACON, 412 DPRINTF(sc, ATH_DBG_BEACON,
413 "beacon is officially stuck\n"); 413 "beacon is officially stuck\n");
414 sc->sc_flags |= SC_OP_TSF_RESET;
414 ath_reset(sc, false); 415 ath_reset(sc, false);
415 } 416 }
416 417
@@ -673,6 +674,14 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
673 674
674 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; 675 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
675 676
677 /*
678 * It looks like mac80211 may end up using beacon interval of zero in
679 * some cases (at least for mesh point). Avoid getting into an
680 * infinite loop by using a bit safer value instead..
681 */
682 if (intval == 0)
683 intval = 100;
684
676 /* Pull nexttbtt forward to reflect the current TSF */ 685 /* Pull nexttbtt forward to reflect the current TSF */
677 686
678 nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp); 687 nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp);
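
The intval == 0 guard matters because the code that pulls nexttbtt forward advances it in steps of intval until it passes the current TSF; with a zero beacon interval that catch-up never terminates. Roughly the shape of the loop being protected (simplified sketch; the exact statement in ath_beacon_config_adhoc() is outside this hunk):

	/* Simplified illustration only. With intval == 0, nexttbtt
	 * never advances and this spins forever. */
	while (nexttbtt < tsftu)
		nexttbtt += intval;
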
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 97df20cbf528..6d20725d6451 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -44,6 +44,44 @@ static int ath9k_debugfs_open(struct inode *inode, struct file *file)
44 return 0; 44 return 0;
45} 45}
46 46
47static ssize_t read_file_debug(struct file *file, char __user *user_buf,
48 size_t count, loff_t *ppos)
49{
50 struct ath_softc *sc = file->private_data;
51 char buf[32];
52 unsigned int len;
53
54 len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.debug_mask);
55 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
56}
57
58static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
59 size_t count, loff_t *ppos)
60{
61 struct ath_softc *sc = file->private_data;
62 unsigned long mask;
63 char buf[32];
64 ssize_t len;
65
66 len = min(count, sizeof(buf) - 1);
67 if (copy_from_user(buf, user_buf, len))
68 return -EINVAL;
69
70 buf[len] = '\0';
71 if (strict_strtoul(buf, 0, &mask))
72 return -EINVAL;
73
74 sc->debug.debug_mask = mask;
75 return count;
76}
77
78static const struct file_operations fops_debug = {
79 .read = read_file_debug,
80 .write = write_file_debug,
81 .open = ath9k_debugfs_open,
82 .owner = THIS_MODULE
83};
84
47static ssize_t read_file_dma(struct file *file, char __user *user_buf, 85static ssize_t read_file_dma(struct file *file, char __user *user_buf,
48 size_t count, loff_t *ppos) 86 size_t count, loff_t *ppos)
49{ 87{
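
The new "debug" file exposes sc->debug.debug_mask for reading and writing; the mask is what the driver's debug print path checks before emitting a message. A hedged sketch of how such a mask gates output (illustration only; the real DPRINTF lives in debug.h/debug.c and is not shown in these hunks):

/* Names prefixed EXAMPLE_ are not part of the patch. */
#define EXAMPLE_DPRINTF(sc, dbg_mask, fmt, ...) do {			\
	if ((sc)->debug.debug_mask & (dbg_mask))			\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
} while (0)
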
@@ -224,111 +262,66 @@ static const struct file_operations fops_interrupt = {
224 .owner = THIS_MODULE 262 .owner = THIS_MODULE
225}; 263};
226 264
227static void ath_debug_stat_11n_rc(struct ath_softc *sc, struct sk_buff *skb) 265void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb)
228{
229 struct ath_tx_info_priv *tx_info_priv = NULL;
230 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
231 struct ieee80211_tx_rate *rates = tx_info->status.rates;
232 int final_ts_idx, idx;
233
234 tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
235 final_ts_idx = tx_info_priv->tx.ts_rateindex;
236 idx = sc->cur_rate_table->info[rates[final_ts_idx].idx].dot11rate;
237
238 sc->debug.stats.n_rcstats[idx].success++;
239}
240
241static void ath_debug_stat_legacy_rc(struct ath_softc *sc, struct sk_buff *skb)
242{ 266{
243 struct ath_tx_info_priv *tx_info_priv = NULL; 267 struct ath_tx_info_priv *tx_info_priv = NULL;
244 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 268 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
245 struct ieee80211_tx_rate *rates = tx_info->status.rates; 269 struct ieee80211_tx_rate *rates = tx_info->status.rates;
246 int final_ts_idx, idx; 270 int final_ts_idx, idx;
271 struct ath_rc_stats *stats;
247 272
248 tx_info_priv = ATH_TX_INFO_PRIV(tx_info); 273 tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
249 final_ts_idx = tx_info_priv->tx.ts_rateindex; 274 final_ts_idx = tx_info_priv->tx.ts_rateindex;
250 idx = rates[final_ts_idx].idx; 275 idx = rates[final_ts_idx].idx;
251 276 stats = &sc->debug.stats.rcstats[idx];
252 sc->debug.stats.legacy_rcstats[idx].success++; 277 stats->success++;
253}
254
255void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb)
256{
257 if (conf_is_ht(&sc->hw->conf))
258 ath_debug_stat_11n_rc(sc, skb);
259 else
260 ath_debug_stat_legacy_rc(sc, skb);
261} 278}
262 279
263/* FIXME: legacy rates, later on .. */
264void ath_debug_stat_retries(struct ath_softc *sc, int rix, 280void ath_debug_stat_retries(struct ath_softc *sc, int rix,
265 int xretries, int retries, u8 per) 281 int xretries, int retries, u8 per)
266{ 282{
267 if (conf_is_ht(&sc->hw->conf)) { 283 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[rix];
268 int idx = sc->cur_rate_table->info[rix].dot11rate;
269 284
270 sc->debug.stats.n_rcstats[idx].xretries += xretries; 285 stats->xretries += xretries;
271 sc->debug.stats.n_rcstats[idx].retries += retries; 286 stats->retries += retries;
272 sc->debug.stats.n_rcstats[idx].per = per; 287 stats->per = per;
273 }
274} 288}
275 289
276static ssize_t ath_read_file_stat_11n_rc(struct file *file, 290static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
277 char __user *user_buf, 291 size_t count, loff_t *ppos)
278 size_t count, loff_t *ppos)
279{ 292{
280 struct ath_softc *sc = file->private_data; 293 struct ath_softc *sc = file->private_data;
281 char buf[1024]; 294 char *buf;
282 unsigned int len = 0; 295 unsigned int len = 0, max;
283 int i = 0; 296 int i = 0;
297 ssize_t retval;
284 298
285 len += sprintf(buf, "%7s %13s %8s %8s %6s\n\n", "Rate", "Success", 299 if (sc->cur_rate_table == NULL)
286 "Retries", "XRetries", "PER"); 300 return 0;
287
288 for (i = 0; i <= 15; i++) {
289 len += snprintf(buf + len, sizeof(buf) - len,
290 "%5s%3d: %8u %8u %8u %8u\n", "MCS", i,
291 sc->debug.stats.n_rcstats[i].success,
292 sc->debug.stats.n_rcstats[i].retries,
293 sc->debug.stats.n_rcstats[i].xretries,
294 sc->debug.stats.n_rcstats[i].per);
295 }
296
297 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
298}
299 301
300static ssize_t ath_read_file_stat_legacy_rc(struct file *file, 302 max = 80 + sc->cur_rate_table->rate_cnt * 64;
301 char __user *user_buf, 303 buf = kmalloc(max + 1, GFP_KERNEL);
302 size_t count, loff_t *ppos) 304 if (buf == NULL)
303{ 305 return 0;
304 struct ath_softc *sc = file->private_data; 306 buf[max] = 0;
305 char buf[512];
306 unsigned int len = 0;
307 int i = 0;
308 307
309 len += sprintf(buf, "%7s %13s\n\n", "Rate", "Success"); 308 len += sprintf(buf, "%5s %15s %8s %9s %3s\n\n", "Rate", "Success",
309 "Retries", "XRetries", "PER");
310 310
311 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) { 311 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
312 len += snprintf(buf + len, sizeof(buf) - len, "%5u: %12u\n", 312 u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
313 sc->cur_rate_table->info[i].ratekbps / 1000, 313 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
314 sc->debug.stats.legacy_rcstats[i].success); 314
315 len += snprintf(buf + len, max - len,
316 "%3u.%d: %8u %8u %8u %8u\n", ratekbps / 1000,
317 (ratekbps % 1000) / 100, stats->success,
318 stats->retries, stats->xretries,
319 stats->per);
315 } 320 }
316 321
317 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 322 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
318} 323 kfree(buf);
319 324 return retval;
320static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
321 size_t count, loff_t *ppos)
322{
323 struct ath_softc *sc = file->private_data;
324
325 if (sc->cur_rate_table == NULL)
326 return 0;
327
328 if (conf_is_ht(&sc->hw->conf))
329 return ath_read_file_stat_11n_rc(file, user_buf, count, ppos);
330 else
331 return ath_read_file_stat_legacy_rc(file, user_buf, count ,ppos);
332} 325}
333 326
334static const struct file_operations fops_rcstat = { 327static const struct file_operations fops_rcstat = {
@@ -506,6 +499,11 @@ int ath9k_init_debug(struct ath_softc *sc)
506 if (!sc->debug.debugfs_phy) 499 if (!sc->debug.debugfs_phy)
507 goto err; 500 goto err;
508 501
502 sc->debug.debugfs_debug = debugfs_create_file("debug",
503 S_IRUGO | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug);
504 if (!sc->debug.debugfs_debug)
505 goto err;
506
509 sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUGO, 507 sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUGO,
510 sc->debug.debugfs_phy, sc, &fops_dma); 508 sc->debug.debugfs_phy, sc, &fops_dma);
511 if (!sc->debug.debugfs_dma) 509 if (!sc->debug.debugfs_dma)
@@ -543,6 +541,7 @@ void ath9k_exit_debug(struct ath_softc *sc)
543 debugfs_remove(sc->debug.debugfs_rcstat); 541 debugfs_remove(sc->debug.debugfs_rcstat);
544 debugfs_remove(sc->debug.debugfs_interrupt); 542 debugfs_remove(sc->debug.debugfs_interrupt);
545 debugfs_remove(sc->debug.debugfs_dma); 543 debugfs_remove(sc->debug.debugfs_dma);
544 debugfs_remove(sc->debug.debugfs_debug);
546 debugfs_remove(sc->debug.debugfs_phy); 545 debugfs_remove(sc->debug.debugfs_phy);
547} 546}
548 547
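
read_file_rcstat() now covers both legacy and HT rate tables with one kmalloc'd buffer sized as 80 bytes of header plus 64 bytes per rate, which comfortably bounds each "%3u.%d: %8u %8u %8u %8u\n" row. Rates are printed in Mb/s with one decimal, so ratekbps = 5500 renders as "5.5". A standalone illustration of that formatting (plain userspace C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int rates[] = { 1000, 5500, 54000 };
	unsigned int i;

	/* Prints "  1.0", "  5.5", " 54.0" */
	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%3u.%d\n", rates[i] / 1000, (rates[i] % 1000) / 100);
	return 0;
}
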
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index db845cf960c9..edda15bf2c15 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -80,11 +80,7 @@ struct ath_interrupt_stats {
80 u32 dtim; 80 u32 dtim;
81}; 81};
82 82
83struct ath_legacy_rc_stats { 83struct ath_rc_stats {
84 u32 success;
85};
86
87struct ath_11n_rc_stats {
88 u32 success; 84 u32 success;
89 u32 retries; 85 u32 retries;
90 u32 xretries; 86 u32 xretries;
@@ -93,13 +89,13 @@ struct ath_11n_rc_stats {
93 89
94struct ath_stats { 90struct ath_stats {
95 struct ath_interrupt_stats istats; 91 struct ath_interrupt_stats istats;
96 struct ath_legacy_rc_stats legacy_rcstats[12]; /* max(11a,11b,11g) */ 92 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
97 struct ath_11n_rc_stats n_rcstats[16]; /* 0..15 MCS rates */
98}; 93};
99 94
100struct ath9k_debug { 95struct ath9k_debug {
101 int debug_mask; 96 int debug_mask;
102 struct dentry *debugfs_phy; 97 struct dentry *debugfs_phy;
98 struct dentry *debugfs_debug;
103 struct dentry *debugfs_dma; 99 struct dentry *debugfs_dma;
104 struct dentry *debugfs_interrupt; 100 struct dentry *debugfs_interrupt;
105 struct dentry *debugfs_rcstat; 101 struct dentry *debugfs_rcstat;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 61da08a1648c..f7baa406918b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1192,120 +1192,69 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
1192 ah->rfkill_polarity; 1192 ah->rfkill_polarity;
1193} 1193}
1194 1194
1195/* h/w rfkill poll function */ 1195/* s/w rfkill handlers */
1196static void ath_rfkill_poll(struct work_struct *work) 1196static int ath_rfkill_set_block(void *data, bool blocked)
1197{ 1197{
1198 struct ath_softc *sc = container_of(work, struct ath_softc, 1198 struct ath_softc *sc = data;
1199 rf_kill.rfkill_poll.work);
1200 bool radio_on;
1201
1202 if (sc->sc_flags & SC_OP_INVALID)
1203 return;
1204
1205 radio_on = !ath_is_rfkill_set(sc);
1206
1207 /*
1208 * enable/disable radio only when there is a
1209 * state change in RF switch
1210 */
1211 if (radio_on == !!(sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED)) {
1212 enum rfkill_state state;
1213
1214 if (sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED) {
1215 state = radio_on ? RFKILL_STATE_SOFT_BLOCKED
1216 : RFKILL_STATE_HARD_BLOCKED;
1217 } else if (radio_on) {
1218 ath_radio_enable(sc);
1219 state = RFKILL_STATE_UNBLOCKED;
1220 } else {
1221 ath_radio_disable(sc);
1222 state = RFKILL_STATE_HARD_BLOCKED;
1223 }
1224
1225 if (state == RFKILL_STATE_HARD_BLOCKED)
1226 sc->sc_flags |= SC_OP_RFKILL_HW_BLOCKED;
1227 else
1228 sc->sc_flags &= ~SC_OP_RFKILL_HW_BLOCKED;
1229 1199
1230 rfkill_force_state(sc->rf_kill.rfkill, state); 1200 if (blocked)
1231 } 1201 ath_radio_disable(sc);
1202 else
1203 ath_radio_enable(sc);
1232 1204
1233 queue_delayed_work(sc->hw->workqueue, &sc->rf_kill.rfkill_poll, 1205 return 0;
1234 msecs_to_jiffies(ATH_RFKILL_POLL_INTERVAL));
1235} 1206}
1236 1207
1237/* s/w rfkill handler */ 1208static void ath_rfkill_poll_state(struct rfkill *rfkill, void *data)
1238static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
1239{ 1209{
1240 struct ath_softc *sc = data; 1210 struct ath_softc *sc = data;
1211 bool blocked = !!ath_is_rfkill_set(sc);
1241 1212
1242 switch (state) { 1213 if (rfkill_set_hw_state(rfkill, blocked))
1243 case RFKILL_STATE_SOFT_BLOCKED: 1214 ath_radio_disable(sc);
1244 if (!(sc->sc_flags & (SC_OP_RFKILL_HW_BLOCKED | 1215 else
1245 SC_OP_RFKILL_SW_BLOCKED))) 1216 ath_radio_enable(sc);
1246 ath_radio_disable(sc);
1247 sc->sc_flags |= SC_OP_RFKILL_SW_BLOCKED;
1248 return 0;
1249 case RFKILL_STATE_UNBLOCKED:
1250 if ((sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED)) {
1251 sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
1252 if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
1253 DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the"
1254 "radio as it is disabled by h/w\n");
1255 return -EPERM;
1256 }
1257 ath_radio_enable(sc);
1258 }
1259 return 0;
1260 default:
1261 return -EINVAL;
1262 }
1263} 1217}
1264 1218
1265/* Init s/w rfkill */ 1219/* Init s/w rfkill */
1266static int ath_init_sw_rfkill(struct ath_softc *sc) 1220static int ath_init_sw_rfkill(struct ath_softc *sc)
1267{ 1221{
1268 sc->rf_kill.rfkill = rfkill_allocate(wiphy_dev(sc->hw->wiphy), 1222 sc->rf_kill.ops.set_block = ath_rfkill_set_block;
1269 RFKILL_TYPE_WLAN); 1223 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1224 sc->rf_kill.ops.poll = ath_rfkill_poll_state;
1225
1226 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
1227 "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
1228
1229 sc->rf_kill.rfkill = rfkill_alloc(sc->rf_kill.rfkill_name,
1230 wiphy_dev(sc->hw->wiphy),
1231 RFKILL_TYPE_WLAN,
1232 &sc->rf_kill.ops, sc);
1270 if (!sc->rf_kill.rfkill) { 1233 if (!sc->rf_kill.rfkill) {
1271 DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n"); 1234 DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
1272 return -ENOMEM; 1235 return -ENOMEM;
1273 } 1236 }
1274 1237
1275 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
1276 "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
1277 sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
1278 sc->rf_kill.rfkill->data = sc;
1279 sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
1280 sc->rf_kill.rfkill->state = RFKILL_STATE_UNBLOCKED;
1281
1282 return 0; 1238 return 0;
1283} 1239}
1284 1240
1285/* Deinitialize rfkill */ 1241/* Deinitialize rfkill */
1286static void ath_deinit_rfkill(struct ath_softc *sc) 1242static void ath_deinit_rfkill(struct ath_softc *sc)
1287{ 1243{
1288 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1289 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1290
1291 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) { 1244 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
1292 rfkill_unregister(sc->rf_kill.rfkill); 1245 rfkill_unregister(sc->rf_kill.rfkill);
1246 rfkill_destroy(sc->rf_kill.rfkill);
1293 sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED; 1247 sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
1294 sc->rf_kill.rfkill = NULL;
1295 } 1248 }
1296} 1249}
1297 1250
1298static int ath_start_rfkill_poll(struct ath_softc *sc) 1251static int ath_start_rfkill_poll(struct ath_softc *sc)
1299{ 1252{
1300 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1301 queue_delayed_work(sc->hw->workqueue,
1302 &sc->rf_kill.rfkill_poll, 0);
1303
1304 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) { 1253 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
1305 if (rfkill_register(sc->rf_kill.rfkill)) { 1254 if (rfkill_register(sc->rf_kill.rfkill)) {
1306 DPRINTF(sc, ATH_DBG_FATAL, 1255 DPRINTF(sc, ATH_DBG_FATAL,
1307 "Unable to register rfkill\n"); 1256 "Unable to register rfkill\n");
1308 rfkill_free(sc->rf_kill.rfkill); 1257 rfkill_destroy(sc->rf_kill.rfkill);
1309 1258
1310 /* Deinitialize the device */ 1259 /* Deinitialize the device */
1311 ath_cleanup(sc); 1260 ath_cleanup(sc);
@@ -1678,10 +1627,6 @@ int ath_attach(u16 devid, struct ath_softc *sc)
1678 goto error_attach; 1627 goto error_attach;
1679 1628
1680#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 1629#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1681 /* Initialze h/w Rfkill */
1682 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1683 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
1684
1685 /* Initialize s/w rfkill */ 1630 /* Initialize s/w rfkill */
1686 error = ath_init_sw_rfkill(sc); 1631 error = ath_init_sw_rfkill(sc);
1687 if (error) 1632 if (error)
@@ -2214,10 +2159,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2214 } else 2159 } else
2215 sc->rx.rxlink = NULL; 2160 sc->rx.rxlink = NULL;
2216 2161
2217#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 2162 rfkill_pause_polling(sc->rf_kill.rfkill);
2218 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2163
2219 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
2220#endif
2221 /* disable HAL and put h/w to sleep */ 2164 /* disable HAL and put h/w to sleep */
2222 ath9k_hw_disable(sc->sc_ah); 2165 ath9k_hw_disable(sc->sc_ah);
2223 ath9k_hw_configpcipowersave(sc->sc_ah, 1); 2166 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
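
Taken together, the main.c hunks move ath9k from driver-managed polling plus rfkill_force_state() to the rewritten rfkill core: allocate with ops and private data, register once, destroy on failure or teardown, and let the core invoke .poll (paused via rfkill_pause_polling() while the hardware is stopped). A condensed lifecycle sketch assembled from the hunks above, with the error paths simplified:

	sc->rf_kill.rfkill = rfkill_alloc(sc->rf_kill.rfkill_name,
					  wiphy_dev(sc->hw->wiphy),
					  RFKILL_TYPE_WLAN,
					  &sc->rf_kill.ops, sc);
	if (!sc->rf_kill.rfkill)
		return -ENOMEM;

	if (rfkill_register(sc->rf_kill.rfkill)) {
		rfkill_destroy(sc->rf_kill.rfkill);	/* frees the alloc */
		return -EIO;	/* the patch itself calls ath_cleanup() here */
	}

	/* ... in ath9k_stop(): */
	rfkill_pause_polling(sc->rf_kill.rfkill);

	/* ... in ath_deinit_rfkill(): */
	rfkill_unregister(sc->rf_kill.rfkill);
	rfkill_destroy(sc->rf_kill.rfkill);
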
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 168411d322a2..ccdf20a2e9be 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -227,11 +227,6 @@ static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
227 227
228 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1); 228 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
229 229
230#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
231 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
232 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
233#endif
234
235 pci_save_state(pdev); 230 pci_save_state(pdev);
236 pci_disable_device(pdev); 231 pci_disable_device(pdev);
237 pci_set_power_state(pdev, PCI_D3hot); 232 pci_set_power_state(pdev, PCI_D3hot);
@@ -256,16 +251,6 @@ static int ath_pci_resume(struct pci_dev *pdev)
256 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 251 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
257 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1); 252 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
258 253
259#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
260 /*
261 * check the h/w rfkill state on resume
262 * and start the rfkill poll timer
263 */
264 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
265 queue_delayed_work(sc->hw->workqueue,
266 &sc->rf_kill.rfkill_poll, 0);
267#endif
268
269 return 0; 254 return 0;
270} 255}
271 256
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index a8def4fa449c..b61a071788a5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -711,6 +711,7 @@ int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
711 return 0; 711 return 0;
712 712
713 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { 713 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
714 txtid->state &= ~AGGR_ADDBA_PROGRESS;
714 txtid->addba_exchangeattempts = 0; 715 txtid->addba_exchangeattempts = 0;
715 return 0; 716 return 0;
716 } 717 }
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 7a89f9fac7d4..eef370bd1211 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -366,11 +366,17 @@ static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
366 if (rd & COUNTRY_ERD_FLAG) { 366 if (rd & COUNTRY_ERD_FLAG) {
367 /* EEPROM value is a country code */ 367 /* EEPROM value is a country code */
368 u16 cc = rd & ~COUNTRY_ERD_FLAG; 368 u16 cc = rd & ~COUNTRY_ERD_FLAG;
369 printk(KERN_DEBUG
370 "ath: EEPROM indicates we should expect "
371 "a country code\n");
369 for (i = 0; i < ARRAY_SIZE(allCountries); i++) 372 for (i = 0; i < ARRAY_SIZE(allCountries); i++)
370 if (allCountries[i].countryCode == cc) 373 if (allCountries[i].countryCode == cc)
371 return true; 374 return true;
372 } else { 375 } else {
373 /* EEPROM value is a regpair value */ 376 /* EEPROM value is a regpair value */
377 if (rd != CTRY_DEFAULT)
378 printk(KERN_DEBUG "ath: EEPROM indicates we "
379 "should expect a direct regpair map\n");
374 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) 380 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
375 if (regDomainPairs[i].regDmnEnum == rd) 381 if (regDomainPairs[i].regDmnEnum == rd)
376 return true; 382 return true;
@@ -477,6 +483,11 @@ ath_regd_init(struct ath_regulatory *reg,
477 struct country_code_to_enum_rd *country = NULL; 483 struct country_code_to_enum_rd *country = NULL;
478 u16 regdmn; 484 u16 regdmn;
479 485
486 if (!reg)
487 return -EINVAL;
488
489 printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);
490
480 if (!ath_regd_is_eeprom_valid(reg)) { 491 if (!ath_regd_is_eeprom_valid(reg)) {
481 printk(KERN_ERR "ath: Invalid EEPROM contents\n"); 492 printk(KERN_ERR "ath: Invalid EEPROM contents\n");
482 return -EINVAL; 493 return -EINVAL;
@@ -486,20 +497,30 @@ ath_regd_init(struct ath_regulatory *reg,
486 reg->country_code = ath_regd_get_default_country(regdmn); 497 reg->country_code = ath_regd_get_default_country(regdmn);
487 498
488 if (reg->country_code == CTRY_DEFAULT && 499 if (reg->country_code == CTRY_DEFAULT &&
489 regdmn == CTRY_DEFAULT) 500 regdmn == CTRY_DEFAULT) {
501 printk(KERN_DEBUG "ath: EEPROM indicates default "
502 "country code should be used\n");
490 reg->country_code = CTRY_UNITED_STATES; 503 reg->country_code = CTRY_UNITED_STATES;
504 }
491 505
492 if (reg->country_code == CTRY_DEFAULT) { 506 if (reg->country_code == CTRY_DEFAULT) {
493 country = NULL; 507 country = NULL;
494 } else { 508 } else {
509 printk(KERN_DEBUG "ath: doing EEPROM country->regdmn "
510 "map search\n");
495 country = ath_regd_find_country(reg->country_code); 511 country = ath_regd_find_country(reg->country_code);
496 if (country == NULL) { 512 if (country == NULL) {
497 printk(KERN_DEBUG 513 printk(KERN_DEBUG
498 "ath: Country is NULL!!!!, cc= %d\n", 514 "ath: no valid country maps found for "
515 "country code: 0x%0x\n",
499 reg->country_code); 516 reg->country_code);
500 return -EINVAL; 517 return -EINVAL;
501 } else 518 } else {
502 regdmn = country->regDmnEnum; 519 regdmn = country->regDmnEnum;
520 printk(KERN_DEBUG "ath: country maps to "
521 "regdmn code: 0x%0x\n",
522 regdmn);
523 }
503 } 524 }
504 525
505 reg->regpair = ath_get_regpair(regdmn); 526 reg->regpair = ath_get_regpair(regdmn);
@@ -523,7 +544,7 @@ ath_regd_init(struct ath_regulatory *reg,
523 544
524 printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n", 545 printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n",
525 reg->alpha2[0], reg->alpha2[1]); 546 reg->alpha2[0], reg->alpha2[1]);
526 printk(KERN_DEBUG "ath: Regpair detected: 0x%0x\n", 547 printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
527 reg->regpair->regDmnEnum); 548 reg->regpair->regDmnEnum);
528 549
529 ath_regd_init_wiphy(reg, wiphy, reg_notifier); 550 ath_regd_init_wiphy(reg, wiphy, reg_notifier);
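
The added printks in regd.c only make the existing decision visible: the EEPROM regulatory word either carries a country code (COUNTRY_ERD_FLAG set, lower bits are the code) or is taken as a direct regdomain-pair enum. In sketch form, following the branch the new messages annotate:

	if (rd & COUNTRY_ERD_FLAG) {
		u16 cc = rd & ~COUNTRY_ERD_FLAG;
		/* look cc up in allCountries[] */
	} else {
		/* rd itself names an entry in regDomainPairs[] */
	}
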
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 21572e40b79d..67f564e37225 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -98,13 +98,6 @@ config B43_LEDS
98 depends on B43 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = B43) 98 depends on B43 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = B43)
99 default y 99 default y
100 100
101# This config option automatically enables b43 RFKILL support,
102# if it's possible.
103config B43_RFKILL
104 bool
105 depends on B43 && (RFKILL = y || RFKILL = B43) && RFKILL_INPUT && (INPUT_POLLDEV = y || INPUT_POLLDEV = B43)
106 default y
107
108# This config option automatically enables b43 HW-RNG support, 101# This config option automatically enables b43 HW-RNG support,
109# if the HW-RNG core is enabled. 102# if the HW-RNG core is enabled.
110config B43_HWRNG 103config B43_HWRNG
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 281ef8310350..da379f4b0c3a 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -13,7 +13,7 @@ b43-y += lo.o
13b43-y += wa.o 13b43-y += wa.o
14b43-y += dma.o 14b43-y += dma.o
15b43-$(CONFIG_B43_PIO) += pio.o 15b43-$(CONFIG_B43_PIO) += pio.o
16b43-$(CONFIG_B43_RFKILL) += rfkill.o 16b43-y += rfkill.o
17b43-$(CONFIG_B43_LEDS) += leds.o 17b43-$(CONFIG_B43_LEDS) += leds.o
18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o 18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
19b43-$(CONFIG_B43_DEBUG) += debugfs.o 19b43-$(CONFIG_B43_DEBUG) += debugfs.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 4e8ad841c3c5..f580c2812d91 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -163,6 +163,7 @@ enum {
163#define B43_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */ 163#define B43_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */
164#define B43_SHM_SH_PCTLWDPOS 0x0008 164#define B43_SHM_SH_PCTLWDPOS 0x0008
165#define B43_SHM_SH_RXPADOFF 0x0034 /* RX Padding data offset (PIO only) */ 165#define B43_SHM_SH_RXPADOFF 0x0034 /* RX Padding data offset (PIO only) */
166#define B43_SHM_SH_FWCAPA 0x0042 /* Firmware capabilities (Opensource firmware only) */
166#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */ 167#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */
167#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */ 168#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */
168#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */ 169#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */
@@ -297,6 +298,10 @@ enum {
297#define B43_HF_MLADVW 0x001000000000ULL /* N PHY ML ADV workaround (rev >= 13 only) */ 298#define B43_HF_MLADVW 0x001000000000ULL /* N PHY ML ADV workaround (rev >= 13 only) */
298#define B43_HF_PR45960W 0x080000000000ULL /* PR 45960 workaround (rev >= 13 only) */ 299#define B43_HF_PR45960W 0x080000000000ULL /* PR 45960 workaround (rev >= 13 only) */
299 300
301/* Firmware capabilities field in SHM (Opensource firmware only) */
302#define B43_FWCAPA_HWCRYPTO 0x0001
303#define B43_FWCAPA_QOS 0x0002
304
300/* MacFilter offsets. */ 305/* MacFilter offsets. */
301#define B43_MACFILTER_SELF 0x0000 306#define B43_MACFILTER_SELF 0x0000
302#define B43_MACFILTER_BSSID 0x0003 307#define B43_MACFILTER_BSSID 0x0003
@@ -596,6 +601,13 @@ struct b43_wl {
596 /* Pointer to the ieee80211 hardware data structure */ 601 /* Pointer to the ieee80211 hardware data structure */
597 struct ieee80211_hw *hw; 602 struct ieee80211_hw *hw;
598 603
604 /* The number of queues that were registered with the mac80211 subsystem
605 * initially. This is a backup copy of hw->queues in case hw->queues has
606 * to be dynamically lowered at runtime (Firmware does not support QoS).
607 * hw->queues has to be restored to the original value before unregistering
608 * from the mac80211 subsystem. */
609 u16 mac80211_initially_registered_queues;
610
599 struct mutex mutex; 611 struct mutex mutex;
600 spinlock_t irq_lock; 612 spinlock_t irq_lock;
601 /* R/W lock for data transmission. 613 /* R/W lock for data transmission.
@@ -631,9 +643,6 @@ struct b43_wl {
631 char rng_name[30 + 1]; 643 char rng_name[30 + 1];
632#endif /* CONFIG_B43_HWRNG */ 644#endif /* CONFIG_B43_HWRNG */
633 645
634 /* The RF-kill button */
635 struct b43_rfkill rfkill;
636
637 /* List of all wireless devices on this chip */ 646 /* List of all wireless devices on this chip */
638 struct list_head devlist; 647 struct list_head devlist;
639 u8 nr_devs; 648 u8 nr_devs;
@@ -752,6 +761,8 @@ struct b43_wldev {
752 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */ 761 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */
753 bool radio_hw_enable; /* saved state of radio hardware enabled state */ 762 bool radio_hw_enable; /* saved state of radio hardware enabled state */
754 bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */ 763 bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */
764 bool qos_enabled; /* TRUE, if QoS is used. */
765 bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
755 766
756 /* PHY/Radio device. */ 767 /* PHY/Radio device. */
757 struct b43_phy phy; 768 struct b43_phy phy;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index eae680b53052..7964cc32b258 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1285,7 +1285,7 @@ static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
1285{ 1285{
1286 struct b43_dmaring *ring; 1286 struct b43_dmaring *ring;
1287 1287
1288 if (b43_modparam_qos) { 1288 if (dev->qos_enabled) {
1289 /* 0 = highest priority */ 1289 /* 0 = highest priority */
1290 switch (queue_prio) { 1290 switch (queue_prio) {
1291 default: 1291 default:
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index 76f4c7bad8b8..c8b317094c31 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -28,6 +28,7 @@
28 28
29#include "b43.h" 29#include "b43.h"
30#include "leds.h" 30#include "leds.h"
31#include "rfkill.h"
31 32
32 33
33static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index, 34static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index,
@@ -87,7 +88,7 @@ static void b43_led_brightness_set(struct led_classdev *led_dev,
87} 88}
88 89
89static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, 90static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
90 const char *name, char *default_trigger, 91 const char *name, const char *default_trigger,
91 u8 led_index, bool activelow) 92 u8 led_index, bool activelow)
92{ 93{
93 int err; 94 int err;
@@ -164,10 +165,10 @@ static void b43_map_led(struct b43_wldev *dev,
164 snprintf(name, sizeof(name), 165 snprintf(name, sizeof(name),
165 "b43-%s::radio", wiphy_name(hw->wiphy)); 166 "b43-%s::radio", wiphy_name(hw->wiphy));
166 b43_register_led(dev, &dev->led_radio, name, 167 b43_register_led(dev, &dev->led_radio, name,
167 b43_rfkill_led_name(dev), 168 ieee80211_get_radio_led_name(hw),
168 led_index, activelow); 169 led_index, activelow);
169 /* Sync the RF-kill LED state with the switch state. */ 170 /* Sync the RF-kill LED state with radio and switch states. */
170 if (dev->radio_hw_enable) 171 if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev))
171 b43_led_turn_on(dev, led_index, activelow); 172 b43_led_turn_on(dev, led_index, activelow);
172 break; 173 break;
173 case B43_LED_WEIRD: 174 case B43_LED_WEIRD:
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index cb4a8712946a..6456afebdba1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -80,8 +80,8 @@ static int modparam_nohwcrypt;
80module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); 80module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
81MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 81MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
82 82
83int b43_modparam_qos = 1; 83static int modparam_qos = 1;
84module_param_named(qos, b43_modparam_qos, int, 0444); 84module_param_named(qos, modparam_qos, int, 0444);
85MODULE_PARM_DESC(qos, "Enable QOS support (default on)"); 85MODULE_PARM_DESC(qos, "Enable QOS support (default on)");
86 86
87static int modparam_btcoex = 1; 87static int modparam_btcoex = 1;
@@ -538,6 +538,13 @@ void b43_hf_write(struct b43_wldev *dev, u64 value)
538 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi); 538 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi);
539} 539}
540 540
541/* Read the firmware capabilities bitmask (Opensource firmware only) */
542static u16 b43_fwcapa_read(struct b43_wldev *dev)
543{
544 B43_WARN_ON(!dev->fw.opensource);
545 return b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_FWCAPA);
546}
547
541void b43_tsf_read(struct b43_wldev *dev, u64 *tsf) 548void b43_tsf_read(struct b43_wldev *dev, u64 *tsf)
542{ 549{
543 u32 low, high; 550 u32 low, high;
@@ -2307,12 +2314,34 @@ static int b43_upload_microcode(struct b43_wldev *dev)
2307 dev->fw.patch = fwpatch; 2314 dev->fw.patch = fwpatch;
2308 dev->fw.opensource = (fwdate == 0xFFFF); 2315 dev->fw.opensource = (fwdate == 0xFFFF);
2309 2316
2317 /* Default to use-all-queues. */
2318 dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
2319 dev->qos_enabled = !!modparam_qos;
2320 /* Default to firmware/hardware crypto acceleration. */
2321 dev->hwcrypto_enabled = 1;
2322
2310 if (dev->fw.opensource) { 2323 if (dev->fw.opensource) {
2324 u16 fwcapa;
2325
2311 /* Patchlevel info is encoded in the "time" field. */ 2326 /* Patchlevel info is encoded in the "time" field. */
2312 dev->fw.patch = fwtime; 2327 dev->fw.patch = fwtime;
2313 b43info(dev->wl, "Loading OpenSource firmware version %u.%u%s\n", 2328 b43info(dev->wl, "Loading OpenSource firmware version %u.%u\n",
2314 dev->fw.rev, dev->fw.patch, 2329 dev->fw.rev, dev->fw.patch);
2315 dev->fw.pcm_request_failed ? " (Hardware crypto not supported)" : ""); 2330
2331 fwcapa = b43_fwcapa_read(dev);
2332 if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) {
2333 b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n");
2334 /* Disable hardware crypto and fall back to software crypto. */
2335 dev->hwcrypto_enabled = 0;
2336 }
2337 if (!(fwcapa & B43_FWCAPA_QOS)) {
2338 b43info(dev->wl, "QoS not supported by firmware\n");
2339 /* Disable QoS. Tweak hw->queues to 1. It will be restored before
2340 * ieee80211_unregister to make sure the networking core can
2341 * properly free possible resources. */
2342 dev->wl->hw->queues = 1;
2343 dev->qos_enabled = 0;
2344 }
2316 } else { 2345 } else {
2317 b43info(dev->wl, "Loading firmware version %u.%u " 2346 b43info(dev->wl, "Loading firmware version %u.%u "
2318 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", 2347 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
@@ -3470,7 +3499,7 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3470 3499
3471 if (!!conf->radio_enabled != phy->radio_on) { 3500 if (!!conf->radio_enabled != phy->radio_on) {
3472 if (conf->radio_enabled) { 3501 if (conf->radio_enabled) {
3473 b43_software_rfkill(dev, RFKILL_STATE_UNBLOCKED); 3502 b43_software_rfkill(dev, false);
3474 b43info(dev->wl, "Radio turned on by software\n"); 3503 b43info(dev->wl, "Radio turned on by software\n");
3475 if (!dev->radio_hw_enable) { 3504 if (!dev->radio_hw_enable) {
3476 b43info(dev->wl, "The hardware RF-kill button " 3505 b43info(dev->wl, "The hardware RF-kill button "
@@ -3478,7 +3507,7 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3478 "Press the button to turn it on.\n"); 3507 "Press the button to turn it on.\n");
3479 } 3508 }
3480 } else { 3509 } else {
3481 b43_software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED); 3510 b43_software_rfkill(dev, true);
3482 b43info(dev->wl, "Radio turned off by software\n"); 3511 b43info(dev->wl, "Radio turned off by software\n");
3483 } 3512 }
3484 } 3513 }
@@ -3627,7 +3656,7 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3627 if (!dev || b43_status(dev) < B43_STAT_INITIALIZED) 3656 if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
3628 goto out_unlock; 3657 goto out_unlock;
3629 3658
3630 if (dev->fw.pcm_request_failed) { 3659 if (dev->fw.pcm_request_failed || !dev->hwcrypto_enabled) {
3631 /* We don't have firmware for the crypto engine. 3660 /* We don't have firmware for the crypto engine.
3632 * Must use software-crypto. */ 3661 * Must use software-crypto. */
3633 err = -EOPNOTSUPP; 3662 err = -EOPNOTSUPP;
@@ -4298,7 +4327,6 @@ static int b43_op_start(struct ieee80211_hw *hw)
4298 struct b43_wldev *dev = wl->current_dev; 4327 struct b43_wldev *dev = wl->current_dev;
4299 int did_init = 0; 4328 int did_init = 0;
4300 int err = 0; 4329 int err = 0;
4301 bool do_rfkill_exit = 0;
4302 4330
4303 /* Kill all old instance specific information to make sure 4331 /* Kill all old instance specific information to make sure
4304 * the card won't use it in the short timeframe between start 4332 * the card won't use it in the short timeframe between start
@@ -4312,18 +4340,12 @@ static int b43_op_start(struct ieee80211_hw *hw)
4312 wl->beacon1_uploaded = 0; 4340 wl->beacon1_uploaded = 0;
4313 wl->beacon_templates_virgin = 1; 4341 wl->beacon_templates_virgin = 1;
4314 4342
4315 /* First register RFkill.
4316 * LEDs that are registered later depend on it. */
4317 b43_rfkill_init(dev);
4318
4319 mutex_lock(&wl->mutex); 4343 mutex_lock(&wl->mutex);
4320 4344
4321 if (b43_status(dev) < B43_STAT_INITIALIZED) { 4345 if (b43_status(dev) < B43_STAT_INITIALIZED) {
4322 err = b43_wireless_core_init(dev); 4346 err = b43_wireless_core_init(dev);
4323 if (err) { 4347 if (err)
4324 do_rfkill_exit = 1;
4325 goto out_mutex_unlock; 4348 goto out_mutex_unlock;
4326 }
4327 did_init = 1; 4349 did_init = 1;
4328 } 4350 }
4329 4351
@@ -4332,17 +4354,16 @@ static int b43_op_start(struct ieee80211_hw *hw)
4332 if (err) { 4354 if (err) {
4333 if (did_init) 4355 if (did_init)
4334 b43_wireless_core_exit(dev); 4356 b43_wireless_core_exit(dev);
4335 do_rfkill_exit = 1;
4336 goto out_mutex_unlock; 4357 goto out_mutex_unlock;
4337 } 4358 }
4338 } 4359 }
4339 4360
4361 /* XXX: only do if device doesn't support rfkill irq */
4362 wiphy_rfkill_start_polling(hw->wiphy);
4363
4340 out_mutex_unlock: 4364 out_mutex_unlock:
4341 mutex_unlock(&wl->mutex); 4365 mutex_unlock(&wl->mutex);
4342 4366
4343 if (do_rfkill_exit)
4344 b43_rfkill_exit(dev);
4345
4346 return err; 4367 return err;
4347} 4368}
4348 4369
@@ -4351,7 +4372,6 @@ static void b43_op_stop(struct ieee80211_hw *hw)
4351 struct b43_wl *wl = hw_to_b43_wl(hw); 4372 struct b43_wl *wl = hw_to_b43_wl(hw);
4352 struct b43_wldev *dev = wl->current_dev; 4373 struct b43_wldev *dev = wl->current_dev;
4353 4374
4354 b43_rfkill_exit(dev);
4355 cancel_work_sync(&(wl->beacon_update_trigger)); 4375 cancel_work_sync(&(wl->beacon_update_trigger));
4356 4376
4357 mutex_lock(&wl->mutex); 4377 mutex_lock(&wl->mutex);
@@ -4433,6 +4453,7 @@ static const struct ieee80211_ops b43_hw_ops = {
4433 .sta_notify = b43_op_sta_notify, 4453 .sta_notify = b43_op_sta_notify,
4434 .sw_scan_start = b43_op_sw_scan_start_notifier, 4454 .sw_scan_start = b43_op_sw_scan_start_notifier,
4435 .sw_scan_complete = b43_op_sw_scan_complete_notifier, 4455 .sw_scan_complete = b43_op_sw_scan_complete_notifier,
4456 .rfkill_poll = b43_rfkill_poll,
4436}; 4457};
4437 4458
4438/* Hard-reset the chip. Do not call this directly. 4459/* Hard-reset the chip. Do not call this directly.
@@ -4735,6 +4756,7 @@ static int b43_wireless_init(struct ssb_device *dev)
4735 b43err(NULL, "Could not allocate ieee80211 device\n"); 4756 b43err(NULL, "Could not allocate ieee80211 device\n");
4736 goto out; 4757 goto out;
4737 } 4758 }
4759 wl = hw_to_b43_wl(hw);
4738 4760
4739 /* fill hw info */ 4761 /* fill hw info */
4740 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 4762 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
@@ -4748,7 +4770,8 @@ static int b43_wireless_init(struct ssb_device *dev)
4748 BIT(NL80211_IFTYPE_WDS) | 4770 BIT(NL80211_IFTYPE_WDS) |
4749 BIT(NL80211_IFTYPE_ADHOC); 4771 BIT(NL80211_IFTYPE_ADHOC);
4750 4772
4751 hw->queues = b43_modparam_qos ? 4 : 1; 4773 hw->queues = modparam_qos ? 4 : 1;
4774 wl->mac80211_initially_registered_queues = hw->queues;
4752 hw->max_rates = 2; 4775 hw->max_rates = 2;
4753 SET_IEEE80211_DEV(hw, dev->dev); 4776 SET_IEEE80211_DEV(hw, dev->dev);
4754 if (is_valid_ether_addr(sprom->et1mac)) 4777 if (is_valid_ether_addr(sprom->et1mac))
@@ -4756,9 +4779,7 @@ static int b43_wireless_init(struct ssb_device *dev)
4756 else 4779 else
4757 SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac); 4780 SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac);
4758 4781
4759 /* Get and initialize struct b43_wl */ 4782 /* Initialize struct b43_wl */
4760 wl = hw_to_b43_wl(hw);
4761 memset(wl, 0, sizeof(*wl));
4762 wl->hw = hw; 4783 wl->hw = hw;
4763 spin_lock_init(&wl->irq_lock); 4784 spin_lock_init(&wl->irq_lock);
4764 rwlock_init(&wl->tx_lock); 4785 rwlock_init(&wl->tx_lock);
@@ -4824,8 +4845,13 @@ static void b43_remove(struct ssb_device *dev)
4824 cancel_work_sync(&wldev->restart_work); 4845 cancel_work_sync(&wldev->restart_work);
4825 4846
4826 B43_WARN_ON(!wl); 4847 B43_WARN_ON(!wl);
4827 if (wl->current_dev == wldev) 4848 if (wl->current_dev == wldev) {
4849 /* Restore the queues count before unregistering, because firmware detect
4850 * might have modified it. Restoring is important, so the networking
4851 * stack can properly free resources. */
4852 wl->hw->queues = wl->mac80211_initially_registered_queues;
4828 ieee80211_unregister_hw(wl->hw); 4853 ieee80211_unregister_hw(wl->hw);
4854 }
4829 4855
4830 b43_one_core_detach(dev); 4856 b43_one_core_detach(dev);
4831 4857
@@ -4920,7 +4946,7 @@ static struct ssb_driver b43_ssb_driver = {
4920static void b43_print_driverinfo(void) 4946static void b43_print_driverinfo(void)
4921{ 4947{
4922 const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "", 4948 const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
4923 *feat_leds = "", *feat_rfkill = ""; 4949 *feat_leds = "";
4924 4950
4925#ifdef CONFIG_B43_PCI_AUTOSELECT 4951#ifdef CONFIG_B43_PCI_AUTOSELECT
4926 feat_pci = "P"; 4952 feat_pci = "P";
@@ -4934,14 +4960,11 @@ static void b43_print_driverinfo(void)
4934#ifdef CONFIG_B43_LEDS 4960#ifdef CONFIG_B43_LEDS
4935 feat_leds = "L"; 4961 feat_leds = "L";
4936#endif 4962#endif
4937#ifdef CONFIG_B43_RFKILL
4938 feat_rfkill = "R";
4939#endif
4940 printk(KERN_INFO "Broadcom 43xx driver loaded " 4963 printk(KERN_INFO "Broadcom 43xx driver loaded "
4941 "[ Features: %s%s%s%s%s, Firmware-ID: " 4964 "[ Features: %s%s%s%s, Firmware-ID: "
4942 B43_SUPPORTED_FIRMWARE_ID " ]\n", 4965 B43_SUPPORTED_FIRMWARE_ID " ]\n",
4943 feat_pci, feat_pcmcia, feat_nphy, 4966 feat_pci, feat_pcmcia, feat_nphy,
4944 feat_leds, feat_rfkill); 4967 feat_leds);
4945} 4968}
4946 4969
4947static int __init b43_init(void) 4970static int __init b43_init(void)
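
A recurring theme of the b43 main.c changes is that hw->queues can now shrink at firmware-load time (open-source firmware without QoS support) but must be restored to its registration-time value before ieee80211_unregister_hw(), because mac80211 sized its per-queue state when the hw was registered. The bookkeeping, condensed from the hunks above:

	/* b43_wireless_init(): remember what mac80211 was registered with */
	hw->queues = modparam_qos ? 4 : 1;
	wl->mac80211_initially_registered_queues = hw->queues;

	/* b43_upload_microcode(): firmware lacks QoS support */
	if (!(fwcapa & B43_FWCAPA_QOS)) {
		dev->wl->hw->queues = 1;
		dev->qos_enabled = 0;
	}

	/* b43_remove(): restore before unregistering */
	wl->hw->queues = wl->mac80211_initially_registered_queues;
	ieee80211_unregister_hw(wl->hw);
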
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 40abcf5d1b43..950fb1b0546d 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -39,7 +39,6 @@
39#define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes)) 39#define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes))
40 40
41 41
42extern int b43_modparam_qos;
43extern int b43_modparam_verbose; 42extern int b43_modparam_verbose;
44 43
45/* Logmessage verbosity levels. Update the b43_modparam_verbose helptext, if 44/* Logmessage verbosity levels. Update the b43_modparam_verbose helptext, if
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
index c836c077d51d..816e028a2620 100644
--- a/drivers/net/wireless/b43/phy_a.c
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -480,11 +480,11 @@ static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev)
480} 480}
481 481
482static void b43_aphy_op_software_rfkill(struct b43_wldev *dev, 482static void b43_aphy_op_software_rfkill(struct b43_wldev *dev,
483 enum rfkill_state state) 483 bool blocked)
484{ 484{
485 struct b43_phy *phy = &dev->phy; 485 struct b43_phy *phy = &dev->phy;
486 486
487 if (state == RFKILL_STATE_UNBLOCKED) { 487 if (!blocked) {
488 if (phy->radio_on) 488 if (phy->radio_on)
489 return; 489 return;
490 b43_radio_write16(dev, 0x0004, 0x00C0); 490 b43_radio_write16(dev, 0x0004, 0x00C0);
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index e176b6e0d9cf..6d241622210e 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -84,7 +84,7 @@ int b43_phy_init(struct b43_wldev *dev)
84 84
85 phy->channel = ops->get_default_chan(dev); 85 phy->channel = ops->get_default_chan(dev);
86 86
87 ops->software_rfkill(dev, RFKILL_STATE_UNBLOCKED); 87 ops->software_rfkill(dev, false);
88 err = ops->init(dev); 88 err = ops->init(dev);
89 if (err) { 89 if (err) {
90 b43err(dev->wl, "PHY init failed\n"); 90 b43err(dev->wl, "PHY init failed\n");
@@ -104,7 +104,7 @@ err_phy_exit:
104 if (ops->exit) 104 if (ops->exit)
105 ops->exit(dev); 105 ops->exit(dev);
106err_block_rf: 106err_block_rf:
107 ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED); 107 ops->software_rfkill(dev, true);
108 108
109 return err; 109 return err;
110} 110}
@@ -113,7 +113,7 @@ void b43_phy_exit(struct b43_wldev *dev)
113{ 113{
114 const struct b43_phy_operations *ops = dev->phy.ops; 114 const struct b43_phy_operations *ops = dev->phy.ops;
115 115
116 ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED); 116 ops->software_rfkill(dev, true);
117 if (ops->exit) 117 if (ops->exit)
118 ops->exit(dev); 118 ops->exit(dev);
119} 119}
@@ -295,18 +295,13 @@ err_restore_cookie:
295 return err; 295 return err;
296} 296}
297 297
298void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state) 298void b43_software_rfkill(struct b43_wldev *dev, bool blocked)
299{ 299{
300 struct b43_phy *phy = &dev->phy; 300 struct b43_phy *phy = &dev->phy;
301 301
302 if (state == RFKILL_STATE_HARD_BLOCKED) {
303 /* We cannot hardware-block the device */
304 state = RFKILL_STATE_SOFT_BLOCKED;
305 }
306
307 b43_mac_suspend(dev); 302 b43_mac_suspend(dev);
308 phy->ops->software_rfkill(dev, state); 303 phy->ops->software_rfkill(dev, blocked);
309 phy->radio_on = (state == RFKILL_STATE_UNBLOCKED); 304 phy->radio_on = !blocked;
310 b43_mac_enable(dev); 305 b43_mac_enable(dev);
311} 306}
312 307
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index b2d99101947b..44cc918e4fc6 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -1,7 +1,7 @@
1#ifndef LINUX_B43_PHY_COMMON_H_ 1#ifndef LINUX_B43_PHY_COMMON_H_
2#define LINUX_B43_PHY_COMMON_H_ 2#define LINUX_B43_PHY_COMMON_H_
3 3
4#include <linux/rfkill.h> 4#include <linux/types.h>
5 5
6struct b43_wldev; 6struct b43_wldev;
7 7
@@ -159,7 +159,7 @@ struct b43_phy_operations {
159 159
160 /* Radio */ 160 /* Radio */
161 bool (*supports_hwpctl)(struct b43_wldev *dev); 161 bool (*supports_hwpctl)(struct b43_wldev *dev);
162 void (*software_rfkill)(struct b43_wldev *dev, enum rfkill_state state); 162 void (*software_rfkill)(struct b43_wldev *dev, bool blocked);
163 void (*switch_analog)(struct b43_wldev *dev, bool on); 163 void (*switch_analog)(struct b43_wldev *dev, bool on);
164 int (*switch_channel)(struct b43_wldev *dev, unsigned int new_channel); 164 int (*switch_channel)(struct b43_wldev *dev, unsigned int new_channel);
165 unsigned int (*get_default_chan)(struct b43_wldev *dev); 165 unsigned int (*get_default_chan)(struct b43_wldev *dev);
@@ -364,7 +364,7 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel);
364/** 364/**
365 * b43_software_rfkill - Turn the radio ON or OFF in software. 365 * b43_software_rfkill - Turn the radio ON or OFF in software.
366 */ 366 */
367void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state); 367void b43_software_rfkill(struct b43_wldev *dev, bool blocked);
368 368
369/** 369/**
370 * b43_phy_txpower_check - Check TX power output. 370 * b43_phy_txpower_check - Check TX power output.
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index e7b98f013b0f..5300232449f6 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -2592,7 +2592,7 @@ static bool b43_gphy_op_supports_hwpctl(struct b43_wldev *dev)
2592} 2592}
2593 2593
2594static void b43_gphy_op_software_rfkill(struct b43_wldev *dev, 2594static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
2595 enum rfkill_state state) 2595 bool blocked)
2596{ 2596{
2597 struct b43_phy *phy = &dev->phy; 2597 struct b43_phy *phy = &dev->phy;
2598 struct b43_phy_g *gphy = phy->g; 2598 struct b43_phy_g *gphy = phy->g;
@@ -2600,7 +2600,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
2600 2600
2601 might_sleep(); 2601 might_sleep();
2602 2602
2603 if (state == RFKILL_STATE_UNBLOCKED) { 2603 if (!blocked) {
2604 /* Turn radio ON */ 2604 /* Turn radio ON */
2605 if (phy->radio_on) 2605 if (phy->radio_on)
2606 return; 2606 return;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 58e319d6b1ed..ea0d3a3a6a64 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -488,7 +488,7 @@ static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
488} 488}
489 489
490static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev, 490static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
491 enum rfkill_state state) 491 bool blocked)
492{ 492{
493 //TODO 493 //TODO
494} 494}
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 8bcfda5f3f07..be7b5604947b 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -579,7 +579,7 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
579} 579}
580 580
581static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, 581static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
582 enum rfkill_state state) 582 bool blocked)
583{//TODO 583{//TODO
584} 584}
585 585
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 8cd9776752e6..69138e8c1db6 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -313,7 +313,7 @@ static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
313{ 313{
314 struct b43_pio_txqueue *q; 314 struct b43_pio_txqueue *q;
315 315
316 if (b43_modparam_qos) { 316 if (dev->qos_enabled) {
317 /* 0 = highest priority */ 317 /* 0 = highest priority */
318 switch (queue_prio) { 318 switch (queue_prio) {
319 default: 319 default:
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 9e1d00bc24d3..31e55999893f 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -22,15 +22,11 @@
 
 */
 
-#include "rfkill.h"
 #include "b43.h"
-#include "phy_common.h"
-
-#include <linux/kmod.h>
 
 
 /* Returns TRUE, if the radio is enabled in hardware. */
-static bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
+bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
 {
 	if (dev->phy.rev >= 3) {
 		if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI)
@@ -45,165 +41,39 @@ static bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
 }
 
 /* The poll callback for the hardware button. */
-static void b43_rfkill_poll(struct input_polled_dev *poll_dev)
+void b43_rfkill_poll(struct ieee80211_hw *hw)
 {
-	struct b43_wldev *dev = poll_dev->private;
-	struct b43_wl *wl = dev->wl;
+	struct b43_wl *wl = hw_to_b43_wl(hw);
+	struct b43_wldev *dev = wl->current_dev;
+	struct ssb_bus *bus = dev->dev->bus;
 	bool enabled;
-	bool report_change = 0;
+	bool brought_up = false;
 
 	mutex_lock(&wl->mutex);
 	if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED)) {
-		mutex_unlock(&wl->mutex);
-		return;
+		if (ssb_bus_powerup(bus, 0)) {
+			mutex_unlock(&wl->mutex);
+			return;
+		}
+		ssb_device_enable(dev->dev, 0);
+		brought_up = true;
 	}
+
 	enabled = b43_is_hw_radio_enabled(dev);
+
 	if (unlikely(enabled != dev->radio_hw_enable)) {
 		dev->radio_hw_enable = enabled;
-		report_change = 1;
 		b43info(wl, "Radio hardware status changed to %s\n",
 			enabled ? "ENABLED" : "DISABLED");
+		wiphy_rfkill_set_hw_state(hw->wiphy, !enabled);
+		if (enabled != dev->phy.radio_on)
+			b43_software_rfkill(dev, !enabled);
 	}
-	mutex_unlock(&wl->mutex);
 
-	/* send the radio switch event to the system - note both a key press
-	 * and a release are required */
-	if (unlikely(report_change)) {
-		input_report_key(poll_dev->input, KEY_WLAN, 1);
-		input_report_key(poll_dev->input, KEY_WLAN, 0);
+	if (brought_up) {
+		ssb_device_disable(dev->dev, 0);
+		ssb_bus_may_powerdown(bus);
 	}
-}
-
-/* Called when the RFKILL toggled in software. */
-static int b43_rfkill_soft_toggle(void *data, enum rfkill_state state)
-{
-	struct b43_wldev *dev = data;
-	struct b43_wl *wl = dev->wl;
-	int err = -EBUSY;
 
-	if (!wl->rfkill.registered)
-		return 0;
-
-	mutex_lock(&wl->mutex);
-	if (b43_status(dev) < B43_STAT_INITIALIZED)
-		goto out_unlock;
-	err = 0;
-	switch (state) {
-	case RFKILL_STATE_UNBLOCKED:
-		if (!dev->radio_hw_enable) {
-			/* No luck. We can't toggle the hardware RF-kill
-			 * button from software. */
-			err = -EBUSY;
-			goto out_unlock;
-		}
-		if (!dev->phy.radio_on)
-			b43_software_rfkill(dev, state);
-		break;
-	case RFKILL_STATE_SOFT_BLOCKED:
-		if (dev->phy.radio_on)
-			b43_software_rfkill(dev, state);
-		break;
-	default:
-		b43warn(wl, "Received unexpected rfkill state %d.\n", state);
-		break;
-	}
-out_unlock:
 	mutex_unlock(&wl->mutex);
-
-	return err;
-}
-
-char *b43_rfkill_led_name(struct b43_wldev *dev)
-{
-	struct b43_rfkill *rfk = &(dev->wl->rfkill);
-
-	if (!rfk->registered)
-		return NULL;
-	return rfkill_get_led_name(rfk->rfkill);
-}
-
-void b43_rfkill_init(struct b43_wldev *dev)
-{
-	struct b43_wl *wl = dev->wl;
-	struct b43_rfkill *rfk = &(wl->rfkill);
-	int err;
-
-	rfk->registered = 0;
-
-	rfk->rfkill = rfkill_allocate(dev->dev->dev, RFKILL_TYPE_WLAN);
-	if (!rfk->rfkill)
-		goto out_error;
-	snprintf(rfk->name, sizeof(rfk->name),
-		 "b43-%s", wiphy_name(wl->hw->wiphy));
-	rfk->rfkill->name = rfk->name;
-	rfk->rfkill->state = RFKILL_STATE_UNBLOCKED;
-	rfk->rfkill->data = dev;
-	rfk->rfkill->toggle_radio = b43_rfkill_soft_toggle;
-
-	rfk->poll_dev = input_allocate_polled_device();
-	if (!rfk->poll_dev) {
-		rfkill_free(rfk->rfkill);
-		goto err_freed_rfk;
-	}
-
-	rfk->poll_dev->private = dev;
-	rfk->poll_dev->poll = b43_rfkill_poll;
-	rfk->poll_dev->poll_interval = 1000; /* msecs */
-
-	rfk->poll_dev->input->name = rfk->name;
-	rfk->poll_dev->input->id.bustype = BUS_HOST;
-	rfk->poll_dev->input->id.vendor = dev->dev->bus->boardinfo.vendor;
-	rfk->poll_dev->input->evbit[0] = BIT(EV_KEY);
-	set_bit(KEY_WLAN, rfk->poll_dev->input->keybit);
-
-	err = rfkill_register(rfk->rfkill);
-	if (err)
-		goto err_free_polldev;
-
-#ifdef CONFIG_RFKILL_INPUT_MODULE
-	/* B43 RF-kill isn't useful without the rfkill-input subsystem.
-	 * Try to load the module. */
-	err = request_module("rfkill-input");
-	if (err)
-		b43warn(wl, "Failed to load the rfkill-input module. "
-			"The built-in radio LED will not work.\n");
-#endif /* CONFIG_RFKILL_INPUT */
-
-#if !defined(CONFIG_RFKILL_INPUT) && !defined(CONFIG_RFKILL_INPUT_MODULE)
-	b43warn(wl, "The rfkill-input subsystem is not available. "
-		"The built-in radio LED will not work.\n");
-#endif
-
-	err = input_register_polled_device(rfk->poll_dev);
-	if (err)
-		goto err_unreg_rfk;
-
-	rfk->registered = 1;
-
-	return;
-err_unreg_rfk:
-	rfkill_unregister(rfk->rfkill);
-err_free_polldev:
-	input_free_polled_device(rfk->poll_dev);
-	rfk->poll_dev = NULL;
-err_freed_rfk:
-	rfk->rfkill = NULL;
-out_error:
-	rfk->registered = 0;
-	b43warn(wl, "RF-kill button init failed\n");
-}
-
-void b43_rfkill_exit(struct b43_wldev *dev)
-{
-	struct b43_rfkill *rfk = &(dev->wl->rfkill);
-
-	if (!rfk->registered)
-		return;
-	rfk->registered = 0;
-
-	input_unregister_polled_device(rfk->poll_dev);
-	rfkill_unregister(rfk->rfkill);
-	input_free_polled_device(rfk->poll_dev);
-	rfk->poll_dev = NULL;
-	rfk->rfkill = NULL;
 }
diff --git a/drivers/net/wireless/b43/rfkill.h b/drivers/net/wireless/b43/rfkill.h
index adacf936d815..f046c3ca0519 100644
--- a/drivers/net/wireless/b43/rfkill.h
+++ b/drivers/net/wireless/b43/rfkill.h
@@ -1,52 +1,11 @@
 #ifndef B43_RFKILL_H_
 #define B43_RFKILL_H_
 
+struct ieee80211_hw;
 struct b43_wldev;
 
+void b43_rfkill_poll(struct ieee80211_hw *hw);
 
-#ifdef CONFIG_B43_RFKILL
-
-#include <linux/rfkill.h>
-#include <linux/input-polldev.h>
-
-
-struct b43_rfkill {
-	/* The RFKILL subsystem data structure */
-	struct rfkill *rfkill;
-	/* The poll device for the RFKILL input button */
-	struct input_polled_dev *poll_dev;
-	/* Did initialization succeed? Used for freeing. */
-	bool registered;
-	/* The unique name of this rfkill switch */
-	char name[sizeof("b43-phy4294967295")];
-};
-
-/* The init function returns void, because we are not interested
- * in failing the b43 init process when rfkill init failed. */
-void b43_rfkill_init(struct b43_wldev *dev);
-void b43_rfkill_exit(struct b43_wldev *dev);
-
-char * b43_rfkill_led_name(struct b43_wldev *dev);
-
-
-#else /* CONFIG_B43_RFKILL */
-/* No RFKILL support. */
-
-struct b43_rfkill {
-	/* empty */
-};
-
-static inline void b43_rfkill_init(struct b43_wldev *dev)
-{
-}
-static inline void b43_rfkill_exit(struct b43_wldev *dev)
-{
-}
-static inline char * b43_rfkill_led_name(struct b43_wldev *dev)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_B43_RFKILL */
+bool b43_is_hw_radio_enabled(struct b43_wldev *dev);
 
 #endif /* B43_RFKILL_H_ */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index a63d88841df8..55f36a7254d9 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -118,7 +118,6 @@ u8 b43_plcp_get_ratecode_ofdm(const u8 bitrate)
 void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
 			   const u16 octets, const u8 bitrate)
 {
-	__le32 *data = &(plcp->data);
 	__u8 *raw = plcp->raw;
 
 	if (b43_is_ofdm_rate(bitrate)) {
@@ -127,7 +126,7 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
 		d = b43_plcp_get_ratecode_ofdm(bitrate);
 		B43_WARN_ON(octets & 0xF000);
 		d |= (octets << 5);
-		*data = cpu_to_le32(d);
+		plcp->data = cpu_to_le32(d);
 	} else {
 		u32 plen;
 
@@ -141,7 +140,7 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
 				raw[1] = 0x04;
 		} else
 			raw[1] = 0x04;
-		*data |= cpu_to_le32(plen << 16);
+		plcp->data |= cpu_to_le32(plen << 16);
 		raw[0] = b43_plcp_get_ratecode_cck(bitrate);
 	}
 }
diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig
index d4f628a74bbd..94a463478053 100644
--- a/drivers/net/wireless/b43legacy/Kconfig
+++ b/drivers/net/wireless/b43legacy/Kconfig
@@ -42,14 +42,6 @@ config B43LEGACY_LEDS
 	depends on B43LEGACY && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = B43LEGACY)
 	default y
 
-# RFKILL support
-# This config option automatically enables b43legacy RFKILL support,
-# if it's possible.
-config B43LEGACY_RFKILL
-	bool
-	depends on B43LEGACY && (RFKILL = y || RFKILL = B43LEGACY) && RFKILL_INPUT && (INPUT_POLLDEV = y || INPUT_POLLDEV = B43LEGACY)
-	default y
-
 # This config option automatically enables b43 HW-RNG support,
 # if the HW-RNG core is enabled.
 config B43LEGACY_HWRNG
diff --git a/drivers/net/wireless/b43legacy/Makefile b/drivers/net/wireless/b43legacy/Makefile
index 80cdb73bd140..227a77e84362 100644
--- a/drivers/net/wireless/b43legacy/Makefile
+++ b/drivers/net/wireless/b43legacy/Makefile
@@ -6,7 +6,7 @@ b43legacy-y += radio.o
 b43legacy-y += sysfs.o
 b43legacy-y += xmit.o
 # b43 RFKILL button support
-b43legacy-$(CONFIG_B43LEGACY_RFKILL) += rfkill.o
+b43legacy-y += rfkill.o
 # b43legacy LED support
 b43legacy-$(CONFIG_B43LEGACY_LEDS) += leds.o
 # b43legacy debugging
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 19a4b0bc0d87..77fda148ac46 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -602,9 +602,6 @@ struct b43legacy_wl {
 	char rng_name[30 + 1];
 #endif
 
-	/* The RF-kill button */
-	struct b43legacy_rfkill rfkill;
-
 	/* List of all wireless devices on this chip */
 	struct list_head devlist;
 	u8 nr_devs;
diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
index 3ea55b18c700..37e9be893560 100644
--- a/drivers/net/wireless/b43legacy/leds.c
+++ b/drivers/net/wireless/b43legacy/leds.c
@@ -28,6 +28,7 @@
 
 #include "b43legacy.h"
 #include "leds.h"
+#include "rfkill.h"
 
 
 static void b43legacy_led_turn_on(struct b43legacy_wldev *dev, u8 led_index,
@@ -86,7 +87,8 @@ static void b43legacy_led_brightness_set(struct led_classdev *led_dev,
 
 static int b43legacy_register_led(struct b43legacy_wldev *dev,
 				  struct b43legacy_led *led,
-				  const char *name, char *default_trigger,
+				  const char *name,
+				  const char *default_trigger,
 				  u8 led_index, bool activelow)
 {
 	int err;
@@ -163,10 +165,10 @@ static void b43legacy_map_led(struct b43legacy_wldev *dev,
 		snprintf(name, sizeof(name),
 			 "b43legacy-%s::radio", wiphy_name(hw->wiphy));
 		b43legacy_register_led(dev, &dev->led_radio, name,
-				       b43legacy_rfkill_led_name(dev),
+				       ieee80211_get_radio_led_name(hw),
 				       led_index, activelow);
-		/* Sync the RF-kill LED state with the switch state. */
-		if (dev->radio_hw_enable)
+		/* Sync the RF-kill LED state with radio and switch states. */
+		if (dev->phy.radio_on && b43legacy_is_hw_radio_enabled(dev))
 			b43legacy_led_turn_on(dev, led_index, activelow);
 		break;
 	case B43legacy_LED_WEIRD:
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index f6f3fbf0a2f4..e5136fb65ddd 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3431,11 +3431,6 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
 	struct b43legacy_wldev *dev = wl->current_dev;
 	int did_init = 0;
 	int err = 0;
-	bool do_rfkill_exit = 0;
-
-	/* First register RFkill.
-	 * LEDs that are registered later depend on it. */
-	b43legacy_rfkill_init(dev);
 
 	/* Kill all old instance specific information to make sure
 	 * the card won't use it in the short timeframe between start
@@ -3451,10 +3446,8 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
 
 	if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) {
 		err = b43legacy_wireless_core_init(dev);
-		if (err) {
-			do_rfkill_exit = 1;
+		if (err)
 			goto out_mutex_unlock;
-		}
 		did_init = 1;
 	}
 
@@ -3463,17 +3456,15 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
 		if (err) {
 			if (did_init)
 				b43legacy_wireless_core_exit(dev);
-			do_rfkill_exit = 1;
 			goto out_mutex_unlock;
 		}
 	}
 
+	wiphy_rfkill_start_polling(hw->wiphy);
+
 out_mutex_unlock:
 	mutex_unlock(&wl->mutex);
 
-	if (do_rfkill_exit)
-		b43legacy_rfkill_exit(dev);
-
 	return err;
 }
 
@@ -3482,7 +3473,6 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
 	struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
 	struct b43legacy_wldev *dev = wl->current_dev;
 
-	b43legacy_rfkill_exit(dev);
 	cancel_work_sync(&(wl->beacon_update_trigger));
 
 	mutex_lock(&wl->mutex);
@@ -3518,6 +3508,7 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
 	.start			= b43legacy_op_start,
 	.stop			= b43legacy_op_stop,
 	.set_tim		= b43legacy_op_beacon_set_tim,
+	.rfkill_poll		= b43legacy_rfkill_poll,
 };
 
 /* Hard-reset the chip. Do not call this directly.
diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
index 4b0c7d27a51f..8783022db11e 100644
--- a/drivers/net/wireless/b43legacy/rfkill.c
+++ b/drivers/net/wireless/b43legacy/rfkill.c
@@ -22,15 +22,12 @@
 
 */
 
-#include "rfkill.h"
 #include "radio.h"
 #include "b43legacy.h"
 
-#include <linux/kmod.h>
-
 
 /* Returns TRUE, if the radio is enabled in hardware. */
-static bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
+bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
 {
 	if (dev->phy.rev >= 3) {
 		if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI)
@@ -45,164 +42,43 @@ static bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
 }
 
 /* The poll callback for the hardware button. */
-static void b43legacy_rfkill_poll(struct input_polled_dev *poll_dev)
+void b43legacy_rfkill_poll(struct ieee80211_hw *hw)
 {
-	struct b43legacy_wldev *dev = poll_dev->private;
-	struct b43legacy_wl *wl = dev->wl;
+	struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
+	struct b43legacy_wldev *dev = wl->current_dev;
+	struct ssb_bus *bus = dev->dev->bus;
 	bool enabled;
-	bool report_change = 0;
+	bool brought_up = false;
 
 	mutex_lock(&wl->mutex);
 	if (unlikely(b43legacy_status(dev) < B43legacy_STAT_INITIALIZED)) {
-		mutex_unlock(&wl->mutex);
-		return;
+		if (ssb_bus_powerup(bus, 0)) {
+			mutex_unlock(&wl->mutex);
+			return;
+		}
+		ssb_device_enable(dev->dev, 0);
+		brought_up = true;
 	}
+
 	enabled = b43legacy_is_hw_radio_enabled(dev);
+
 	if (unlikely(enabled != dev->radio_hw_enable)) {
 		dev->radio_hw_enable = enabled;
-		report_change = 1;
 		b43legacyinfo(wl, "Radio hardware status changed to %s\n",
 			      enabled ? "ENABLED" : "DISABLED");
-	}
-	mutex_unlock(&wl->mutex);
-
-	/* send the radio switch event to the system - note both a key press
-	 * and a release are required */
-	if (unlikely(report_change)) {
-		input_report_key(poll_dev->input, KEY_WLAN, 1);
-		input_report_key(poll_dev->input, KEY_WLAN, 0);
-	}
-}
-
-/* Called when the RFKILL toggled in software.
- * This is called without locking. */
-static int b43legacy_rfkill_soft_toggle(void *data, enum rfkill_state state)
-{
-	struct b43legacy_wldev *dev = data;
-	struct b43legacy_wl *wl = dev->wl;
-	int err = -EBUSY;
-
-	if (!wl->rfkill.registered)
-		return 0;
-
-	mutex_lock(&wl->mutex);
-	if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED)
-		goto out_unlock;
-	err = 0;
-	switch (state) {
-	case RFKILL_STATE_UNBLOCKED:
-		if (!dev->radio_hw_enable) {
-			/* No luck. We can't toggle the hardware RF-kill
-			 * button from software. */
-			err = -EBUSY;
-			goto out_unlock;
+		wiphy_rfkill_set_hw_state(hw->wiphy, !enabled);
+		if (enabled != dev->phy.radio_on) {
+			if (enabled)
+				b43legacy_radio_turn_on(dev);
+			else
+				b43legacy_radio_turn_off(dev, 0);
 		}
-		if (!dev->phy.radio_on)
-			b43legacy_radio_turn_on(dev);
-		break;
-	case RFKILL_STATE_SOFT_BLOCKED:
-		if (dev->phy.radio_on)
-			b43legacy_radio_turn_off(dev, 0);
-		break;
-	default:
-		b43legacywarn(wl, "Received unexpected rfkill state %d.\n",
-			      state);
-		break;
 	}
 
-out_unlock:
-	mutex_unlock(&wl->mutex);
-
-	return err;
-}
-
-char *b43legacy_rfkill_led_name(struct b43legacy_wldev *dev)
-{
-	struct b43legacy_rfkill *rfk = &(dev->wl->rfkill);
-
-	if (!rfk->registered)
-		return NULL;
-	return rfkill_get_led_name(rfk->rfkill);
-}
-
-void b43legacy_rfkill_init(struct b43legacy_wldev *dev)
-{
-	struct b43legacy_wl *wl = dev->wl;
-	struct b43legacy_rfkill *rfk = &(wl->rfkill);
-	int err;
-
-	rfk->registered = 0;
-
-	rfk->rfkill = rfkill_allocate(dev->dev->dev, RFKILL_TYPE_WLAN);
-	if (!rfk->rfkill)
-		goto out_error;
-	snprintf(rfk->name, sizeof(rfk->name),
-		 "b43legacy-%s", wiphy_name(wl->hw->wiphy));
-	rfk->rfkill->name = rfk->name;
-	rfk->rfkill->state = RFKILL_STATE_UNBLOCKED;
-	rfk->rfkill->data = dev;
-	rfk->rfkill->toggle_radio = b43legacy_rfkill_soft_toggle;
-
-	rfk->poll_dev = input_allocate_polled_device();
-	if (!rfk->poll_dev) {
-		rfkill_free(rfk->rfkill);
-		goto err_freed_rfk;
+	if (brought_up) {
+		ssb_device_disable(dev->dev, 0);
+		ssb_bus_may_powerdown(bus);
 	}
 
-	rfk->poll_dev->private = dev;
-	rfk->poll_dev->poll = b43legacy_rfkill_poll;
-	rfk->poll_dev->poll_interval = 1000; /* msecs */
-
-	rfk->poll_dev->input->name = rfk->name;
-	rfk->poll_dev->input->id.bustype = BUS_HOST;
-	rfk->poll_dev->input->id.vendor = dev->dev->bus->boardinfo.vendor;
-	rfk->poll_dev->input->evbit[0] = BIT(EV_KEY);
-	set_bit(KEY_WLAN, rfk->poll_dev->input->keybit);
-
-	err = rfkill_register(rfk->rfkill);
-	if (err)
-		goto err_free_polldev;
-
-#ifdef CONFIG_RFKILL_INPUT_MODULE
-	/* B43legacy RF-kill isn't useful without the rfkill-input subsystem.
-	 * Try to load the module. */
-	err = request_module("rfkill-input");
-	if (err)
-		b43legacywarn(wl, "Failed to load the rfkill-input module."
-			      "The built-in radio LED will not work.\n");
-#endif /* CONFIG_RFKILL_INPUT */
-
-	err = input_register_polled_device(rfk->poll_dev);
-	if (err)
-		goto err_unreg_rfk;
-
-	rfk->registered = 1;
-
-	return;
-err_unreg_rfk:
-	rfkill_unregister(rfk->rfkill);
-err_free_polldev:
-	input_free_polled_device(rfk->poll_dev);
-	rfk->poll_dev = NULL;
-err_freed_rfk:
-	rfk->rfkill = NULL;
-out_error:
-	rfk->registered = 0;
-	b43legacywarn(wl, "RF-kill button init failed\n");
-}
-
-void b43legacy_rfkill_exit(struct b43legacy_wldev *dev)
-{
-	struct b43legacy_rfkill *rfk = &(dev->wl->rfkill);
-
-	if (!rfk->registered)
-		return;
-	rfk->registered = 0;
-
-	input_unregister_polled_device(rfk->poll_dev);
-	rfkill_unregister(rfk->rfkill);
-	input_free_polled_device(rfk->poll_dev);
-	rfk->poll_dev = NULL;
-	rfk->rfkill = NULL;
+	mutex_unlock(&wl->mutex);
 }
-
diff --git a/drivers/net/wireless/b43legacy/rfkill.h b/drivers/net/wireless/b43legacy/rfkill.h
index 11150a8032f0..75585571c544 100644
--- a/drivers/net/wireless/b43legacy/rfkill.h
+++ b/drivers/net/wireless/b43legacy/rfkill.h
@@ -1,59 +1,11 @@
 #ifndef B43legacy_RFKILL_H_
 #define B43legacy_RFKILL_H_
 
+struct ieee80211_hw;
 struct b43legacy_wldev;
 
-#ifdef CONFIG_B43LEGACY_RFKILL
+void b43legacy_rfkill_poll(struct ieee80211_hw *hw);
 
-#include <linux/rfkill.h>
-#include <linux/workqueue.h>
-#include <linux/input-polldev.h>
-
-
-
-struct b43legacy_rfkill {
-	/* The RFKILL subsystem data structure */
-	struct rfkill *rfkill;
-	/* The poll device for the RFKILL input button */
-	struct input_polled_dev *poll_dev;
-	/* Did initialization succeed? Used for freeing. */
-	bool registered;
-	/* The unique name of this rfkill switch */
-	char name[sizeof("b43legacy-phy4294967295")];
-};
-
-/* The init function returns void, because we are not interested
- * in failing the b43 init process when rfkill init failed. */
-void b43legacy_rfkill_init(struct b43legacy_wldev *dev);
-void b43legacy_rfkill_exit(struct b43legacy_wldev *dev);
-
-char *b43legacy_rfkill_led_name(struct b43legacy_wldev *dev);
-
-
-#else /* CONFIG_B43LEGACY_RFKILL */
-/* No RFKILL support. */
-
-struct b43legacy_rfkill {
-	/* empty */
-};
-
-static inline void b43legacy_rfkill_alloc(struct b43legacy_wldev *dev)
-{
-}
-static inline void b43legacy_rfkill_free(struct b43legacy_wldev *dev)
-{
-}
-static inline void b43legacy_rfkill_init(struct b43legacy_wldev *dev)
-{
-}
-static inline void b43legacy_rfkill_exit(struct b43legacy_wldev *dev)
-{
-}
-static inline char *b43legacy_rfkill_led_name(struct b43legacy_wldev *dev)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_B43LEGACY_RFKILL */
+bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev);
 
 #endif /* B43legacy_RFKILL_H_ */
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 8304f6406a17..029ccb6bdbaa 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -5,16 +5,11 @@ config IWLWIFI
 	select FW_LOADER
 	select MAC80211_LEDS if IWLWIFI_LEDS
 	select LEDS_CLASS if IWLWIFI_LEDS
-	select RFKILL if IWLWIFI_RFKILL
 
 config IWLWIFI_LEDS
 	bool "Enable LED support in iwlagn and iwl3945 drivers"
 	depends on IWLWIFI
 
-config IWLWIFI_RFKILL
-	bool "Enable RF kill support in iwlagn and iwl3945 drivers"
-	depends on IWLWIFI
-
 config IWLWIFI_SPECTRUM_MEASUREMENT
 	bool "Enable Spectrum Measurement in iwlagn driver"
 	depends on IWLWIFI
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index d79d97ad61a5..1d4e0a226fd4 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -4,7 +4,6 @@ iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
 iwlcore-objs += iwl-scan.o
 iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
-iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
 iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
 
 obj-$(CONFIG_IWLAGN) += iwlagn.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index bd7e520d98c2..225e5f889346 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -167,10 +167,6 @@ static int iwl3945_led_disassociate(struct iwl_priv *priv, int led_id)
 	IWL_DEBUG_LED(priv, "Disassociated\n");
 
 	priv->allow_blinking = 0;
-	if (iwl_is_rfkill(priv))
-		iwl3945_led_off(priv, led_id);
-	else
-		iwl3945_led_on(priv, led_id);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 814afaf6d10b..5eb538d18a80 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -38,6 +38,7 @@
 
 #include "iwl-commands.h"
 #include "iwl-3945.h"
+#include "iwl-sta.h"
 
 #define RS_NAME "iwl-3945-rs"
 
@@ -714,13 +715,13 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
 
 	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
 	    !rs_sta->ibss_sta_added) {
-		u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
+		u8 sta_id = iwl_find_station(priv, hdr->addr1);
 
 		if (sta_id == IWL_INVALID_STATION) {
 			IWL_DEBUG_RATE(priv, "LQ: ADD station %pm\n",
 				       hdr->addr1);
-			sta_id = iwl3945_add_station(priv,
-				    hdr->addr1, 0, CMD_ASYNC, NULL);
+			sta_id = iwl_add_station(priv, hdr->addr1, false,
+				CMD_ASYNC, NULL);
 		}
 		if (sta_id != IWL_INVALID_STATION)
 			rs_sta->ibss_sta_added = 1;
@@ -975,7 +976,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
 
 	rcu_read_lock();
 
-	sta = ieee80211_find_sta(hw, priv->stations_39[sta_id].sta.sta.addr);
+	sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr);
 	if (!sta) {
 		rcu_read_unlock();
 		return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index fd65e1c3e055..46288e724889 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -769,35 +769,6 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	return ;
 }
 
-u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *addr)
-{
-	int i, start = IWL_AP_ID;
-	int ret = IWL_INVALID_STATION;
-	unsigned long flags;
-
-	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
-	    (priv->iw_mode == NL80211_IFTYPE_AP))
-		start = IWL_STA_ID;
-
-	if (is_broadcast_ether_addr(addr))
-		return priv->hw_params.bcast_sta_id;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	for (i = start; i < priv->hw_params.max_stations; i++)
-		if ((priv->stations_39[i].used) &&
-		    (!compare_ether_addr
-		     (priv->stations_39[i].sta.sta.addr, addr))) {
-			ret = i;
-			goto out;
-		}
-
-	IWL_DEBUG_INFO(priv, "can not find STA %pM (total %d)\n",
-		       addr, priv->num_stations);
- out:
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-	return ret;
-}
-
 /**
  * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
  *
@@ -875,13 +846,13 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, struct iwl_cmd *cmd,
 u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
 {
 	unsigned long flags_spin;
-	struct iwl3945_station_entry *station;
+	struct iwl_station_entry *station;
 
 	if (sta_id == IWL_INVALID_STATION)
 		return IWL_INVALID_STATION;
 
 	spin_lock_irqsave(&priv->sta_lock, flags_spin);
-	station = &priv->stations_39[sta_id];
+	station = &priv->stations[sta_id];
 
 	station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
 	station->sta.rate_n_flags = cpu_to_le16(tx_rate);
@@ -889,8 +860,7 @@ u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
 
 	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 
-	iwl_send_add_sta(priv,
-			 (struct iwl_addsta_cmd *)&station->sta, flags);
+	iwl_send_add_sta(priv, &station->sta, flags);
 	IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
 			sta_id, tx_rate);
 	return sta_id;
@@ -2029,7 +1999,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
 
 	memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
 
-	priv->cfg->ops->smgmt->clear_station_table(priv);
+	iwl_clear_stations_table(priv);
 
 	/* If we issue a new RXON command which required a tune then we must
 	 * send a new TXPOWER command or we won't be able to Tx any frames */
@@ -2040,7 +2010,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
 	}
 
 	/* Add the broadcast address so we can send broadcast frames */
-	if (priv->cfg->ops->smgmt->add_station(priv, iwl_bcast_addr, 0, 0, NULL) ==
+	if (iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL) ==
 	    IWL_INVALID_STATION) {
 		IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
 		return -EIO;
@@ -2050,9 +2020,8 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
 	 * add the IWL_AP_ID to the station rate table */
 	if (iwl_is_associated(priv) &&
 	    (priv->iw_mode == NL80211_IFTYPE_STATION))
-		if (priv->cfg->ops->smgmt->add_station(priv,
-				priv->active_rxon.bssid_addr, 1, 0, NULL)
-		    == IWL_INVALID_STATION) {
+		if (iwl_add_station(priv, priv->active_rxon.bssid_addr,
+				true, CMD_SYNC, NULL) == IWL_INVALID_STATION) {
 			IWL_ERR(priv, "Error adding AP address for transmit\n");
 			return -EIO;
 		}
@@ -2466,13 +2435,25 @@ static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
 	}
 }
 
+
 static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
 {
-	u16 size = (u16)sizeof(struct iwl3945_addsta_cmd);
-	memcpy(data, cmd, size);
-	return size;
+	struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
+	addsta->mode = cmd->mode;
+	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+	memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
+	addsta->station_flags = cmd->station_flags;
+	addsta->station_flags_msk = cmd->station_flags_msk;
+	addsta->tid_disable_tx = cpu_to_le16(0);
+	addsta->rate_n_flags = cmd->rate_n_flags;
+	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+	return (u16)sizeof(struct iwl3945_addsta_cmd);
 }
 
+
 /**
  * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
  */
@@ -2842,15 +2823,6 @@ static struct iwl_lib_ops iwl3945_lib = {
 	.config_ap = iwl3945_config_ap,
 };
 
-static struct iwl_station_mgmt_ops iwl3945_station_mgmt = {
-	.add_station = iwl3945_add_station,
-#if 0
-	.remove_station = iwl3945_remove_station,
-#endif
-	.find_station = iwl3945_hw_find_station,
-	.clear_station_table = iwl3945_clear_stations_table,
-};
-
 static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
 	.get_hcmd_size = iwl3945_get_hcmd_size,
 	.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
@@ -2860,7 +2832,6 @@ static struct iwl_ops iwl3945_ops = {
 	.lib = &iwl3945_lib,
 	.hcmd = &iwl3945_hcmd,
 	.utils = &iwl3945_hcmd_utils,
-	.smgmt = &iwl3945_station_mgmt,
 };
 
 static struct iwl_cfg iwl3945_bg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index da87528f355f..fbb3a573463e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -36,10 +36,6 @@
 #include <linux/kernel.h>
 #include <net/ieee80211_radiotap.h>
 
-/*used for rfkill*/
-#include <linux/rfkill.h>
-#include <linux/input.h>
-
 /* Hardware specific file defines the PCI IDs table for that hardware module */
 extern struct pci_device_id iwl3945_hw_card_ids[];
 
@@ -155,7 +151,6 @@ struct iwl3945_frame {
 #define STATUS_HCMD_SYNC_ACTIVE	1	/* sync host command in progress */
 #define STATUS_INT_ENABLED	2
 #define STATUS_RF_KILL_HW	3
-#define STATUS_RF_KILL_SW	4
 #define STATUS_INIT		5
 #define STATUS_ALIVE		6
 #define STATUS_READY		7
@@ -202,12 +197,6 @@ struct iwl3945_ibss_seq {
  * for use by iwl-*.c
  *
  *****************************************************************************/
-struct iwl3945_addsta_cmd;
-extern int iwl3945_send_add_station(struct iwl_priv *priv,
-				    struct iwl3945_addsta_cmd *sta, u8 flags);
-extern u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *bssid,
-			  int is_ap, u8 flags, struct ieee80211_sta_ht_cap *ht_info);
-extern void iwl3945_clear_stations_table(struct iwl_priv *priv);
 extern int iwl3945_power_init_handle(struct iwl_priv *priv);
 extern int iwl3945_eeprom_init(struct iwl_priv *priv);
 extern int iwl3945_calc_db_from_ratio(int sig_ratio);
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index a0b29411a4b3..8f3d4bc6a03f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2221,13 +2221,6 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
 	cancel_work_sync(&priv->txpower_work);
 }
 
-static struct iwl_station_mgmt_ops iwl4965_station_mgmt = {
-	.add_station = iwl_add_station_flags,
-	.remove_station = iwl_remove_station,
-	.find_station = iwl_find_station,
-	.clear_station_table = iwl_clear_stations_table,
-};
-
 static struct iwl_hcmd_ops iwl4965_hcmd = {
 	.rxon_assoc = iwl4965_send_rxon_assoc,
 	.commit_rxon = iwl_commit_rxon,
@@ -2297,7 +2290,6 @@ static struct iwl_ops iwl4965_ops = {
 	.lib = &iwl4965_lib,
 	.hcmd = &iwl4965_hcmd,
 	.utils = &iwl4965_hcmd_utils,
-	.smgmt = &iwl4965_station_mgmt,
 };
 
 struct iwl_cfg iwl4965_agn_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index ab29aab6b2d5..b3c648ce8c7b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -651,7 +651,7 @@ static void iwl5000_init_alive_start(struct iwl_priv *priv)
 		goto restart;
 	}
 
-	priv->cfg->ops->smgmt->clear_station_table(priv);
+	iwl_clear_stations_table(priv);
 	ret = priv->cfg->ops->lib->alive_notify(priv);
 	if (ret) {
 		IWL_WARN(priv,
@@ -1049,7 +1049,10 @@ static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
 {
 	u16 size = (u16)sizeof(struct iwl_addsta_cmd);
-	memcpy(data, cmd, size);
+	struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
+	memcpy(addsta, cmd, size);
+	/* resrved in 5000 */
+	addsta->rate_n_flags = cpu_to_le16(0);
 	return size;
 }
 
@@ -1423,13 +1426,6 @@ int iwl5000_calc_rssi(struct iwl_priv *priv,
 	return max_rssi - agc - IWL49_RSSI_OFFSET;
 }
 
-struct iwl_station_mgmt_ops iwl5000_station_mgmt = {
-	.add_station = iwl_add_station_flags,
-	.remove_station = iwl_remove_station,
-	.find_station = iwl_find_station,
-	.clear_station_table = iwl_clear_stations_table,
-};
-
 struct iwl_hcmd_ops iwl5000_hcmd = {
 	.rxon_assoc = iwl5000_send_rxon_assoc,
 	.commit_rxon = iwl_commit_rxon,
@@ -1549,14 +1545,12 @@ struct iwl_ops iwl5000_ops = {
 	.lib = &iwl5000_lib,
 	.hcmd = &iwl5000_hcmd,
 	.utils = &iwl5000_hcmd_utils,
-	.smgmt = &iwl5000_station_mgmt,
 };
 
 static struct iwl_ops iwl5150_ops = {
 	.lib = &iwl5150_lib,
 	.hcmd = &iwl5000_hcmd,
 	.utils = &iwl5000_hcmd_utils,
-	.smgmt = &iwl5000_station_mgmt,
 };
 
 struct iwl_mod_params iwl50_mod_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 7236382aeaa6..bd438d8acf55 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -72,7 +72,6 @@ static struct iwl_ops iwl6000_ops = {
 	.lib = &iwl5000_lib,
 	.hcmd = &iwl5000_hcmd,
 	.utils = &iwl6000_hcmd_utils,
-	.smgmt = &iwl5000_station_mgmt,
 };
 
 struct iwl_cfg iwl6000_2ag_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 23a58b00f180..ff20e5048a55 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -2502,15 +2502,13 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
 
 	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
 	    !lq_sta->ibss_sta_added) {
-		u8 sta_id = priv->cfg->ops->smgmt->find_station(priv,
-								hdr->addr1);
+		u8 sta_id = iwl_find_station(priv, hdr->addr1);
 
 		if (sta_id == IWL_INVALID_STATION) {
 			IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
 				       hdr->addr1);
-			sta_id = priv->cfg->ops->smgmt->add_station(priv,
-								hdr->addr1, 0,
-								CMD_ASYNC, NULL);
+			sta_id = iwl_add_station(priv, hdr->addr1,
+						false, CMD_ASYNC, NULL);
 		}
 		if ((sta_id != IWL_INVALID_STATION)) {
 			lq_sta->lq.sta_id = sta_id;
@@ -2598,7 +2596,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
 
 	lq_sta->ibss_sta_added = 0;
 	if (priv->iw_mode == NL80211_IFTYPE_AP) {
-		u8 sta_id = priv->cfg->ops->smgmt->find_station(priv,
+		u8 sta_id = iwl_find_station(priv,
 								sta->addr);
 
 		/* for IBSS the call are from tasklet */
@@ -2606,9 +2604,8 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
 
 		if (sta_id == IWL_INVALID_STATION) {
 			IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
-			sta_id = priv->cfg->ops->smgmt->add_station(priv,
-								sta->addr, 0,
-								CMD_ASYNC, NULL);
+			sta_id = iwl_add_station(priv, sta->addr, false,
+						CMD_ASYNC, NULL);
 		}
 		if ((sta_id != IWL_INVALID_STATION)) {
 			lq_sta->lq.sta_id = sta_id;
@@ -2790,9 +2787,10 @@ static void rs_fill_link_cmd(const struct iwl_priv *priv,
 		repeat_rate--;
 	}
 
-	lq_cmd->agg_params.agg_frame_cnt_limit = 64;
-	lq_cmd->agg_params.agg_dis_start_th = 3;
-	lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000);
+	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX;
+	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+	lq_cmd->agg_params.agg_time_limit =
+		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
 }
 
 static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 0a5507cbeb3f..a5637c4aa85d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -188,7 +188,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
 	}
 
-	priv->cfg->ops->smgmt->clear_station_table(priv);
+	iwl_clear_stations_table(priv);
 
 	priv->start_calib = 0;
 
@@ -737,19 +737,13 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
 			clear_bit(STATUS_RF_KILL_HW, &priv->status);
 
 
-	if (flags & SW_CARD_DISABLED)
-		set_bit(STATUS_RF_KILL_SW, &priv->status);
-	else
-		clear_bit(STATUS_RF_KILL_SW, &priv->status);
-
 	if (!(flags & RXON_CARD_DISABLED))
 		iwl_scan_cancel(priv);
 
 	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
-	     test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
-	    (test_bit(STATUS_RF_KILL_SW, &status) !=
-	     test_bit(STATUS_RF_KILL_SW, &priv->status)))
-		queue_work(priv->workqueue, &priv->rf_kill);
+	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
+		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+			test_bit(STATUS_RF_KILL_HW, &priv->status));
 	else
 		wake_up_interruptible(&priv->wait_command_queue);
 }
@@ -1045,7 +1039,7 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
 			set_bit(STATUS_RF_KILL_HW, &priv->status);
 		else
 			clear_bit(STATUS_RF_KILL_HW, &priv->status);
-		queue_work(priv->workqueue, &priv->rf_kill);
+		wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
 	}
 
 	handled |= CSR_INT_BIT_RF_KILL;
@@ -1218,7 +1212,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
 			set_bit(STATUS_RF_KILL_HW, &priv->status);
 		else
 			clear_bit(STATUS_RF_KILL_HW, &priv->status);
-		queue_work(priv->workqueue, &priv->rf_kill);
+		wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
 	}
 
 	handled |= CSR_INT_BIT_RF_KILL;
@@ -1617,7 +1611,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
 		goto restart;
 	}
 
-	priv->cfg->ops->smgmt->clear_station_table(priv);
+	iwl_clear_stations_table(priv);
 	ret = priv->cfg->ops->lib->alive_notify(priv);
 	if (ret) {
 		IWL_WARN(priv,
@@ -1703,7 +1697,7 @@ static void __iwl_down(struct iwl_priv *priv)
 
 	iwl_leds_unregister(priv);
 
-	priv->cfg->ops->smgmt->clear_station_table(priv);
+	iwl_clear_stations_table(priv);
 
 	/* Unblock any waiting calls */
 	wake_up_interruptible_all(&priv->wait_command_queue);
@@ -1726,12 +1720,10 @@ static void __iwl_down(struct iwl_priv *priv)
 	ieee80211_stop_queues(priv->hw);
 
 	/* If we have not previously called iwl_init() then
-	 * clear all bits but the RF Kill bits and return */
+	 * clear all bits but the RF Kill bit and return */
 	if (!iwl_is_init(priv)) {
 		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
 					STATUS_RF_KILL_HW |
-			       test_bit(STATUS_RF_KILL_SW, &priv->status) <<
-					STATUS_RF_KILL_SW |
 			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
 					STATUS_GEO_CONFIGURED |
 			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
@@ -1740,11 +1732,9 @@ static void __iwl_down(struct iwl_priv *priv)
 	}
 
 	/* ...otherwise clear out all the status bits but the RF Kill
-	 * bits and continue taking the NIC down. */
+	 * bit and continue taking the NIC down. */
 	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
 				STATUS_RF_KILL_HW |
-			test_bit(STATUS_RF_KILL_SW, &priv->status) <<
-				STATUS_RF_KILL_SW |
 			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
 				STATUS_GEO_CONFIGURED |
 			test_bit(STATUS_FW_ERROR, &priv->status) <<
@@ -1866,9 +1856,10 @@ static int __iwl_up(struct iwl_priv *priv)
 		set_bit(STATUS_RF_KILL_HW, &priv->status);
 
 	if (iwl_is_rfkill(priv)) {
+		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
+
 		iwl_enable_interrupts(priv);
-		IWL_WARN(priv, "Radio disabled by %s RF Kill switch\n",
-		    test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW");
+		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
 		return 0;
 	}
 
@@ -1887,8 +1878,6 @@ static int __iwl_up(struct iwl_priv *priv)
 
 	/* clear (again), then enable host interrupts */
 	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-	/* enable dram interrupt */
-	iwl_reset_ict(priv);
 	iwl_enable_interrupts(priv);
 
 	/* really make sure rfkill handshake bits are cleared */
@@ -1903,7 +1892,7 @@ static int __iwl_up(struct iwl_priv *priv)
 
 	for (i = 0; i < MAX_HW_RESTARTS; i++) {
 
-		priv->cfg->ops->smgmt->clear_station_table(priv);
+		iwl_clear_stations_table(priv);
 
 		/* load bootstrap state machine,
 		 * load bootstrap program into processor's memory,
@@ -1962,6 +1951,9 @@ static void iwl_bg_alive_start(struct work_struct *data)
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
+	/* enable dram interrupt */
+	iwl_reset_ict(priv);
+
 	mutex_lock(&priv->mutex);
 	iwl_alive_start(priv);
 	mutex_unlock(&priv->mutex);
@@ -2000,7 +1992,6 @@ static void iwl_bg_up(struct work_struct *data)
 	mutex_lock(&priv->mutex);
 	__iwl_up(priv);
 	mutex_unlock(&priv->mutex);
-	iwl_rfkill_set_hw_state(priv);
 }
 
 static void iwl_bg_restart(struct work_struct *data)
@@ -2178,8 +2169,6 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
 
 	mutex_unlock(&priv->mutex);
 
-	iwl_rfkill_set_hw_state(priv);
-
 	if (ret)
 		return ret;
 
@@ -2348,7 +2337,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		return -EOPNOTSUPP;
 	}
 	addr = sta ? sta->addr : iwl_bcast_addr;
-	sta_id = priv->cfg->ops->smgmt->find_station(priv, addr);
+	sta_id = iwl_find_station(priv, addr);
 	if (sta_id == IWL_INVALID_STATION) {
 		IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
 				   addr);
@@ -2774,7 +2763,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
 	INIT_WORK(&priv->up, iwl_bg_up);
 	INIT_WORK(&priv->restart, iwl_bg_restart);
 	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
-	INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
 	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
 	INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
 	INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
@@ -3045,12 +3033,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	else
 		set_bit(STATUS_RF_KILL_HW, &priv->status);
 
-	err = iwl_rfkill_init(priv);
-	if (err)
-		IWL_ERR(priv, "Unable to initialize RFKILL system. "
-				  "Ignoring error: %d\n", err);
-	else
-		iwl_rfkill_set_hw_state(priv);
+	wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+		test_bit(STATUS_RF_KILL_HW, &priv->status));
 
 	iwl_power_initialize(priv);
 	return 0;
@@ -3114,14 +3098,13 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
 
 	iwl_synchronize_irq(priv);
 
-	iwl_rfkill_unregister(priv);
 	iwl_dealloc_ucode_pci(priv);
 
 	if (priv->rxq.bd)
 		iwl_rx_queue_free(priv, &priv->rxq);
 	iwl_hw_txq_ctx_free(priv);
 
-	priv->cfg->ops->smgmt->clear_station_table(priv);
+	iwl_clear_stations_table(priv);
 	iwl_eeprom_free(priv);
 
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e581dc323f0a..c87033bf3ad2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1067,7 +1067,7 @@ struct iwl_addsta_cmd {
 	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
 	__le16 tid_disable_tx;
 
-	__le16	reserved1;
+	__le16	rate_n_flags;		/* 3945 only */
 
 	/* TID for which to add block-ack support.
 	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1913,6 +1913,18 @@ struct iwl_link_qual_general_params {
 	u8 start_rate_index[LINK_QUAL_AC_NUM];
 } __attribute__ ((packed));
 
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX	(65535)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN	(0)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF	(3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX	(255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN	(0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(31)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(64)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)
+
 /**
  * struct iwl_link_qual_agg_params
  *
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index e93ddb74457e..f9d16ca5b3d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -36,7 +36,6 @@
36#include "iwl-debug.h" 36#include "iwl-debug.h"
37#include "iwl-core.h" 37#include "iwl-core.h"
38#include "iwl-io.h" 38#include "iwl-io.h"
39#include "iwl-rfkill.h"
40#include "iwl-power.h" 39#include "iwl-power.h"
41#include "iwl-sta.h" 40#include "iwl-sta.h"
42#include "iwl-helpers.h" 41#include "iwl-helpers.h"
@@ -1389,7 +1388,7 @@ int iwl_init_drv(struct iwl_priv *priv)
1389 mutex_init(&priv->mutex); 1388 mutex_init(&priv->mutex);
1390 1389
1391 /* Clear the driver's (not device's) station table */ 1390 /* Clear the driver's (not device's) station table */
1392 priv->cfg->ops->smgmt->clear_station_table(priv); 1391 iwl_clear_stations_table(priv);
1393 1392
1394 priv->data_retry_limit = -1; 1393 priv->data_retry_limit = -1;
1395 priv->ieee_channels = NULL; 1394 priv->ieee_channels = NULL;
@@ -1704,8 +1703,9 @@ static irqreturn_t iwl_isr(int irq, void *data)
 {
 	struct iwl_priv *priv = data;
 	u32 inta, inta_mask;
+#ifdef CONFIG_IWLWIFI_DEBUG
 	u32 inta_fh;
-
+#endif
 	if (!priv)
 		return IRQ_NONE;
 
@@ -2210,126 +2210,6 @@ int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
2210} 2210}
2211EXPORT_SYMBOL(iwl_send_card_state); 2211EXPORT_SYMBOL(iwl_send_card_state);
2212 2212
2213void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv)
2214{
2215 unsigned long flags;
2216
2217 if (test_bit(STATUS_RF_KILL_SW, &priv->status))
2218 return;
2219
2220 IWL_DEBUG_RF_KILL(priv, "Manual SW RF KILL set to: RADIO OFF\n");
2221
2222 iwl_scan_cancel(priv);
2223 /* FIXME: This is a workaround for AP */
2224 if (priv->iw_mode != NL80211_IFTYPE_AP) {
2225 spin_lock_irqsave(&priv->lock, flags);
2226 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2227 CSR_UCODE_SW_BIT_RFKILL);
2228 spin_unlock_irqrestore(&priv->lock, flags);
2229 /* call the host command only if no hw rf-kill set */
2230 if (!test_bit(STATUS_RF_KILL_HW, &priv->status) &&
2231 iwl_is_ready(priv))
2232 iwl_send_card_state(priv,
2233 CARD_STATE_CMD_DISABLE, 0);
2234 set_bit(STATUS_RF_KILL_SW, &priv->status);
2235 /* make sure mac80211 stop sending Tx frame */
2236 if (priv->mac80211_registered)
2237 ieee80211_stop_queues(priv->hw);
2238 }
2239}
2240EXPORT_SYMBOL(iwl_radio_kill_sw_disable_radio);
2241
2242int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv)
2243{
2244 unsigned long flags;
2245
2246 if (!test_bit(STATUS_RF_KILL_SW, &priv->status))
2247 return 0;
2248
2249 IWL_DEBUG_RF_KILL(priv, "Manual SW RF KILL set to: RADIO ON\n");
2250
2251 spin_lock_irqsave(&priv->lock, flags);
2252 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2253
2254 /* If the driver is up it will receive CARD_STATE_NOTIFICATION
2255 * notification where it will clear SW rfkill status.
2256 * Setting it here would break the handler. Only if the
2257 * interface is down we can set here since we don't
2258 * receive any further notification.
2259 */
2260 if (!priv->is_open)
2261 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2262 spin_unlock_irqrestore(&priv->lock, flags);
2263
2264 /* wake up ucode */
2265 msleep(10);
2266
2267 iwl_read32(priv, CSR_UCODE_DRV_GP1);
2268 spin_lock_irqsave(&priv->reg_lock, flags);
2269 if (!iwl_grab_nic_access(priv))
2270 iwl_release_nic_access(priv);
2271 spin_unlock_irqrestore(&priv->reg_lock, flags);
2272
2273 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2274 IWL_DEBUG_RF_KILL(priv, "Can not turn radio back on - "
2275 "disabled by HW switch\n");
2276 return 0;
2277 }
2278
2279 /* when driver is up while rfkill is on, it wont receive
2280 * any CARD_STATE_NOTIFICATION notifications so we have to
2281 * restart it in here
2282 */
2283 if (priv->is_open && !test_bit(STATUS_ALIVE, &priv->status)) {
2284 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2285 if (!iwl_is_rfkill(priv))
2286 queue_work(priv->workqueue, &priv->up);
2287 }
2288
2289 /* If the driver is already loaded, it will receive
2290 * CARD_STATE_NOTIFICATION notifications and the handler will
2291 * call restart to reload the driver.
2292 */
2293 return 1;
2294}
2295EXPORT_SYMBOL(iwl_radio_kill_sw_enable_radio);
2296
2297void iwl_bg_rf_kill(struct work_struct *work)
2298{
2299 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
2300
2301 wake_up_interruptible(&priv->wait_command_queue);
2302
2303 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2304 return;
2305
2306 mutex_lock(&priv->mutex);
2307
2308 if (!iwl_is_rfkill(priv)) {
2309 IWL_DEBUG_RF_KILL(priv,
2310 "HW and/or SW RF Kill no longer active, restarting "
2311 "device\n");
2312 if (!test_bit(STATUS_EXIT_PENDING, &priv->status) &&
2313 priv->is_open)
2314 queue_work(priv->workqueue, &priv->restart);
2315 } else {
2316 /* make sure mac80211 stop sending Tx frame */
2317 if (priv->mac80211_registered)
2318 ieee80211_stop_queues(priv->hw);
2319
2320 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
2321 IWL_DEBUG_RF_KILL(priv, "Can not turn radio back on - "
2322 "disabled by SW switch\n");
2323 else
2324 IWL_WARN(priv, "Radio Frequency Kill Switch is On:\n"
2325 "Kill switch must be turned off for "
2326 "wireless networking to work.\n");
2327 }
2328 mutex_unlock(&priv->mutex);
2329 iwl_rfkill_set_hw_state(priv);
2330}
2331EXPORT_SYMBOL(iwl_bg_rf_kill);
2332
2333void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, 2213void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
2334 struct iwl_rx_mem_buffer *rxb) 2214 struct iwl_rx_mem_buffer *rxb)
2335{ 2215{
@@ -2679,19 +2559,12 @@ int iwl_set_mode(struct iwl_priv *priv, int mode)
 
 	memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
 
-	priv->cfg->ops->smgmt->clear_station_table(priv);
+	iwl_clear_stations_table(priv);
 
 	/* dont commit rxon if rf-kill is on*/
 	if (!iwl_is_ready_rf(priv))
 		return -EAGAIN;
 
-	cancel_delayed_work(&priv->scan_check);
-	if (iwl_scan_cancel_timeout(priv, 100)) {
-		IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
-		IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
-		return -EAGAIN;
-	}
-
 	iwlcore_commit_rxon(priv);
 
 	return 0;
@@ -2855,23 +2728,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2855 if (priv->cfg->ops->hcmd->set_rxon_chain) 2728 if (priv->cfg->ops->hcmd->set_rxon_chain)
2856 priv->cfg->ops->hcmd->set_rxon_chain(priv); 2729 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2857 2730
2858 if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
2859 if (conf->radio_enabled &&
2860 iwl_radio_kill_sw_enable_radio(priv)) {
2861 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - "
2862 "waiting for uCode\n");
2863 goto out;
2864 }
2865
2866 if (!conf->radio_enabled)
2867 iwl_radio_kill_sw_disable_radio(priv);
2868 }
2869
2870 if (!conf->radio_enabled) {
2871 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
2872 goto out;
2873 }
2874
2875 if (!iwl_is_ready(priv)) { 2731 if (!iwl_is_ready(priv)) {
2876 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2732 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2877 goto out; 2733 goto out;
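With iwl_radio_kill_sw_enable_radio(), iwl_radio_kill_sw_disable_radio() and the rf_kill worker removed above, the only rfkill state the driver still tracks is the hardware switch, and it reports that to cfg80211 instead of a private rfkill device. A minimal sketch of the reporting step, assuming only STATUS_RF_KILL_HW and the wiphy behind priv->hw remain; iwl_report_hw_rfkill is an illustrative name, not a function added by this patch, and the same wiphy_rfkill_set_hw_state() call appears in the iwl3945-base.c hunks later in this diff.

#include <linux/bitops.h>	/* test_bit() */
#include <net/cfg80211.h>	/* wiphy_rfkill_set_hw_state() */
#include <net/mac80211.h>	/* struct ieee80211_hw */

/* Illustrative only: push the hardware kill-switch state to the rfkill
 * core; soft blocking is now handled by cfg80211/mac80211, not the driver. */
static void iwl_report_hw_rfkill(struct iwl_priv *priv)
{
	bool blocked = test_bit(STATUS_RF_KILL_HW, &priv->status);

	wiphy_rfkill_set_hw_state(priv->hw->wiphy, blocked);
}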
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 87df1b767941..dabf663e36e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -83,15 +83,6 @@ struct iwl_cmd;
83#define IWL_SKU_A 0x2 83#define IWL_SKU_A 0x2
84#define IWL_SKU_N 0x8 84#define IWL_SKU_N 0x8
85 85
86struct iwl_station_mgmt_ops {
87 u8 (*add_station)(struct iwl_priv *priv, const u8 *addr,
88 int is_ap, u8 flags, struct ieee80211_sta_ht_cap *ht_info);
89 int (*remove_station)(struct iwl_priv *priv, const u8 *addr,
90 int is_ap);
91 u8 (*find_station)(struct iwl_priv *priv, const u8 *addr);
92 void (*clear_station_table)(struct iwl_priv *priv);
93};
94
95struct iwl_hcmd_ops { 86struct iwl_hcmd_ops {
96 int (*rxon_assoc)(struct iwl_priv *priv); 87 int (*rxon_assoc)(struct iwl_priv *priv);
97 int (*commit_rxon)(struct iwl_priv *priv); 88 int (*commit_rxon)(struct iwl_priv *priv);
@@ -183,7 +174,6 @@ struct iwl_ops {
183 const struct iwl_lib_ops *lib; 174 const struct iwl_lib_ops *lib;
184 const struct iwl_hcmd_ops *hcmd; 175 const struct iwl_hcmd_ops *hcmd;
185 const struct iwl_hcmd_utils_ops *utils; 176 const struct iwl_hcmd_utils_ops *utils;
186 const struct iwl_station_mgmt_ops *smgmt;
187}; 177};
188 178
189struct iwl_mod_params { 179struct iwl_mod_params {
@@ -192,7 +182,7 @@ struct iwl_mod_params {
 	int disable_hw_scan;	/* def: 0 = use h/w scan */
 	int num_of_queues;	/* def: HW dependent */
 	int num_of_ampdu_queues;/* def: HW dependent */
-	int disable_11n;	/* def: 0 = disable 11n capabilities */
+	int disable_11n;	/* def: 0 = 11n capabilities enabled */
 	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
 	int antenna;		/* def: 0 = both antennas (use diversity) */
 	int restart_fw;		/* def: 1 = restart firmware */
@@ -358,14 +348,6 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
358 ****************************************************/ 348 ****************************************************/
359int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force); 349int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
360 350
361/*****************************************************
362 * RF -Kill - here and not in iwl-rfkill.h to be available when
363 * RF-kill subsystem is not compiled.
364 ****************************************************/
365void iwl_bg_rf_kill(struct work_struct *work);
366void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv);
367int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv);
368
369/******************************************************************************* 351/*******************************************************************************
370 * Rate 352 * Rate
371 ******************************************************************************/ 353 ******************************************************************************/
@@ -508,7 +490,6 @@ void iwlcore_free_geos(struct iwl_priv *priv);
508#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */ 490#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
509#define STATUS_INT_ENABLED 2 491#define STATUS_INT_ENABLED 2
510#define STATUS_RF_KILL_HW 3 492#define STATUS_RF_KILL_HW 3
511#define STATUS_RF_KILL_SW 4
512#define STATUS_INIT 5 493#define STATUS_INIT 5
513#define STATUS_ALIVE 6 494#define STATUS_ALIVE 6
514#define STATUS_READY 7 495#define STATUS_READY 7
@@ -543,11 +524,6 @@ static inline int iwl_is_init(struct iwl_priv *priv)
543 return test_bit(STATUS_INIT, &priv->status); 524 return test_bit(STATUS_INIT, &priv->status);
544} 525}
545 526
546static inline int iwl_is_rfkill_sw(struct iwl_priv *priv)
547{
548 return test_bit(STATUS_RF_KILL_SW, &priv->status);
549}
550
551static inline int iwl_is_rfkill_hw(struct iwl_priv *priv) 527static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
552{ 528{
553 return test_bit(STATUS_RF_KILL_HW, &priv->status); 529 return test_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -555,7 +531,7 @@ static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
 
 static inline int iwl_is_rfkill(struct iwl_priv *priv)
 {
-	return iwl_is_rfkill_hw(priv) || iwl_is_rfkill_sw(priv);
+	return iwl_is_rfkill_hw(priv);
 }
 
 static inline int iwl_is_ready_rf(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index af70229144b3..11e08c068917 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -449,8 +449,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
449 test_bit(STATUS_INT_ENABLED, &priv->status)); 449 test_bit(STATUS_INT_ENABLED, &priv->status));
450 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", 450 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
451 test_bit(STATUS_RF_KILL_HW, &priv->status)); 451 test_bit(STATUS_RF_KILL_HW, &priv->status));
452 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_SW:\t %d\n",
453 test_bit(STATUS_RF_KILL_SW, &priv->status));
454 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", 452 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
455 test_bit(STATUS_INIT, &priv->status)); 453 test_bit(STATUS_INIT, &priv->status));
456 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", 454 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 2dafc26fb6a8..e2d620f0b6e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -41,7 +41,6 @@
41#include "iwl-prph.h" 41#include "iwl-prph.h"
42#include "iwl-fh.h" 42#include "iwl-fh.h"
43#include "iwl-debug.h" 43#include "iwl-debug.h"
44#include "iwl-rfkill.h"
45#include "iwl-4965-hw.h" 44#include "iwl-4965-hw.h"
46#include "iwl-3945-hw.h" 45#include "iwl-3945-hw.h"
47#include "iwl-3945-led.h" 46#include "iwl-3945-led.h"
@@ -70,7 +69,6 @@ extern struct iwl_ops iwl5000_ops;
70extern struct iwl_lib_ops iwl5000_lib; 69extern struct iwl_lib_ops iwl5000_lib;
71extern struct iwl_hcmd_ops iwl5000_hcmd; 70extern struct iwl_hcmd_ops iwl5000_hcmd;
72extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils; 71extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils;
73extern struct iwl_station_mgmt_ops iwl5000_station_mgmt;
74 72
75/* shared functions from iwl-5000.c */ 73/* shared functions from iwl-5000.c */
76extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len); 74extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len);
@@ -290,11 +288,11 @@ struct iwl_frame {
 #define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
 
 enum {
-	/* CMD_SIZE_NORMAL = 0, */
+	CMD_SYNC = 0,
+	CMD_SIZE_NORMAL = 0,
+	CMD_NO_SKB = 0,
 	CMD_SIZE_HUGE = (1 << 0),
-	/* CMD_SYNC = 0, */
 	CMD_ASYNC = (1 << 1),
-	/* CMD_NO_SKB = 0, */
 	CMD_WANT_SKB = (1 << 2),
 };
 
@@ -937,9 +935,6 @@ struct iwl_priv {
937 * 4965's initialize alive response contains some calibration data. */ 935 * 4965's initialize alive response contains some calibration data. */
938 struct iwl_init_alive_resp card_alive_init; 936 struct iwl_init_alive_resp card_alive_init;
939 struct iwl_alive_resp card_alive; 937 struct iwl_alive_resp card_alive;
940#if defined(CONFIG_IWLWIFI_RFKILL)
941 struct rfkill *rfkill;
942#endif
943 938
944#ifdef CONFIG_IWLWIFI_LEDS 939#ifdef CONFIG_IWLWIFI_LEDS
945 unsigned long last_blink_time; 940 unsigned long last_blink_time;
@@ -1073,7 +1068,6 @@ struct iwl_priv {
1073 struct work_struct calibrated_work; 1068 struct work_struct calibrated_work;
1074 struct work_struct scan_completed; 1069 struct work_struct scan_completed;
1075 struct work_struct rx_replenish; 1070 struct work_struct rx_replenish;
1076 struct work_struct rf_kill;
1077 struct work_struct abort_scan; 1071 struct work_struct abort_scan;
1078 struct work_struct update_link_led; 1072 struct work_struct update_link_led;
1079 struct work_struct auth_work; 1073 struct work_struct auth_work;
@@ -1119,8 +1113,6 @@ struct iwl_priv {
1119 1113
1120 struct iwl3945_notif_statistics statistics_39; 1114 struct iwl3945_notif_statistics statistics_39;
1121 1115
1122 struct iwl3945_station_entry stations_39[IWL_STATION_COUNT];
1123
1124 u32 sta_supp_rates; 1116 u32 sta_supp_rates;
1125}; /*iwl_priv */ 1117}; /*iwl_priv */
1126 1118
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index cefa501e5971..7d7554a2f341 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -240,13 +240,11 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
 	if (ret < 0)
 		IWL_ERR(priv, "Time out access OTP\n");
 	else {
-		if (!ret) {
-			iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
-					  APMG_PS_CTRL_VAL_RESET_REQ);
-			udelay(5);
-			iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
-					    APMG_PS_CTRL_VAL_RESET_REQ);
-		}
+		iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
+				  APMG_PS_CTRL_VAL_RESET_REQ);
+		udelay(5);
+		iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+				    APMG_PS_CTRL_VAL_RESET_REQ);
 	}
 	return ret;
 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 19680f72087f..5e64252f80f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -176,10 +176,6 @@ static int iwl_led_associate(struct iwl_priv *priv, int led_id)
176static int iwl_led_disassociate(struct iwl_priv *priv, int led_id) 176static int iwl_led_disassociate(struct iwl_priv *priv, int led_id)
177{ 177{
178 priv->allow_blinking = 0; 178 priv->allow_blinking = 0;
179 if (iwl_is_rfkill(priv))
180 iwl4965_led_off_reg(priv, led_id);
181 else
182 iwl4965_led_on_reg(priv, led_id);
183 179
184 return 0; 180 return 0;
185} 181}
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
deleted file mode 100644
index 65605ad44e4b..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+++ /dev/null
@@ -1,144 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31
32#include <net/mac80211.h>
33
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37
38/* software rf-kill from user */
39static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
40{
41 struct iwl_priv *priv = data;
42 int err = 0;
43
44 if (!priv->rfkill)
45 return 0;
46
47 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
48 return 0;
49
50 IWL_DEBUG_RF_KILL(priv, "we received soft RFKILL set to state %d\n", state);
51 mutex_lock(&priv->mutex);
52
53 switch (state) {
54 case RFKILL_STATE_UNBLOCKED:
55 if (iwl_is_rfkill_hw(priv)) {
56 err = -EBUSY;
57 goto out_unlock;
58 }
59 iwl_radio_kill_sw_enable_radio(priv);
60 break;
61 case RFKILL_STATE_SOFT_BLOCKED:
62 iwl_radio_kill_sw_disable_radio(priv);
63 break;
64 default:
65 IWL_WARN(priv, "we received unexpected RFKILL state %d\n",
66 state);
67 break;
68 }
69out_unlock:
70 mutex_unlock(&priv->mutex);
71
72 return err;
73}
74
75int iwl_rfkill_init(struct iwl_priv *priv)
76{
77 struct device *device = wiphy_dev(priv->hw->wiphy);
78 int ret = 0;
79
80 BUG_ON(device == NULL);
81
82 IWL_DEBUG_RF_KILL(priv, "Initializing RFKILL.\n");
83 priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
84 if (!priv->rfkill) {
85 IWL_ERR(priv, "Unable to allocate RFKILL device.\n");
86 ret = -ENOMEM;
87 goto error;
88 }
89
90 priv->rfkill->name = priv->cfg->name;
91 priv->rfkill->data = priv;
92 priv->rfkill->state = RFKILL_STATE_UNBLOCKED;
93 priv->rfkill->toggle_radio = iwl_rfkill_soft_rf_kill;
94
95 priv->rfkill->dev.class->suspend = NULL;
96 priv->rfkill->dev.class->resume = NULL;
97
98 ret = rfkill_register(priv->rfkill);
99 if (ret) {
100 IWL_ERR(priv, "Unable to register RFKILL: %d\n", ret);
101 goto free_rfkill;
102 }
103
104 IWL_DEBUG_RF_KILL(priv, "RFKILL initialization complete.\n");
105 return ret;
106
107free_rfkill:
108 if (priv->rfkill != NULL)
109 rfkill_free(priv->rfkill);
110 priv->rfkill = NULL;
111
112error:
113 IWL_DEBUG_RF_KILL(priv, "RFKILL initialization complete.\n");
114 return ret;
115}
116EXPORT_SYMBOL(iwl_rfkill_init);
117
118void iwl_rfkill_unregister(struct iwl_priv *priv)
119{
120
121 if (priv->rfkill)
122 rfkill_unregister(priv->rfkill);
123
124 priv->rfkill = NULL;
125}
126EXPORT_SYMBOL(iwl_rfkill_unregister);
127
128/* set RFKILL to the right state. */
129void iwl_rfkill_set_hw_state(struct iwl_priv *priv)
130{
131 if (!priv->rfkill)
132 return;
133
134 if (iwl_is_rfkill_hw(priv)) {
135 rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
136 return;
137 }
138
139 if (!iwl_is_rfkill_sw(priv))
140 rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
141 else
142 rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
143}
144EXPORT_SYMBOL(iwl_rfkill_set_hw_state);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.h b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
deleted file mode 100644
index 633dafb4bf1b..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_rf_kill_h__
29#define __iwl_rf_kill_h__
30
31struct iwl_priv;
32
33#include <linux/rfkill.h>
34
35#ifdef CONFIG_IWLWIFI_RFKILL
36
37void iwl_rfkill_set_hw_state(struct iwl_priv *priv);
38void iwl_rfkill_unregister(struct iwl_priv *priv);
39int iwl_rfkill_init(struct iwl_priv *priv);
40#else
41static inline void iwl_rfkill_set_hw_state(struct iwl_priv *priv) {}
42static inline void iwl_rfkill_unregister(struct iwl_priv *priv) {}
43static inline int iwl_rfkill_init(struct iwl_priv *priv) { return 0; }
44#endif
45
46
47
48#endif /* __iwl_rf_kill_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 0eb939c40ac1..2addf735b193 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -75,7 +75,7 @@ int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
 		return IWL_AP_ID;
 	} else {
 		u8 *da = ieee80211_get_DA(hdr);
-		return priv->cfg->ops->smgmt->find_station(priv, da);
+		return iwl_find_station(priv, da);
 	}
 }
 EXPORT_SYMBOL(iwl_get_ra_sta_id);
@@ -86,8 +86,7 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
86 86
87 spin_lock_irqsave(&priv->sta_lock, flags); 87 spin_lock_irqsave(&priv->sta_lock, flags);
88 88
89 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && 89 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
90 !(priv->stations_39[sta_id].used & IWL_STA_DRIVER_ACTIVE))
91 IWL_ERR(priv, "ACTIVATE a non DRIVER active station %d\n", 90 IWL_ERR(priv, "ACTIVATE a non DRIVER active station %d\n",
92 sta_id); 91 sta_id);
93 92
@@ -228,15 +227,16 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
 }
 
 /**
- * iwl_add_station_flags - Add station to tables in driver and device
+ * iwl_add_station - Add station to tables in driver and device
  */
-u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
-			 u8 flags, struct ieee80211_sta_ht_cap *ht_info)
+u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
+		   struct ieee80211_sta_ht_cap *ht_info)
 {
-	int i;
-	int sta_id = IWL_INVALID_STATION;
 	struct iwl_station_entry *station;
 	unsigned long flags_spin;
+	int i;
+	int sta_id = IWL_INVALID_STATION;
+	u16 rate;
 
 	spin_lock_irqsave(&priv->sta_lock, flags_spin);
 	if (is_ap)
@@ -288,6 +288,12 @@ u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
288 priv->iw_mode != NL80211_IFTYPE_ADHOC) 288 priv->iw_mode != NL80211_IFTYPE_ADHOC)
289 iwl_set_ht_add_station(priv, sta_id, ht_info); 289 iwl_set_ht_add_station(priv, sta_id, ht_info);
290 290
291 /* 3945 only */
292 rate = (priv->band == IEEE80211_BAND_5GHZ) ?
293 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
294 /* Turn on both antennas for the station... */
295 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
296
291 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 297 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
292 298
293 /* Add station to device's station table */ 299 /* Add station to device's station table */
@@ -295,12 +301,12 @@ u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
295 return sta_id; 301 return sta_id;
296 302
297} 303}
298EXPORT_SYMBOL(iwl_add_station_flags); 304EXPORT_SYMBOL(iwl_add_station);
299 305
300static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr) 306static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
301{ 307{
302 unsigned long flags; 308 unsigned long flags;
303 u8 sta_id = priv->cfg->ops->smgmt->find_station(priv, addr); 309 u8 sta_id = iwl_find_station(priv, addr);
304 310
305 BUG_ON(sta_id == IWL_INVALID_STATION); 311 BUG_ON(sta_id == IWL_INVALID_STATION);
306 312
@@ -408,7 +414,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
408/** 414/**
409 * iwl_remove_station - Remove driver's knowledge of station. 415 * iwl_remove_station - Remove driver's knowledge of station.
410 */ 416 */
411int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap) 417int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
412{ 418{
413 int sta_id = IWL_INVALID_STATION; 419 int sta_id = IWL_INVALID_STATION;
414 int i, ret = -EINVAL; 420 int i, ret = -EINVAL;
@@ -767,7 +773,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
767 unsigned long flags; 773 unsigned long flags;
768 int i; 774 int i;
769 775
770 sta_id = priv->cfg->ops->smgmt->find_station(priv, addr); 776 sta_id = iwl_find_station(priv, addr);
771 if (sta_id == IWL_INVALID_STATION) { 777 if (sta_id == IWL_INVALID_STATION) {
772 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", 778 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
773 addr); 779 addr);
@@ -946,7 +952,7 @@ EXPORT_SYMBOL(iwl_send_lq_cmd);
946 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, 952 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
947 * which requires station table entry to exist). 953 * which requires station table entry to exist).
948 */ 954 */
949static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap) 955static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
950{ 956{
951 int i, r; 957 int i, r;
952 struct iwl_link_quality_cmd link_cmd = { 958 struct iwl_link_quality_cmd link_cmd = {
@@ -979,8 +985,9 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
 	link_cmd.general_params.single_stream_ant_msk =
 				first_antenna(priv->hw_params.valid_tx_ant);
 	link_cmd.general_params.dual_stream_ant_msk = 3;
-	link_cmd.agg_params.agg_dis_start_th = 3;
-	link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
+	link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+	link_cmd.agg_params.agg_time_limit =
+		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
 
 	/* Update the rate scaling for control frame Tx to AP */
 	link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
@@ -995,7 +1002,7 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
995 * there is only one AP station with id= IWL_AP_ID 1002 * there is only one AP station with id= IWL_AP_ID
996 * NOTE: mutex must be held before calling this function 1003 * NOTE: mutex must be held before calling this function
997 */ 1004 */
998int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap) 1005int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
999{ 1006{
1000 struct ieee80211_sta *sta; 1007 struct ieee80211_sta *sta;
1001 struct ieee80211_sta_ht_cap ht_config; 1008 struct ieee80211_sta_ht_cap ht_config;
@@ -1020,8 +1027,7 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
1020 rcu_read_unlock(); 1027 rcu_read_unlock();
1021 } 1028 }
1022 1029
1023 sta_id = priv->cfg->ops->smgmt->add_station(priv, addr, is_ap, 1030 sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
1024 0, cur_ht_config);
1025 1031
1026 /* Set up default rate scaling table in device's station table */ 1032 /* Set up default rate scaling table in device's station table */
1027 iwl_sta_init_lq(priv, addr, is_ap); 1033 iwl_sta_init_lq(priv, addr, is_ap);
@@ -1054,7 +1060,7 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1054 1060
1055 /* If we are an AP, then find the station, or use BCAST */ 1061 /* If we are an AP, then find the station, or use BCAST */
1056 case NL80211_IFTYPE_AP: 1062 case NL80211_IFTYPE_AP:
1057 sta_id = priv->cfg->ops->smgmt->find_station(priv, hdr->addr1); 1063 sta_id = iwl_find_station(priv, hdr->addr1);
1058 if (sta_id != IWL_INVALID_STATION) 1064 if (sta_id != IWL_INVALID_STATION)
1059 return sta_id; 1065 return sta_id;
1060 return priv->hw_params.bcast_sta_id; 1066 return priv->hw_params.bcast_sta_id;
@@ -1062,13 +1068,13 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1062 /* If this frame is going out to an IBSS network, find the station, 1068 /* If this frame is going out to an IBSS network, find the station,
1063 * or create a new station table entry */ 1069 * or create a new station table entry */
1064 case NL80211_IFTYPE_ADHOC: 1070 case NL80211_IFTYPE_ADHOC:
1065 sta_id = priv->cfg->ops->smgmt->find_station(priv, hdr->addr1); 1071 sta_id = iwl_find_station(priv, hdr->addr1);
1066 if (sta_id != IWL_INVALID_STATION) 1072 if (sta_id != IWL_INVALID_STATION)
1067 return sta_id; 1073 return sta_id;
1068 1074
1069 /* Create new station table entry */ 1075 /* Create new station table entry */
1070 sta_id = priv->cfg->ops->smgmt->add_station(priv, hdr->addr1, 1076 sta_id = iwl_add_station(priv, hdr->addr1, false,
1071 0, CMD_ASYNC, NULL); 1077 CMD_ASYNC, NULL);
1072 1078
1073 if (sta_id != IWL_INVALID_STATION) 1079 if (sta_id != IWL_INVALID_STATION)
1074 return sta_id; 1080 return sta_id;
@@ -1111,7 +1117,7 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv,
1111 unsigned long flags; 1117 unsigned long flags;
1112 int sta_id; 1118 int sta_id;
1113 1119
1114 sta_id = priv->cfg->ops->smgmt->find_station(priv, addr); 1120 sta_id = iwl_find_station(priv, addr);
1115 if (sta_id == IWL_INVALID_STATION) 1121 if (sta_id == IWL_INVALID_STATION)
1116 return -ENXIO; 1122 return -ENXIO;
1117 1123
@@ -1133,7 +1139,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
1133 unsigned long flags; 1139 unsigned long flags;
1134 int sta_id; 1140 int sta_id;
1135 1141
1136 sta_id = priv->cfg->ops->smgmt->find_station(priv, addr); 1142 sta_id = iwl_find_station(priv, addr);
1137 if (sta_id == IWL_INVALID_STATION) { 1143 if (sta_id == IWL_INVALID_STATION) {
1138 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); 1144 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1139 return -ENXIO; 1145 return -ENXIO;
@@ -1168,7 +1174,7 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1168void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr) 1174void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
1169{ 1175{
1170 /* FIXME: need locking over ps_status ??? */ 1176 /* FIXME: need locking over ps_status ??? */
1171 u8 sta_id = priv->cfg->ops->smgmt->find_station(priv, addr); 1177 u8 sta_id = iwl_find_station(priv, addr);
1172 1178
1173 if (sta_id != IWL_INVALID_STATION) { 1179 if (sta_id != IWL_INVALID_STATION) {
1174 u8 sta_awake = priv->stations[sta_id]. 1180 u8 sta_awake = priv->stations[sta_id].
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 59a586b6b56c..6deebade6361 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -51,16 +51,15 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
51 struct ieee80211_key_conf *keyconf, 51 struct ieee80211_key_conf *keyconf,
52 const u8 *addr, u32 iv32, u16 *phase1key); 52 const u8 *addr, u32 iv32, u16 *phase1key);
53 53
54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap); 54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
55int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap); 55int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
56void iwl_clear_stations_table(struct iwl_priv *priv); 56void iwl_clear_stations_table(struct iwl_priv *priv);
57int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 57int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
58int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); 58int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
59int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); 59int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
60int iwl_send_add_sta(struct iwl_priv *priv, 60int iwl_send_add_sta(struct iwl_priv *priv,
61 struct iwl_addsta_cmd *sta, u8 flags); 61 struct iwl_addsta_cmd *sta, u8 flags);
62u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, 62u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
63 int is_ap, u8 flags,
64 struct ieee80211_sta_ht_cap *ht_info); 63 struct ieee80211_sta_ht_cap *ht_info);
65void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); 64void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
66int iwl_sta_rx_agg_start(struct iwl_priv *priv, 65int iwl_sta_rx_agg_start(struct iwl_priv *priv,
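For the renamed helper declared above, the call shape is now iwl_add_station(priv, addr, is_ap, flags, ht_info): is_ap becomes bool and the command flags follow it directly. A small sketch of the two call patterns used elsewhere in this patch; the wrapper names are hypothetical, while iwl_bcast_addr, CMD_SYNC and CMD_ASYNC are existing driver symbols.

/* Illustrative wrappers only; they mirror the call sites in iwl-sta.c and
 * iwl3945-base.c after the rename. */
static u8 iwl_example_add_bcast_sta(struct iwl_priv *priv)
{
	/* Synchronous add, e.g. while (re)configuring an interface. */
	return iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
}

static u8 iwl_example_add_peer_sta(struct iwl_priv *priv, const u8 *addr)
{
	/* Asynchronous add from the TX path, where sleeping is not allowed. */
	return iwl_add_station(priv, addr, false, CMD_ASYNC, NULL);
}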
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 5c10b87d0336..83d31606dd00 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -95,144 +95,6 @@ struct iwl_mod_params iwl3945_mod_params = {
95 /* the rest are 0 by default */ 95 /* the rest are 0 by default */
96}; 96};
97 97
98/*************** STATION TABLE MANAGEMENT ****
99 * mac80211 should be examined to determine if sta_info is duplicating
100 * the functionality provided here
101 */
102
103/**************************************************************/
104#if 0 /* temporary disable till we add real remove station */
105/**
106 * iwl3945_remove_station - Remove driver's knowledge of station.
107 *
108 * NOTE: This does not remove station from device's station table.
109 */
110static u8 iwl3945_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
111{
112 int index = IWL_INVALID_STATION;
113 int i;
114 unsigned long flags;
115
116 spin_lock_irqsave(&priv->sta_lock, flags);
117
118 if (is_ap)
119 index = IWL_AP_ID;
120 else if (is_broadcast_ether_addr(addr))
121 index = priv->hw_params.bcast_sta_id;
122 else
123 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
124 if (priv->stations_39[i].used &&
125 !compare_ether_addr(priv->stations_39[i].sta.sta.addr,
126 addr)) {
127 index = i;
128 break;
129 }
130
131 if (unlikely(index == IWL_INVALID_STATION))
132 goto out;
133
134 if (priv->stations_39[index].used) {
135 priv->stations_39[index].used = 0;
136 priv->num_stations--;
137 }
138
139 BUG_ON(priv->num_stations < 0);
140
141out:
142 spin_unlock_irqrestore(&priv->sta_lock, flags);
143 return 0;
144}
145#endif
146
147/**
148 * iwl3945_clear_stations_table - Clear the driver's station table
149 *
150 * NOTE: This does not clear or otherwise alter the device's station table.
151 */
152void iwl3945_clear_stations_table(struct iwl_priv *priv)
153{
154 unsigned long flags;
155
156 spin_lock_irqsave(&priv->sta_lock, flags);
157
158 priv->num_stations = 0;
159 memset(priv->stations_39, 0, sizeof(priv->stations_39));
160
161 spin_unlock_irqrestore(&priv->sta_lock, flags);
162}
163
164/**
165 * iwl3945_add_station - Add station to station tables in driver and device
166 */
167u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags, struct ieee80211_sta_ht_cap *ht_info)
168{
169 int i;
170 int index = IWL_INVALID_STATION;
171 struct iwl3945_station_entry *station;
172 unsigned long flags_spin;
173 u8 rate;
174
175 spin_lock_irqsave(&priv->sta_lock, flags_spin);
176 if (is_ap)
177 index = IWL_AP_ID;
178 else if (is_broadcast_ether_addr(addr))
179 index = priv->hw_params.bcast_sta_id;
180 else
181 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
182 if (!compare_ether_addr(priv->stations_39[i].sta.sta.addr,
183 addr)) {
184 index = i;
185 break;
186 }
187
188 if (!priv->stations_39[i].used &&
189 index == IWL_INVALID_STATION)
190 index = i;
191 }
192
193 /* These two conditions has the same outcome but keep them separate
194 since they have different meaning */
195 if (unlikely(index == IWL_INVALID_STATION)) {
196 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
197 return index;
198 }
199
200 if (priv->stations_39[index].used &&
201 !compare_ether_addr(priv->stations_39[index].sta.sta.addr, addr)) {
202 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
203 return index;
204 }
205
206 IWL_DEBUG_ASSOC(priv, "Add STA ID %d: %pM\n", index, addr);
207 station = &priv->stations_39[index];
208 station->used = 1;
209 priv->num_stations++;
210
211 /* Set up the REPLY_ADD_STA command to send to device */
212 memset(&station->sta, 0, sizeof(struct iwl3945_addsta_cmd));
213 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
214 station->sta.mode = 0;
215 station->sta.sta.sta_id = index;
216 station->sta.station_flags = 0;
217
218 if (priv->band == IEEE80211_BAND_5GHZ)
219 rate = IWL_RATE_6M_PLCP;
220 else
221 rate = IWL_RATE_1M_PLCP;
222
223 /* Turn on both antennas for the station... */
224 station->sta.rate_n_flags =
225 iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);
226
227 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
228
229 /* Add station to device's station table */
230 iwl_send_add_sta(priv,
231 (struct iwl_addsta_cmd *)&station->sta, flags);
232 return index;
233
234}
235
236/** 98/**
237 * iwl3945_get_antenna_flags - Get antenna flags for RXON command 99 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
238 * @priv: eeprom and antenna fields are used to determine antenna flags 100 * @priv: eeprom and antenna fields are used to determine antenna flags
@@ -289,32 +151,31 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
289 key_flags &= ~STA_KEY_FLG_INVALID; 151 key_flags &= ~STA_KEY_FLG_INVALID;
290 152
291 spin_lock_irqsave(&priv->sta_lock, flags); 153 spin_lock_irqsave(&priv->sta_lock, flags);
292 priv->stations_39[sta_id].keyinfo.alg = keyconf->alg; 154 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
293 priv->stations_39[sta_id].keyinfo.keylen = keyconf->keylen; 155 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
294 memcpy(priv->stations_39[sta_id].keyinfo.key, keyconf->key, 156 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
295 keyconf->keylen); 157 keyconf->keylen);
296 158
297 memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key, 159 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
298 keyconf->keylen); 160 keyconf->keylen);
299 161
300 if ((priv->stations_39[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) 162 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
301 == STA_KEY_FLG_NO_ENC) 163 == STA_KEY_FLG_NO_ENC)
302 priv->stations_39[sta_id].sta.key.key_offset = 164 priv->stations[sta_id].sta.key.key_offset =
303 iwl_get_free_ucode_key_index(priv); 165 iwl_get_free_ucode_key_index(priv);
304 /* else, we are overriding an existing key => no need to allocated room 166 /* else, we are overriding an existing key => no need to allocated room
305 * in uCode. */ 167 * in uCode. */
306 168
307 WARN(priv->stations_39[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 169 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
308 "no space for a new key"); 170 "no space for a new key");
309 171
310 priv->stations_39[sta_id].sta.key.key_flags = key_flags; 172 priv->stations[sta_id].sta.key.key_flags = key_flags;
311 priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 173 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
312 priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 174 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
313 175
314 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); 176 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
315 177
316 ret = iwl_send_add_sta(priv, 178 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
317 (struct iwl_addsta_cmd *)&priv->stations_39[sta_id].sta, CMD_ASYNC);
318 179
319 spin_unlock_irqrestore(&priv->sta_lock, flags); 180 spin_unlock_irqrestore(&priv->sta_lock, flags);
320 181
@@ -340,17 +201,16 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
340 unsigned long flags; 201 unsigned long flags;
341 202
342 spin_lock_irqsave(&priv->sta_lock, flags); 203 spin_lock_irqsave(&priv->sta_lock, flags);
343 memset(&priv->stations_39[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); 204 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
344 memset(&priv->stations_39[sta_id].sta.key, 0, 205 memset(&priv->stations[sta_id].sta.key, 0,
345 sizeof(struct iwl4965_keyinfo)); 206 sizeof(struct iwl4965_keyinfo));
346 priv->stations_39[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 207 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
347 priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 208 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
348 priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 209 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
349 spin_unlock_irqrestore(&priv->sta_lock, flags); 210 spin_unlock_irqrestore(&priv->sta_lock, flags);
350 211
351 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); 212 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
352 iwl_send_add_sta(priv, 213 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 0);
353 (struct iwl_addsta_cmd *)&priv->stations_39[sta_id].sta, 0);
354 return 0; 214 return 0;
355} 215}
356 216
@@ -578,7 +438,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
578 int sta_id) 438 int sta_id)
579{ 439{
580 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 440 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
581 struct iwl_hw_key *keyinfo = &priv->stations_39[sta_id].keyinfo; 441 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
582 442
583 switch (keyinfo->alg) { 443 switch (keyinfo->alg) {
584 case ALG_CCMP: 444 case ALG_CCMP:
@@ -753,7 +613,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
753 if (ieee80211_is_data_qos(fc)) { 613 if (ieee80211_is_data_qos(fc)) {
754 qc = ieee80211_get_qos_ctl(hdr); 614 qc = ieee80211_get_qos_ctl(hdr);
755 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 615 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
756 seq_number = priv->stations_39[sta_id].tid[tid].seq_number & 616 seq_number = priv->stations[sta_id].tid[tid].seq_number &
757 IEEE80211_SCTL_SEQ; 617 IEEE80211_SCTL_SEQ;
758 hdr->seq_ctrl = cpu_to_le16(seq_number) | 618 hdr->seq_ctrl = cpu_to_le16(seq_number) |
759 (hdr->seq_ctrl & 619 (hdr->seq_ctrl &
@@ -813,7 +673,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
813 if (!ieee80211_has_morefrags(hdr->frame_control)) { 673 if (!ieee80211_has_morefrags(hdr->frame_control)) {
814 txq->need_update = 1; 674 txq->need_update = 1;
815 if (qc) 675 if (qc)
816 priv->stations_39[sta_id].tid[tid].seq_number = seq_number; 676 priv->stations[sta_id].tid[tid].seq_number = seq_number;
817 } else { 677 } else {
818 wait_write_ptr = 1; 678 wait_write_ptr = 1;
819 txq->need_update = 0; 679 txq->need_update = 0;
@@ -1149,18 +1009,12 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
 		clear_bit(STATUS_RF_KILL_HW, &priv->status);
 
 
-	if (flags & SW_CARD_DISABLED)
-		set_bit(STATUS_RF_KILL_SW, &priv->status);
-	else
-		clear_bit(STATUS_RF_KILL_SW, &priv->status);
-
 	iwl_scan_cancel(priv);
 
 	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
-	     test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
-	    (test_bit(STATUS_RF_KILL_SW, &status) !=
-	     test_bit(STATUS_RF_KILL_SW, &priv->status)))
-		queue_work(priv->workqueue, &priv->rf_kill);
+	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
+		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+				test_bit(STATUS_RF_KILL_HW, &priv->status));
 	else
 		wake_up_interruptible(&priv->wait_command_queue);
 }
@@ -1316,7 +1170,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
1316 1170
1317 /* If we've added more space for the firmware to place data, tell it. 1171 /* If we've added more space for the firmware to place data, tell it.
1318 * Increment device's write pointer in multiples of 8. */ 1172 * Increment device's write pointer in multiples of 8. */
1319 if ((write != (rxq->write & ~0x7)) 1173 if ((rxq->write_actual != (rxq->write & ~0x7))
1320 || (abs(rxq->write - rxq->read) > 7)) { 1174 || (abs(rxq->write - rxq->read) > 7)) {
1321 spin_lock_irqsave(&rxq->lock, flags); 1175 spin_lock_irqsave(&rxq->lock, flags);
1322 rxq->need_update = 1; 1176 rxq->need_update = 1;
@@ -1337,7 +1191,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
1337 * Also restock the Rx queue via iwl3945_rx_queue_restock. 1191 * Also restock the Rx queue via iwl3945_rx_queue_restock.
1338 * This is called as a scheduled work item (except for during initialization) 1192 * This is called as a scheduled work item (except for during initialization)
1339 */ 1193 */
1340static void iwl3945_rx_allocate(struct iwl_priv *priv) 1194static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1341{ 1195{
1342 struct iwl_rx_queue *rxq = &priv->rxq; 1196 struct iwl_rx_queue *rxq = &priv->rxq;
1343 struct list_head *element; 1197 struct list_head *element;
@@ -1360,7 +1214,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
1360 /* Alloc a new receive buffer */ 1214 /* Alloc a new receive buffer */
1361 rxb->skb = 1215 rxb->skb =
1362 alloc_skb(priv->hw_params.rx_buf_size, 1216 alloc_skb(priv->hw_params.rx_buf_size,
1363 GFP_KERNEL); 1217 priority);
1364 if (!rxb->skb) { 1218 if (!rxb->skb) {
1365 if (net_ratelimit()) 1219 if (net_ratelimit())
1366 IWL_CRIT(priv, ": Can not allocate SKB buffers\n"); 1220 IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
@@ -1419,6 +1273,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1419 * not restocked the Rx queue with fresh buffers */ 1273 * not restocked the Rx queue with fresh buffers */
1420 rxq->read = rxq->write = 0; 1274 rxq->read = rxq->write = 0;
1421 rxq->free_count = 0; 1275 rxq->free_count = 0;
1276 rxq->write_actual = 0;
1422 spin_unlock_irqrestore(&rxq->lock, flags); 1277 spin_unlock_irqrestore(&rxq->lock, flags);
1423} 1278}
1424 1279
@@ -1427,13 +1282,21 @@ void iwl3945_rx_replenish(void *data)
 	struct iwl_priv *priv = data;
 	unsigned long flags;
 
-	iwl3945_rx_allocate(priv);
+	iwl3945_rx_allocate(priv, GFP_KERNEL);
 
 	spin_lock_irqsave(&priv->lock, flags);
 	iwl3945_rx_queue_restock(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
+{
+	iwl3945_rx_allocate(priv, GFP_ATOMIC);
+
+	iwl3945_rx_queue_restock(priv);
+}
+
+
 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
  * If an SKB has been detached, the POOL needs to have its SKB set to NULL
  * This free routine walks the list of POOL entries and if SKB is set to
@@ -1556,13 +1419,19 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 	unsigned long flags;
 	u8 fill_rx = 0;
 	u32 count = 8;
+	int total_empty = 0;
 
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
 	 * buffer that the driver may process (last buffer filled by ucode). */
 	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
 	i = rxq->read;
 
-	if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
+	/* calculate total frames need to be restock after handling RX */
+	total_empty = r - priv->rxq.write_actual;
+	if (total_empty < 0)
+		total_empty += RX_QUEUE_SIZE;
+
+	if (total_empty > (RX_QUEUE_SIZE / 2))
 		fill_rx = 1;
 	/* Rx interrupt, but nothing sent from uCode */
 	if (i == r)
@@ -1639,7 +1508,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1639 count++; 1508 count++;
1640 if (count >= 8) { 1509 if (count >= 8) {
1641 priv->rxq.read = i; 1510 priv->rxq.read = i;
1642 iwl3945_rx_queue_restock(priv); 1511 iwl3945_rx_replenish_now(priv);
1643 count = 0; 1512 count = 0;
1644 } 1513 }
1645 } 1514 }
@@ -1647,7 +1516,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 
 	/* Backtrack one entry */
 	priv->rxq.read = i;
-	iwl3945_rx_queue_restock(priv);
+	if (fill_rx)
+		iwl3945_rx_replenish_now(priv);
+	else
+		iwl3945_rx_queue_restock(priv);
 }
 
 /* call this function to flush any scheduled tasklet */
@@ -2589,7 +2461,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2589 goto restart; 2461 goto restart;
2590 } 2462 }
2591 2463
2592 priv->cfg->ops->smgmt->clear_station_table(priv); 2464 iwl_clear_stations_table(priv);
2593 2465
2594 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); 2466 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
2595 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); 2467 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
@@ -2681,7 +2553,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
2681 set_bit(STATUS_EXIT_PENDING, &priv->status); 2553 set_bit(STATUS_EXIT_PENDING, &priv->status);
2682 2554
2683 iwl3945_led_unregister(priv); 2555 iwl3945_led_unregister(priv);
2684 priv->cfg->ops->smgmt->clear_station_table(priv); 2556 iwl_clear_stations_table(priv);
2685 2557
2686 /* Unblock any waiting calls */ 2558 /* Unblock any waiting calls */
2687 wake_up_interruptible_all(&priv->wait_command_queue); 2559 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2708,8 +2580,6 @@ static void __iwl3945_down(struct iwl_priv *priv)
2708 if (!iwl_is_init(priv)) { 2580 if (!iwl_is_init(priv)) {
2709 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << 2581 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2710 STATUS_RF_KILL_HW | 2582 STATUS_RF_KILL_HW |
2711 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
2712 STATUS_RF_KILL_SW |
2713 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 2583 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2714 STATUS_GEO_CONFIGURED | 2584 STATUS_GEO_CONFIGURED |
2715 test_bit(STATUS_EXIT_PENDING, &priv->status) << 2585 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
@@ -2718,11 +2588,9 @@ static void __iwl3945_down(struct iwl_priv *priv)
2718 } 2588 }
2719 2589
2720 /* ...otherwise clear out all the status bits but the RF Kill 2590 /* ...otherwise clear out all the status bits but the RF Kill
2721 * bits and continue taking the NIC down. */ 2591 * bit and continue taking the NIC down. */
2722 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << 2592 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2723 STATUS_RF_KILL_HW | 2593 STATUS_RF_KILL_HW |
2724 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
2725 STATUS_RF_KILL_SW |
2726 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 2594 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2727 STATUS_GEO_CONFIGURED | 2595 STATUS_GEO_CONFIGURED |
2728 test_bit(STATUS_FW_ERROR, &priv->status) << 2596 test_bit(STATUS_FW_ERROR, &priv->status) <<
@@ -2779,12 +2647,6 @@ static int __iwl3945_up(struct iwl_priv *priv)
2779 return -EIO; 2647 return -EIO;
2780 } 2648 }
2781 2649
2782 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
2783 IWL_WARN(priv, "Radio disabled by SW RF kill (module "
2784 "parameter)\n");
2785 return -ENODEV;
2786 }
2787
2788 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 2650 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2789 IWL_ERR(priv, "ucode not available for device bring up\n"); 2651 IWL_ERR(priv, "ucode not available for device bring up\n");
2790 return -EIO; 2652 return -EIO;
@@ -2833,7 +2695,7 @@ static int __iwl3945_up(struct iwl_priv *priv)
2833 2695
2834 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2696 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2835 2697
2836 priv->cfg->ops->smgmt->clear_station_table(priv); 2698 iwl_clear_stations_table(priv);
2837 2699
2838 /* load bootstrap state machine, 2700 /* load bootstrap state machine,
2839 * load bootstrap program into processor's memory, 2701 * load bootstrap program into processor's memory,
@@ -2901,15 +2763,14 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
2901{ 2763{
2902 struct iwl_priv *priv = 2764 struct iwl_priv *priv =
2903 container_of(data, struct iwl_priv, rfkill_poll.work); 2765 container_of(data, struct iwl_priv, rfkill_poll.work);
2904 unsigned long status = priv->status;
2905 2766
2906 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2767 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2907 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2768 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2908 else 2769 else
2909 set_bit(STATUS_RF_KILL_HW, &priv->status); 2770 set_bit(STATUS_RF_KILL_HW, &priv->status);
2910 2771
2911 if (test_bit(STATUS_RF_KILL_HW, &status) != test_bit(STATUS_RF_KILL_HW, &priv->status)) 2772 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
2912 queue_work(priv->workqueue, &priv->rf_kill); 2773 test_bit(STATUS_RF_KILL_HW, &priv->status));
2913 2774
2914 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 2775 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
2915 round_jiffies_relative(2 * HZ)); 2776 round_jiffies_relative(2 * HZ));
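
With the driver-private rf_kill work item gone, the poll handler now reports the hardware kill-switch state straight to the wireless core via wiphy_rfkill_set_hw_state() on every pass, instead of comparing against a snapshot and queueing work. A hedged userspace sketch of that flow, where read_gp_cntrl() and report_hw_rfkill() stand in for iwl_read32(CSR_GP_CNTRL) and wiphy_rfkill_set_hw_state(), and the flag bit is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define GP_CNTRL_HW_RF_KILL_SW   (1u << 27)      /* illustrative flag bit */

static unsigned int read_gp_cntrl(void)
{
        return GP_CNTRL_HW_RF_KILL_SW;           /* pretend the switch is off */
}

static void report_hw_rfkill(bool blocked)
{
        printf("hw rfkill %s\n", blocked ? "blocked" : "unblocked");
}

static void rfkill_poll(bool *rf_kill_hw)
{
        /* Flag set means the radio is allowed to run, i.e. not killed. */
        if (read_gp_cntrl() & GP_CNTRL_HW_RF_KILL_SW)
                *rf_kill_hw = false;
        else
                *rf_kill_hw = true;

        /* Report unconditionally; the rfkill core is expected to filter
         * out reports that do not actually change the state. */
        report_hw_rfkill(*rf_kill_hw);
}

int main(void)
{
        bool rf_kill_hw = true;

        rfkill_poll(&rf_kill_hw);   /* the driver re-arms this as a 2*HZ delayed work */
        return 0;
}
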
@@ -3141,7 +3002,6 @@ static void iwl3945_bg_up(struct work_struct *data)
3141 mutex_lock(&priv->mutex); 3002 mutex_lock(&priv->mutex);
3142 __iwl3945_up(priv); 3003 __iwl3945_up(priv);
3143 mutex_unlock(&priv->mutex); 3004 mutex_unlock(&priv->mutex);
3144 iwl_rfkill_set_hw_state(priv);
3145} 3005}
3146 3006
3147static void iwl3945_bg_restart(struct work_struct *data) 3007static void iwl3945_bg_restart(struct work_struct *data)
@@ -3247,7 +3107,7 @@ void iwl3945_post_associate(struct iwl_priv *priv)
3247 case NL80211_IFTYPE_ADHOC: 3107 case NL80211_IFTYPE_ADHOC:
3248 3108
3249 priv->assoc_id = 1; 3109 priv->assoc_id = 1;
3250 priv->cfg->ops->smgmt->add_station(priv, priv->bssid, 0, 0, NULL); 3110 iwl_add_station(priv, priv->bssid, 0, CMD_SYNC, NULL);
3251 iwl3945_sync_sta(priv, IWL_STA_ID, 3111 iwl3945_sync_sta(priv, IWL_STA_ID,
3252 (priv->band == IEEE80211_BAND_5GHZ) ? 3112 (priv->band == IEEE80211_BAND_5GHZ) ?
3253 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP, 3113 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
@@ -3304,8 +3164,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
3304 3164
3305 mutex_unlock(&priv->mutex); 3165 mutex_unlock(&priv->mutex);
3306 3166
3307 iwl_rfkill_set_hw_state(priv);
3308
3309 if (ret) 3167 if (ret)
3310 goto out_release_irq; 3168 goto out_release_irq;
3311 3169
@@ -3438,7 +3296,7 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3438 /* restore RXON assoc */ 3296 /* restore RXON assoc */
3439 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3297 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
3440 iwlcore_commit_rxon(priv); 3298 iwlcore_commit_rxon(priv);
3441 priv->cfg->ops->smgmt->add_station(priv, iwl_bcast_addr, 0, 0, NULL); 3299 iwl_add_station(priv, iwl_bcast_addr, 0, CMD_SYNC, NULL);
3442 } 3300 }
3443 iwl3945_send_beacon_cmd(priv); 3301 iwl3945_send_beacon_cmd(priv);
3444 3302
@@ -3469,7 +3327,7 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3469 static_key = !iwl_is_associated(priv); 3327 static_key = !iwl_is_associated(priv);
3470 3328
3471 if (!static_key) { 3329 if (!static_key) {
3472 sta_id = priv->cfg->ops->smgmt->find_station(priv, addr); 3330 sta_id = iwl_find_station(priv, addr);
3473 if (sta_id == IWL_INVALID_STATION) { 3331 if (sta_id == IWL_INVALID_STATION) {
3474 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", 3332 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
3475 addr); 3333 addr);
@@ -3958,7 +3816,6 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3958 INIT_WORK(&priv->up, iwl3945_bg_up); 3816 INIT_WORK(&priv->up, iwl3945_bg_up);
3959 INIT_WORK(&priv->restart, iwl3945_bg_restart); 3817 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3960 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); 3818 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3961 INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
3962 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 3819 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
3963 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 3820 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3964 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3821 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
@@ -4044,7 +3901,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
4044 mutex_init(&priv->mutex); 3901 mutex_init(&priv->mutex);
4045 3902
4046 /* Clear the driver's (not device's) station table */ 3903 /* Clear the driver's (not device's) station table */
4047 priv->cfg->ops->smgmt->clear_station_table(priv); 3904 iwl_clear_stations_table(priv);
4048 3905
4049 priv->data_retry_limit = -1; 3906 priv->data_retry_limit = -1;
4050 priv->ieee_channels = NULL; 3907 priv->ieee_channels = NULL;
@@ -4325,13 +4182,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4325 if (err) 4182 if (err)
4326 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); 4183 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
4327 4184
4328 err = iwl_rfkill_init(priv);
4329 if (err)
4330 IWL_ERR(priv, "Unable to initialize RFKILL system. "
4331 "Ignoring error: %d\n", err);
4332 else
4333 iwl_rfkill_set_hw_state(priv);
4334
4335 /* Start monitoring the killswitch */ 4185 /* Start monitoring the killswitch */
4336 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 4186 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
4337 2 * HZ); 4187 2 * HZ);
@@ -4397,7 +4247,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4397 4247
4398 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 4248 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4399 4249
4400 iwl_rfkill_unregister(priv);
4401 cancel_delayed_work_sync(&priv->rfkill_poll); 4250 cancel_delayed_work_sync(&priv->rfkill_poll);
4402 4251
4403 iwl3945_dealloc_ucode_pci(priv); 4252 iwl3945_dealloc_ucode_pci(priv);
@@ -4407,7 +4256,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4407 iwl3945_hw_txq_ctx_free(priv); 4256 iwl3945_hw_txq_ctx_free(priv);
4408 4257
4409 iwl3945_unset_hw_params(priv); 4258 iwl3945_unset_hw_params(priv);
4410 priv->cfg->ops->smgmt->clear_station_table(priv); 4259 iwl_clear_stations_table(priv);
4411 4260
4412 /*netif_stop_queue(dev); */ 4261 /*netif_stop_queue(dev); */
4413 flush_workqueue(priv->workqueue); 4262 flush_workqueue(priv->workqueue);
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index 41bd4b2b5411..1eccb6df46dd 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -1,10 +1,9 @@
1config IWM 1config IWM
2 tristate "Intel Wireless Multicomm 3200 WiFi driver" 2 tristate "Intel Wireless Multicomm 3200 WiFi driver"
3 depends on MMC && WLAN_80211 && EXPERIMENTAL 3 depends on MMC && WLAN_80211 && EXPERIMENTAL
4 depends on CFG80211
4 select WIRELESS_EXT 5 select WIRELESS_EXT
5 select CFG80211
6 select FW_LOADER 6 select FW_LOADER
7 select RFKILL
8 7
9config IWM_DEBUG 8config IWM_DEBUG
10 bool "Enable full debugging output in iwmc3200wifi" 9 bool "Enable full debugging output in iwmc3200wifi"
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
index 7cb415e5c11b..927f022545c1 100644
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ b/drivers/net/wireless/iwmc3200wifi/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_IWM) := iwmc3200wifi.o 1obj-$(CONFIG_IWM) := iwmc3200wifi.o
2iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o 2iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
3iwmc3200wifi-objs += commands.o wext.o cfg80211.o eeprom.o rfkill.o 3iwmc3200wifi-objs += commands.o wext.o cfg80211.o eeprom.o
4 4
5iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o 5iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 3256ad2c96ce..96f714e6e12b 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -268,7 +268,7 @@ static int iwm_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
268 268
269 iwm->conf.frag_threshold = wiphy->frag_threshold; 269 iwm->conf.frag_threshold = wiphy->frag_threshold;
270 270
271 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, 271 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
272 CFG_FRAG_THRESHOLD, 272 CFG_FRAG_THRESHOLD,
273 iwm->conf.frag_threshold); 273 iwm->conf.frag_threshold);
274 if (ret < 0) 274 if (ret < 0)
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
index db4ba0864730..ec1a15a5a0e4 100644
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ b/drivers/net/wireless/iwmc3200wifi/fw.c
@@ -72,7 +72,7 @@ static int iwm_fw_op_offset(struct iwm_priv *iwm, const struct firmware *fw,
72 } 72 }
73 73
74 if (fw->size < IWM_HDR_LEN) { 74 if (fw->size < IWM_HDR_LEN) {
75 IWM_ERR(iwm, "FW is too small (%d)\n", fw->size); 75 IWM_ERR(iwm, "FW is too small (%zu)\n", fw->size);
76 return -EINVAL; 76 return -EINVAL;
77 } 77 }
78 78
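
The format-string fix above matters because fw->size is a size_t; %d pairs a signed int conversion with a size_t argument, while %zu is the matching specifier. A short illustration:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t fw_size = 310;

        /* %zu is the conversion for size_t; %d would mismatch on 64-bit builds. */
        printf("FW is too small (%zu)\n", fw_size);
        return 0;
}
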
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 3b29681792bb..635c16ee6186 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -343,8 +343,4 @@ int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
343 struct iwm_wifi_cmd *cmd); 343 struct iwm_wifi_cmd *cmd);
344void iwm_rx_free(struct iwm_priv *iwm); 344void iwm_rx_free(struct iwm_priv *iwm);
345 345
346/* RF Kill API */
347int iwm_rfkill_init(struct iwm_priv *iwm);
348void iwm_rfkill_exit(struct iwm_priv *iwm);
349
350#endif 346#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index eec7201e91a8..68e2c3b6c7a1 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -136,17 +136,8 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
136 136
137 wdev->netdev = ndev; 137 wdev->netdev = ndev;
138 138
139 ret = iwm_rfkill_init(iwm);
140 if (ret) {
141 dev_err(dev, "Failed to init rfkill\n");
142 goto out_rfkill;
143 }
144
145 return iwm; 139 return iwm;
146 140
147 out_rfkill:
148 unregister_netdev(ndev);
149
150 out_ndev: 141 out_ndev:
151 free_netdev(ndev); 142 free_netdev(ndev);
152 143
@@ -162,7 +153,6 @@ void iwm_if_free(struct iwm_priv *iwm)
162 if (!iwm_to_ndev(iwm)) 153 if (!iwm_to_ndev(iwm))
163 return; 154 return;
164 155
165 iwm_rfkill_exit(iwm);
166 unregister_netdev(iwm_to_ndev(iwm)); 156 unregister_netdev(iwm_to_ndev(iwm));
167 free_netdev(iwm_to_ndev(iwm)); 157 free_netdev(iwm_to_ndev(iwm));
168 iwm_wdev_free(iwm); 158 iwm_wdev_free(iwm);
diff --git a/drivers/net/wireless/iwmc3200wifi/rfkill.c b/drivers/net/wireless/iwmc3200wifi/rfkill.c
deleted file mode 100644
index 4ca8b495f82d..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/rfkill.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
5 * Samuel Ortiz <samuel.ortiz@intel.com>
6 * Zhu Yi <yi.zhu@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23
24#include <linux/rfkill.h>
25
26#include "iwm.h"
27
28static int iwm_rfkill_soft_toggle(void *data, enum rfkill_state state)
29{
30 struct iwm_priv *iwm = data;
31
32 switch (state) {
33 case RFKILL_STATE_UNBLOCKED:
34 if (test_bit(IWM_RADIO_RFKILL_HW, &iwm->radio))
35 return -EBUSY;
36
37 if (test_and_clear_bit(IWM_RADIO_RFKILL_SW, &iwm->radio) &&
38 (iwm_to_ndev(iwm)->flags & IFF_UP))
39 iwm_up(iwm);
40
41 break;
42 case RFKILL_STATE_SOFT_BLOCKED:
43 if (!test_and_set_bit(IWM_RADIO_RFKILL_SW, &iwm->radio))
44 iwm_down(iwm);
45
46 break;
47 default:
48 break;
49 }
50
51 return 0;
52}
53
54int iwm_rfkill_init(struct iwm_priv *iwm)
55{
56 int ret;
57
58 iwm->rfkill = rfkill_allocate(iwm_to_dev(iwm), RFKILL_TYPE_WLAN);
59 if (!iwm->rfkill) {
60 IWM_ERR(iwm, "Unable to allocate rfkill device\n");
61 return -ENOMEM;
62 }
63
64 iwm->rfkill->name = KBUILD_MODNAME;
65 iwm->rfkill->data = iwm;
66 iwm->rfkill->state = RFKILL_STATE_UNBLOCKED;
67 iwm->rfkill->toggle_radio = iwm_rfkill_soft_toggle;
68
69 ret = rfkill_register(iwm->rfkill);
70 if (ret) {
71 IWM_ERR(iwm, "Failed to register rfkill device\n");
72 goto fail;
73 }
74
75 return 0;
76 fail:
77 rfkill_free(iwm->rfkill);
78 return ret;
79}
80
81void iwm_rfkill_exit(struct iwm_priv *iwm)
82{
83 if (iwm->rfkill)
84 rfkill_unregister(iwm->rfkill);
85
86 rfkill_free(iwm->rfkill);
87 iwm->rfkill = NULL;
88}
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index edc0a0091058..b54da677b371 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -395,7 +395,7 @@ static struct iwm_if_ops if_sdio_ops = {
395 .debugfs_init = if_sdio_debugfs_init, 395 .debugfs_init = if_sdio_debugfs_init,
396 .debugfs_exit = if_sdio_debugfs_exit, 396 .debugfs_exit = if_sdio_debugfs_exit,
397 .umac_name = "iwmc3200wifi-umac-sdio.bin", 397 .umac_name = "iwmc3200wifi-umac-sdio.bin",
398 .calib_lmac_name = "iwmc3200wifi-lmac-calib-sdio.bin", 398 .calib_lmac_name = "iwmc3200wifi-calib-sdio.bin",
399 .lmac_name = "iwmc3200wifi-lmac-sdio.bin", 399 .lmac_name = "iwmc3200wifi-lmac-sdio.bin",
400}; 400};
401 401
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
index 4bc46a60ae2f..9a5408e7d94a 100644
--- a/drivers/net/wireless/libertas/11d.c
+++ b/drivers/net/wireless/libertas/11d.c
@@ -207,7 +207,7 @@ static int generate_domain_info_11d(struct parsed_region_chan_11d
207 lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband); 207 lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband);
208 lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo, 208 lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo,
209 COUNTRY_CODE_LEN + 1 + 209 COUNTRY_CODE_LEN + 1 +
210 sizeof(struct ieeetypes_subbandset) * nr_subband); 210 sizeof(struct ieee_subbandset) * nr_subband);
211 return 0; 211 return 0;
212} 212}
213 213
@@ -302,11 +302,9 @@ done:
302 * @param parsed_region_chan pointer to parsed_region_chan_11d 302 * @param parsed_region_chan pointer to parsed_region_chan_11d
303 * @return 0 303 * @return 0
304*/ 304*/
305static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* 305static int parse_domain_info_11d(struct ieee_ie_country_info_full_set *countryinfo,
306 countryinfo,
307 u8 band, 306 u8 band,
308 struct parsed_region_chan_11d * 307 struct parsed_region_chan_11d *parsed_region_chan)
309 parsed_region_chan)
310{ 308{
311 u8 nr_subband, nrchan; 309 u8 nr_subband, nrchan;
312 u8 lastchan, firstchan; 310 u8 lastchan, firstchan;
@@ -331,7 +329,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset*
331 lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30); 329 lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30);
332 330
333 if ((*(countryinfo->countrycode)) == 0 331 if ((*(countryinfo->countrycode)) == 0
334 || (countryinfo->len <= COUNTRY_CODE_LEN)) { 332 || (countryinfo->header.len <= COUNTRY_CODE_LEN)) {
335 /* No region Info or Wrong region info: treat as No 11D info */ 333 /* No region Info or Wrong region info: treat as No 11D info */
336 goto done; 334 goto done;
337 } 335 }
@@ -349,8 +347,8 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset*
349 memcpy(parsed_region_chan->countrycode, countryinfo->countrycode, 347 memcpy(parsed_region_chan->countrycode, countryinfo->countrycode,
350 COUNTRY_CODE_LEN); 348 COUNTRY_CODE_LEN);
351 349
352 nr_subband = (countryinfo->len - COUNTRY_CODE_LEN) / 350 nr_subband = (countryinfo->header.len - COUNTRY_CODE_LEN) /
353 sizeof(struct ieeetypes_subbandset); 351 sizeof(struct ieee_subbandset);
354 352
355 for (j = 0, lastchan = 0; j < nr_subband; j++) { 353 for (j = 0, lastchan = 0; j < nr_subband; j++) {
356 354
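
The renamed types do not change the arithmetic: the number of subbands is still the IE length minus the country string, divided by the 3-byte (first channel, channel count, max TX power) triplet. A small sketch, assuming the conventional 3-octet country code (COUNTRY_CODE_LEN = 3):

#include <stdint.h>
#include <stdio.h>

#define COUNTRY_CODE_LEN 3   /* 3-octet country string, per 802.11d convention */

struct ieee_subbandset {
        uint8_t firstchan;
        uint8_t nrchan;
        uint8_t maxtxpwr;
} __attribute__ ((packed));

int main(void)
{
        /* IE length as carried in header.len: country code + two triplets */
        unsigned int ie_len = COUNTRY_CODE_LEN + 2 * sizeof(struct ieee_subbandset);
        unsigned int nr_subband;

        nr_subband = (ie_len - COUNTRY_CODE_LEN) / sizeof(struct ieee_subbandset);
        printf("nr_subband = %u\n", nr_subband);   /* prints 2 */
        return 0;
}
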
@@ -502,7 +500,7 @@ int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
502{ 500{
503 struct cmd_ds_802_11d_domain_info *pdomaininfo = 501 struct cmd_ds_802_11d_domain_info *pdomaininfo =
504 &cmd->params.domaininfo; 502 &cmd->params.domaininfo;
505 struct mrvlietypes_domainparamset *domain = &pdomaininfo->domain; 503 struct mrvl_ie_domain_param_set *domain = &pdomaininfo->domain;
506 u8 nr_subband = priv->domainreg.nr_subband; 504 u8 nr_subband = priv->domainreg.nr_subband;
507 505
508 lbs_deb_enter(LBS_DEB_11D); 506 lbs_deb_enter(LBS_DEB_11D);
@@ -524,16 +522,16 @@ int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
524 sizeof(domain->countrycode)); 522 sizeof(domain->countrycode));
525 523
526 domain->header.len = 524 domain->header.len =
527 cpu_to_le16(nr_subband * sizeof(struct ieeetypes_subbandset) + 525 cpu_to_le16(nr_subband * sizeof(struct ieee_subbandset) +
528 sizeof(domain->countrycode)); 526 sizeof(domain->countrycode));
529 527
530 if (nr_subband) { 528 if (nr_subband) {
531 memcpy(domain->subband, priv->domainreg.subband, 529 memcpy(domain->subband, priv->domainreg.subband,
532 nr_subband * sizeof(struct ieeetypes_subbandset)); 530 nr_subband * sizeof(struct ieee_subbandset));
533 531
534 cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) + 532 cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
535 le16_to_cpu(domain->header.len) + 533 le16_to_cpu(domain->header.len) +
536 sizeof(struct mrvlietypesheader) + 534 sizeof(struct mrvl_ie_header) +
537 S_DS_GEN); 535 S_DS_GEN);
538 } else { 536 } else {
539 cmd->size = 537 cmd->size =
@@ -556,7 +554,7 @@ done:
556int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp) 554int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
557{ 555{
558 struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp; 556 struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp;
559 struct mrvlietypes_domainparamset *domain = &domaininfo->domain; 557 struct mrvl_ie_domain_param_set *domain = &domaininfo->domain;
560 u16 action = le16_to_cpu(domaininfo->action); 558 u16 action = le16_to_cpu(domaininfo->action);
561 s16 ret = 0; 559 s16 ret = 0;
562 u8 nr_subband = 0; 560 u8 nr_subband = 0;
@@ -567,7 +565,7 @@ int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
567 (int)le16_to_cpu(resp->size)); 565 (int)le16_to_cpu(resp->size));
568 566
569 nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) / 567 nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) /
570 sizeof(struct ieeetypes_subbandset); 568 sizeof(struct ieee_subbandset);
571 569
572 lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband); 570 lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband);
573 571
diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h
index 4f4f47f0f878..fb75d3e321a0 100644
--- a/drivers/net/wireless/libertas/11d.h
+++ b/drivers/net/wireless/libertas/11d.h
@@ -20,35 +20,36 @@
20struct cmd_ds_command; 20struct cmd_ds_command;
21 21
22/** Data structure for Country IE*/ 22/** Data structure for Country IE*/
23struct ieeetypes_subbandset { 23struct ieee_subbandset {
24 u8 firstchan; 24 u8 firstchan;
25 u8 nrchan; 25 u8 nrchan;
26 u8 maxtxpwr; 26 u8 maxtxpwr;
27} __attribute__ ((packed)); 27} __attribute__ ((packed));
28 28
29struct ieeetypes_countryinfoset { 29struct ieee_ie_country_info_set {
30 u8 element_id; 30 struct ieee_ie_header header;
31 u8 len; 31
32 u8 countrycode[COUNTRY_CODE_LEN]; 32 u8 countrycode[COUNTRY_CODE_LEN];
33 struct ieeetypes_subbandset subband[1]; 33 struct ieee_subbandset subband[1];
34}; 34};
35 35
36struct ieeetypes_countryinfofullset { 36struct ieee_ie_country_info_full_set {
37 u8 element_id; 37 struct ieee_ie_header header;
38 u8 len; 38
39 u8 countrycode[COUNTRY_CODE_LEN]; 39 u8 countrycode[COUNTRY_CODE_LEN];
40 struct ieeetypes_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D]; 40 struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
41} __attribute__ ((packed)); 41} __attribute__ ((packed));
42 42
43struct mrvlietypes_domainparamset { 43struct mrvl_ie_domain_param_set {
44 struct mrvlietypesheader header; 44 struct mrvl_ie_header header;
45
45 u8 countrycode[COUNTRY_CODE_LEN]; 46 u8 countrycode[COUNTRY_CODE_LEN];
46 struct ieeetypes_subbandset subband[1]; 47 struct ieee_subbandset subband[1];
47} __attribute__ ((packed)); 48} __attribute__ ((packed));
48 49
49struct cmd_ds_802_11d_domain_info { 50struct cmd_ds_802_11d_domain_info {
50 __le16 action; 51 __le16 action;
51 struct mrvlietypes_domainparamset domain; 52 struct mrvl_ie_domain_param_set domain;
52} __attribute__ ((packed)); 53} __attribute__ ((packed));
53 54
54/** domain regulatory information */ 55/** domain regulatory information */
@@ -57,7 +58,7 @@ struct lbs_802_11d_domain_reg {
57 u8 countrycode[COUNTRY_CODE_LEN]; 58 u8 countrycode[COUNTRY_CODE_LEN];
58 /** No. of subband*/ 59 /** No. of subband*/
59 u8 nr_subband; 60 u8 nr_subband;
60 struct ieeetypes_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D]; 61 struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
61}; 62};
62 63
63struct chan_power_11d { 64struct chan_power_11d {
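
These wire structures rely on __attribute__((packed)) so the header-length arithmetic maps one-to-one onto buffer bytes. A condensed sanity sketch; the 1-byte id / 1-byte len ieee_ie_header layout and the packing of every struct are assumptions of the sketch, and country_info_ie is only a stand-in for the renamed type:

#include <stdint.h>
#include <stdio.h>

#define COUNTRY_CODE_LEN 3

struct ieee_ie_header {          /* assumed: 1-byte element id + 1-byte length */
        uint8_t id;
        uint8_t len;
} __attribute__ ((packed));

struct ieee_subbandset {
        uint8_t firstchan, nrchan, maxtxpwr;
} __attribute__ ((packed));

struct country_info_ie {         /* condensed stand-in for the renamed struct */
        struct ieee_ie_header header;
        uint8_t countrycode[COUNTRY_CODE_LEN];
        struct ieee_subbandset subband[1];
} __attribute__ ((packed));

int main(void)
{
        /* 2 + 3 + 3 = 8 bytes: no compiler padding to throw the
         * header-length arithmetic off the wire format. */
        printf("sizeof(country_info_ie) = %zu\n", sizeof(struct country_info_ie));
        return 0;
}
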
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index a0e440cd8967..b9b374119033 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -12,15 +12,14 @@
12#include "scan.h" 12#include "scan.h"
13#include "cmd.h" 13#include "cmd.h"
14 14
15static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp);
16
17static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) = 15static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) =
18 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 16 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
19static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) = 17static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
20 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 18 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
21 19
22/* The firmware needs certain bits masked out of the beacon-derviced capability 20/* The firmware needs the following bits masked out of the beacon-derived
23 * field when associating/joining to BSSs. 21 * capability field when associating/joining to a BSS:
22 * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused)
24 */ 23 */
25#define CAPINFO_MASK (~(0xda00)) 24#define CAPINFO_MASK (~(0xda00))
26 25
@@ -102,6 +101,295 @@ static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
102} 101}
103 102
104 103
104static u8 iw_auth_to_ieee_auth(u8 auth)
105{
106 if (auth == IW_AUTH_ALG_OPEN_SYSTEM)
107 return 0x00;
108 else if (auth == IW_AUTH_ALG_SHARED_KEY)
109 return 0x01;
110 else if (auth == IW_AUTH_ALG_LEAP)
111 return 0x80;
112
113 lbs_deb_join("%s: invalid auth alg 0x%X\n", __func__, auth);
114 return 0;
115}
116
117/**
118 * @brief This function prepares the authenticate command. AUTHENTICATE only
119 * sets the authentication suite for future associations, as the firmware
120 * handles authentication internally during the ASSOCIATE command.
121 *
122 * @param priv A pointer to struct lbs_private structure
123 * @param bssid The peer BSSID with which to authenticate
124 * @param auth The authentication mode to use (from wireless.h)
125 *
126 * @return 0 or -1
127 */
128static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth)
129{
130 struct cmd_ds_802_11_authenticate cmd;
131 int ret = -1;
132 DECLARE_MAC_BUF(mac);
133
134 lbs_deb_enter(LBS_DEB_JOIN);
135
136 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
137 memcpy(cmd.bssid, bssid, ETH_ALEN);
138
139 cmd.authtype = iw_auth_to_ieee_auth(auth);
140
141 lbs_deb_join("AUTH_CMD: BSSID %s, auth 0x%x\n",
142 print_mac(mac, bssid), cmd.authtype);
143
144 ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd);
145
146 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
147 return ret;
148}
149
150
151static int lbs_assoc_post(struct lbs_private *priv,
152 struct cmd_ds_802_11_associate_response *resp)
153{
154 int ret = 0;
155 union iwreq_data wrqu;
156 struct bss_descriptor *bss;
157 u16 status_code;
158
159 lbs_deb_enter(LBS_DEB_ASSOC);
160
161 if (!priv->in_progress_assoc_req) {
162 lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n");
163 ret = -1;
164 goto done;
165 }
166 bss = &priv->in_progress_assoc_req->bss;
167
168 /*
169 * Older FW versions map the IEEE 802.11 Status Code in the association
170 * response to the following values returned in resp->statuscode:
171 *
172 * IEEE Status Code Marvell Status Code
173 * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
174 * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
175 * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
176 * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
177 * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
178 * others -> 0x0003 ASSOC_RESULT_REFUSED
179 *
180 * Other response codes:
181 * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
182 * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
183 * association response from the AP)
184 */
185
186 status_code = le16_to_cpu(resp->statuscode);
187 if (priv->fwrelease < 0x09000000) {
188 switch (status_code) {
189 case 0x00:
190 break;
191 case 0x01:
192 lbs_deb_assoc("ASSOC_RESP: invalid parameters\n");
193 break;
194 case 0x02:
195 lbs_deb_assoc("ASSOC_RESP: internal timer "
196 "expired while waiting for the AP\n");
197 break;
198 case 0x03:
199 lbs_deb_assoc("ASSOC_RESP: association "
200 "refused by AP\n");
201 break;
202 case 0x04:
203 lbs_deb_assoc("ASSOC_RESP: authentication "
204 "refused by AP\n");
205 break;
206 default:
207 lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x "
208 " unknown\n", status_code);
209 break;
210 }
211 } else {
212 /* v9+ returns the AP's association response */
213 lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x\n", status_code);
214 }
215
216 if (status_code) {
217 lbs_mac_event_disconnected(priv);
218 ret = -1;
219 goto done;
220 }
221
222 lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP",
223 (void *) (resp + sizeof (resp->hdr)),
224 le16_to_cpu(resp->hdr.size) - sizeof (resp->hdr));
225
226 /* Send a Media Connected event, according to the Spec */
227 priv->connect_status = LBS_CONNECTED;
228
229 /* Update current SSID and BSSID */
230 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
231 priv->curbssparams.ssid_len = bss->ssid_len;
232 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
233
234 priv->SNR[TYPE_RXPD][TYPE_AVG] = 0;
235 priv->NF[TYPE_RXPD][TYPE_AVG] = 0;
236
237 memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
238 memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
239 priv->nextSNRNF = 0;
240 priv->numSNRNF = 0;
241
242 netif_carrier_on(priv->dev);
243 if (!priv->tx_pending_len)
244 netif_wake_queue(priv->dev);
245
246 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
247 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
248 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
249
250done:
251 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
252 return ret;
253}
254
255/**
256 * @brief This function prepares an association-class command.
257 *
258 * @param priv A pointer to struct lbs_private structure
259 * @param assoc_req The association request describing the BSS to associate
260 * or reassociate with
261 * @param command The actual command, either CMD_802_11_ASSOCIATE or
262 * CMD_802_11_REASSOCIATE
263 *
264 * @return 0 or -1
265 */
266static int lbs_associate(struct lbs_private *priv,
267 struct assoc_request *assoc_req,
268 u16 command)
269{
270 struct cmd_ds_802_11_associate cmd;
271 int ret = 0;
272 struct bss_descriptor *bss = &assoc_req->bss;
273 u8 *pos = &(cmd.iebuf[0]);
274 u16 tmpcap, tmplen, tmpauth;
275 struct mrvl_ie_ssid_param_set *ssid;
276 struct mrvl_ie_ds_param_set *ds;
277 struct mrvl_ie_cf_param_set *cf;
278 struct mrvl_ie_rates_param_set *rates;
279 struct mrvl_ie_rsn_param_set *rsn;
280 struct mrvl_ie_auth_type *auth;
281
282 lbs_deb_enter(LBS_DEB_ASSOC);
283
284 BUG_ON((command != CMD_802_11_ASSOCIATE) &&
285 (command != CMD_802_11_REASSOCIATE));
286
287 memset(&cmd, 0, sizeof(cmd));
288 cmd.hdr.command = cpu_to_le16(command);
289
290 /* Fill in static fields */
291 memcpy(cmd.bssid, bss->bssid, ETH_ALEN);
292 cmd.listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
293
294 /* Capability info */
295 tmpcap = (bss->capability & CAPINFO_MASK);
296 if (bss->mode == IW_MODE_INFRA)
297 tmpcap |= WLAN_CAPABILITY_ESS;
298 cmd.capability = cpu_to_le16(tmpcap);
299 lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap);
300
301 /* SSID */
302 ssid = (struct mrvl_ie_ssid_param_set *) pos;
303 ssid->header.type = cpu_to_le16(TLV_TYPE_SSID);
304 tmplen = bss->ssid_len;
305 ssid->header.len = cpu_to_le16(tmplen);
306 memcpy(ssid->ssid, bss->ssid, tmplen);
307 pos += sizeof(ssid->header) + tmplen;
308
309 ds = (struct mrvl_ie_ds_param_set *) pos;
310 ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
311 ds->header.len = cpu_to_le16(1);
312 ds->channel = bss->phy.ds.channel;
313 pos += sizeof(ds->header) + 1;
314
315 cf = (struct mrvl_ie_cf_param_set *) pos;
316 cf->header.type = cpu_to_le16(TLV_TYPE_CF);
317 tmplen = sizeof(*cf) - sizeof (cf->header);
318 cf->header.len = cpu_to_le16(tmplen);
319 /* IE payload should be zeroed, firmware fills it in for us */
320 pos += sizeof(*cf);
321
322 rates = (struct mrvl_ie_rates_param_set *) pos;
323 rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
324 memcpy(&rates->rates, &bss->rates, MAX_RATES);
325 tmplen = MAX_RATES;
326 if (get_common_rates(priv, rates->rates, &tmplen)) {
327 ret = -1;
328 goto done;
329 }
330 pos += sizeof(rates->header) + tmplen;
331 rates->header.len = cpu_to_le16(tmplen);
332 lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen);
333
334 /* Copy the infra. association rates into Current BSS state structure */
335 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
336 memcpy(&priv->curbssparams.rates, &rates->rates, tmplen);
337
338 /* Set MSB on basic rates as the firmware requires, but _after_
339 * copying to current bss rates.
340 */
341 lbs_set_basic_rate_flags(rates->rates, tmplen);
342
343 /* Firmware v9+ indicate authentication suites as a TLV */
344 if (priv->fwrelease >= 0x09000000) {
345 DECLARE_MAC_BUF(mac);
346
347 auth = (struct mrvl_ie_auth_type *) pos;
348 auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
349 auth->header.len = cpu_to_le16(2);
350 tmpauth = iw_auth_to_ieee_auth(priv->secinfo.auth_mode);
351 auth->auth = cpu_to_le16(tmpauth);
352 pos += sizeof(auth->header) + 2;
353
354 lbs_deb_join("AUTH_CMD: BSSID %s, auth 0x%x\n",
355 print_mac(mac, bss->bssid), priv->secinfo.auth_mode);
356 }
357
358 /* WPA/WPA2 IEs */
359 if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
360 rsn = (struct mrvl_ie_rsn_param_set *) pos;
361 /* WPA_IE or WPA2_IE */
362 rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]);
363 tmplen = (u16) assoc_req->wpa_ie[1];
364 rsn->header.len = cpu_to_le16(tmplen);
365 memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen);
366 lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: WPA/RSN IE", (u8 *) rsn,
367 sizeof(rsn->header) + tmplen);
368 pos += sizeof(rsn->header) + tmplen;
369 }
370
371 cmd.hdr.size = cpu_to_le16((sizeof(cmd) - sizeof(cmd.iebuf)) +
372 (u16)(pos - (u8 *) &cmd.iebuf));
373
374 /* update curbssparams */
375 priv->curbssparams.channel = bss->phy.ds.channel;
376
377 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
378 ret = -1;
379 goto done;
380 }
381
382 ret = lbs_cmd_with_response(priv, command, &cmd);
383 if (ret == 0) {
384 ret = lbs_assoc_post(priv,
385 (struct cmd_ds_802_11_associate_response *) &cmd);
386 }
387
388done:
389 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
390 return ret;
391}
392
105/** 393/**
106 * @brief Associate to a specific BSS discovered in a scan 394 * @brief Associate to a specific BSS discovered in a scan
107 * 395 *
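
The new lbs_associate() above serializes the SSID, DS, CF, rates, auth-type and WPA/RSN TLVs back-to-back into cmd.iebuf by advancing a byte pointer past each header plus payload. A simplified userspace sketch of that append pattern; the append_tlv() helper, TLV IDs and example values are hypothetical, and the driver additionally converts the header fields with cpu_to_le16():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mrvl_ie_header {          /* 2-byte type + 2-byte length, host order here */
        uint16_t type;
        uint16_t len;
} __attribute__ ((packed));

/* Hypothetical helper: copy one TLV into the buffer and return the position
 * right after it, which is how `pos` advances in the new lbs_associate(). */
static uint8_t *append_tlv(uint8_t *pos, uint16_t type,
                           const void *payload, uint16_t len)
{
        struct mrvl_ie_header hdr = { .type = type, .len = len };

        memcpy(pos, &hdr, sizeof(hdr));
        memcpy(pos + sizeof(hdr), payload, len);
        return pos + sizeof(hdr) + len;
}

int main(void)
{
        uint8_t iebuf[128];
        uint8_t *pos = iebuf;
        const char ssid[] = "example-ssid";      /* hypothetical values */
        uint8_t channel = 6;

        pos = append_tlv(pos, 0x0000 /* SSID-like TLV */, ssid, strlen(ssid));
        pos = append_tlv(pos, 0x0003 /* DS/channel-like TLV */, &channel, 1);

        printf("iebuf used: %td bytes\n", pos - iebuf);
        return 0;
}
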
@@ -110,7 +398,7 @@ static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
110 * 398 *
111 * @return 0-success, otherwise fail 399 * @return 0-success, otherwise fail
112 */ 400 */
113static int lbs_associate(struct lbs_private *priv, 401static int lbs_try_associate(struct lbs_private *priv,
114 struct assoc_request *assoc_req) 402 struct assoc_request *assoc_req)
115{ 403{
116 int ret; 404 int ret;
@@ -118,11 +406,15 @@ static int lbs_associate(struct lbs_private *priv,
118 406
119 lbs_deb_enter(LBS_DEB_ASSOC); 407 lbs_deb_enter(LBS_DEB_ASSOC);
120 408
121 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE, 409 /* FW v9 and higher indicate authentication suites as a TLV in the
122 0, CMD_OPTION_WAITFORRSP, 410 * association command, not as a separate authentication command.
123 0, assoc_req->bss.bssid); 411 */
124 if (ret) 412 if (priv->fwrelease < 0x09000000) {
125 goto out; 413 ret = lbs_set_authentication(priv, assoc_req->bss.bssid,
414 priv->secinfo.auth_mode);
415 if (ret)
416 goto out;
417 }
126 418
127 /* Use short preamble only when both the BSS and firmware support it */ 419 /* Use short preamble only when both the BSS and firmware support it */
128 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) && 420 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
@@ -133,14 +425,78 @@ static int lbs_associate(struct lbs_private *priv,
133 if (ret) 425 if (ret)
134 goto out; 426 goto out;
135 427
136 ret = lbs_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE, 428 ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
137 0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
138 429
139out: 430out:
140 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 431 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
141 return ret; 432 return ret;
142} 433}
143 434
435static int lbs_adhoc_post(struct lbs_private *priv,
436 struct cmd_ds_802_11_ad_hoc_result *resp)
437{
438 int ret = 0;
439 u16 command = le16_to_cpu(resp->hdr.command);
440 u16 result = le16_to_cpu(resp->hdr.result);
441 union iwreq_data wrqu;
442 struct bss_descriptor *bss;
443 DECLARE_SSID_BUF(ssid);
444
445 lbs_deb_enter(LBS_DEB_JOIN);
446
447 if (!priv->in_progress_assoc_req) {
448 lbs_deb_join("ADHOC_RESP: no in-progress association "
449 "request\n");
450 ret = -1;
451 goto done;
452 }
453 bss = &priv->in_progress_assoc_req->bss;
454
455 /*
456 * Join result code 0 --> SUCCESS
457 */
458 if (result) {
459 lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
460 if (priv->connect_status == LBS_CONNECTED)
461 lbs_mac_event_disconnected(priv);
462 ret = -1;
463 goto done;
464 }
465
466 /* Send a Media Connected event, according to the Spec */
467 priv->connect_status = LBS_CONNECTED;
468
469 if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
470 /* Update the created network descriptor with the new BSSID */
471 memcpy(bss->bssid, resp->bssid, ETH_ALEN);
472 }
473
474 /* Set the BSSID from the joined/started descriptor */
475 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
476
477 /* Set the new SSID to current SSID */
478 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
479 priv->curbssparams.ssid_len = bss->ssid_len;
480
481 netif_carrier_on(priv->dev);
482 if (!priv->tx_pending_len)
483 netif_wake_queue(priv->dev);
484
485 memset(&wrqu, 0, sizeof(wrqu));
486 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
487 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
488 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
489
490 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
491 print_ssid(ssid, bss->ssid, bss->ssid_len),
492 priv->curbssparams.bssid,
493 priv->curbssparams.channel);
494
495done:
496 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
497 return ret;
498}
499
144/** 500/**
145 * @brief Join an adhoc network found in a previous scan 501 * @brief Join an adhoc network found in a previous scan
146 * 502 *
@@ -219,11 +575,10 @@ static int lbs_adhoc_join(struct lbs_private *priv,
219 memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN); 575 memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN);
220 memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len); 576 memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len);
221 577
222 memcpy(&cmd.bss.phyparamset, &bss->phyparamset, 578 memcpy(&cmd.bss.ds, &bss->phy.ds, sizeof(struct ieee_ie_ds_param_set));
223 sizeof(union ieeetypes_phyparamset));
224 579
225 memcpy(&cmd.bss.ssparamset, &bss->ssparamset, 580 memcpy(&cmd.bss.ibss, &bss->ss.ibss,
226 sizeof(union IEEEtypes_ssparamset)); 581 sizeof(struct ieee_ie_ibss_param_set));
227 582
228 cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK); 583 cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
229 lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n", 584 lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
@@ -260,7 +615,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
260 */ 615 */
261 lbs_set_basic_rate_flags(cmd.bss.rates, ratesize); 616 lbs_set_basic_rate_flags(cmd.bss.rates, ratesize);
262 617
263 cmd.bss.ssparamset.ibssparamset.atimwindow = cpu_to_le16(bss->atimwindow); 618 cmd.bss.ibss.atimwindow = bss->atimwindow;
264 619
265 if (assoc_req->secinfo.wep_enabled) { 620 if (assoc_req->secinfo.wep_enabled) {
266 u16 tmp = le16_to_cpu(cmd.bss.capability); 621 u16 tmp = le16_to_cpu(cmd.bss.capability);
@@ -287,8 +642,10 @@ static int lbs_adhoc_join(struct lbs_private *priv,
287 } 642 }
288 643
289 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd); 644 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
290 if (ret == 0) 645 if (ret == 0) {
291 ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd); 646 ret = lbs_adhoc_post(priv,
647 (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
648 }
292 649
293out: 650out:
294 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 651 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
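
The atimwindow assignment in the join hunk above loses its cpu_to_le16() wrapper, presumably because the BSS descriptor now stores the value in wire (little-endian) order and it can be copied as-is. The general discipline is to convert exactly once at the CPU/wire boundary; a sketch using glibc's htole16()/le16toh() as stand-ins for the kernel's cpu_to_le16()/le16_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t atim_cpu = 10;                  /* value in CPU byte order */
        uint16_t atim_wire;                      /* value in wire (LE) order */

        /* Convert exactly once, at the CPU/wire boundary ... */
        atim_wire = htole16(atim_cpu);

        /* ... then copy already-converted values without a second conversion,
         * which is what the join path now does with atimwindow. */
        uint16_t copy_wire = atim_wire;

        printf("cpu %u -> wire 0x%04x -> back %u\n",
               (unsigned)atim_cpu, (unsigned)copy_wire,
               (unsigned)le16toh(copy_wire));
        return 0;
}
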
@@ -343,22 +700,24 @@ static int lbs_adhoc_start(struct lbs_private *priv,
343 WARN_ON(!assoc_req->channel); 700 WARN_ON(!assoc_req->channel);
344 701
345 /* set Physical parameter set */ 702 /* set Physical parameter set */
346 cmd.phyparamset.dsparamset.elementid = WLAN_EID_DS_PARAMS; 703 cmd.ds.header.id = WLAN_EID_DS_PARAMS;
347 cmd.phyparamset.dsparamset.len = 1; 704 cmd.ds.header.len = 1;
348 cmd.phyparamset.dsparamset.currentchan = assoc_req->channel; 705 cmd.ds.channel = assoc_req->channel;
349 706
350 /* set IBSS parameter set */ 707 /* set IBSS parameter set */
351 cmd.ssparamset.ibssparamset.elementid = WLAN_EID_IBSS_PARAMS; 708 cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS;
352 cmd.ssparamset.ibssparamset.len = 2; 709 cmd.ibss.header.len = 2;
353 cmd.ssparamset.ibssparamset.atimwindow = 0; 710 cmd.ibss.atimwindow = cpu_to_le16(0);
354 711
355 /* set capability info */ 712 /* set capability info */
356 tmpcap = WLAN_CAPABILITY_IBSS; 713 tmpcap = WLAN_CAPABILITY_IBSS;
357 if (assoc_req->secinfo.wep_enabled) { 714 if (assoc_req->secinfo.wep_enabled ||
358 lbs_deb_join("ADHOC_START: WEP enabled, setting privacy on\n"); 715 assoc_req->secinfo.WPAenabled ||
716 assoc_req->secinfo.WPA2enabled) {
717 lbs_deb_join("ADHOC_START: WEP/WPA enabled, privacy on\n");
359 tmpcap |= WLAN_CAPABILITY_PRIVACY; 718 tmpcap |= WLAN_CAPABILITY_PRIVACY;
360 } else 719 } else
361 lbs_deb_join("ADHOC_START: WEP disabled, setting privacy off\n"); 720 lbs_deb_join("ADHOC_START: WEP disabled, privacy off\n");
362 721
363 cmd.capability = cpu_to_le16(tmpcap); 722 cmd.capability = cpu_to_le16(tmpcap);
364 723
@@ -395,7 +754,8 @@ static int lbs_adhoc_start(struct lbs_private *priv,
395 754
396 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd); 755 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
397 if (ret == 0) 756 if (ret == 0)
398 ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd); 757 ret = lbs_adhoc_post(priv,
758 (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
399 759
400out: 760out:
401 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 761 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -720,7 +1080,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
720 assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel); 1080 assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel);
721 if (bss != NULL) { 1081 if (bss != NULL) {
722 memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); 1082 memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
723 ret = lbs_associate(priv, assoc_req); 1083 ret = lbs_try_associate(priv, assoc_req);
724 } else { 1084 } else {
725 lbs_deb_assoc("SSID not found; cannot associate\n"); 1085 lbs_deb_assoc("SSID not found; cannot associate\n");
726 } 1086 }
@@ -772,8 +1132,9 @@ static int assoc_helper_bssid(struct lbs_private *priv,
772 1132
773 memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); 1133 memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
774 if (assoc_req->mode == IW_MODE_INFRA) { 1134 if (assoc_req->mode == IW_MODE_INFRA) {
775 ret = lbs_associate(priv, assoc_req); 1135 ret = lbs_try_associate(priv, assoc_req);
776 lbs_deb_assoc("ASSOC: lbs_associate(bssid) returned %d\n", ret); 1136 lbs_deb_assoc("ASSOC: lbs_try_associate(bssid) returned %d\n",
1137 ret);
777 } else if (assoc_req->mode == IW_MODE_ADHOC) { 1138 } else if (assoc_req->mode == IW_MODE_ADHOC) {
778 lbs_adhoc_join(priv, assoc_req); 1139 lbs_adhoc_join(priv, assoc_req);
779 } 1140 }
@@ -1467,57 +1828,6 @@ struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
1467 1828
1468 1829
1469/** 1830/**
1470 * @brief This function prepares command of authenticate.
1471 *
1472 * @param priv A pointer to struct lbs_private structure
1473 * @param cmd A pointer to cmd_ds_command structure
1474 * @param pdata_buf Void cast of pointer to a BSSID to authenticate with
1475 *
1476 * @return 0 or -1
1477 */
1478int lbs_cmd_80211_authenticate(struct lbs_private *priv,
1479 struct cmd_ds_command *cmd,
1480 void *pdata_buf)
1481{
1482 struct cmd_ds_802_11_authenticate *pauthenticate = &cmd->params.auth;
1483 int ret = -1;
1484 u8 *bssid = pdata_buf;
1485
1486 lbs_deb_enter(LBS_DEB_JOIN);
1487
1488 cmd->command = cpu_to_le16(CMD_802_11_AUTHENTICATE);
1489 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_authenticate)
1490 + S_DS_GEN);
1491
1492 /* translate auth mode to 802.11 defined wire value */
1493 switch (priv->secinfo.auth_mode) {
1494 case IW_AUTH_ALG_OPEN_SYSTEM:
1495 pauthenticate->authtype = 0x00;
1496 break;
1497 case IW_AUTH_ALG_SHARED_KEY:
1498 pauthenticate->authtype = 0x01;
1499 break;
1500 case IW_AUTH_ALG_LEAP:
1501 pauthenticate->authtype = 0x80;
1502 break;
1503 default:
1504 lbs_deb_join("AUTH_CMD: invalid auth alg 0x%X\n",
1505 priv->secinfo.auth_mode);
1506 goto out;
1507 }
1508
1509 memcpy(pauthenticate->macaddr, bssid, ETH_ALEN);
1510
1511 lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n",
1512 bssid, pauthenticate->authtype);
1513 ret = 0;
1514
1515out:
1516 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1517 return ret;
1518}
1519
1520/**
1521 * @brief Deauthenticate from a specific BSS 1831 * @brief Deauthenticate from a specific BSS
1522 * 1832 *
1523 * @param priv A pointer to struct lbs_private structure 1833 * @param priv A pointer to struct lbs_private structure
@@ -1550,285 +1860,3 @@ int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN],
1550 return ret; 1860 return ret;
1551} 1861}
1552 1862
1553int lbs_cmd_80211_associate(struct lbs_private *priv,
1554 struct cmd_ds_command *cmd, void *pdata_buf)
1555{
1556 struct cmd_ds_802_11_associate *passo = &cmd->params.associate;
1557 int ret = 0;
1558 struct assoc_request *assoc_req = pdata_buf;
1559 struct bss_descriptor *bss = &assoc_req->bss;
1560 u8 *pos;
1561 u16 tmpcap, tmplen;
1562 struct mrvlietypes_ssidparamset *ssid;
1563 struct mrvlietypes_phyparamset *phy;
1564 struct mrvlietypes_ssparamset *ss;
1565 struct mrvlietypes_ratesparamset *rates;
1566 struct mrvlietypes_rsnparamset *rsn;
1567
1568 lbs_deb_enter(LBS_DEB_ASSOC);
1569
1570 pos = (u8 *) passo;
1571
1572 if (!priv) {
1573 ret = -1;
1574 goto done;
1575 }
1576
1577 cmd->command = cpu_to_le16(CMD_802_11_ASSOCIATE);
1578
1579 memcpy(passo->peerstaaddr, bss->bssid, sizeof(passo->peerstaaddr));
1580 pos += sizeof(passo->peerstaaddr);
1581
1582 /* set the listen interval */
1583 passo->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
1584
1585 pos += sizeof(passo->capability);
1586 pos += sizeof(passo->listeninterval);
1587 pos += sizeof(passo->bcnperiod);
1588 pos += sizeof(passo->dtimperiod);
1589
1590 ssid = (struct mrvlietypes_ssidparamset *) pos;
1591 ssid->header.type = cpu_to_le16(TLV_TYPE_SSID);
1592 tmplen = bss->ssid_len;
1593 ssid->header.len = cpu_to_le16(tmplen);
1594 memcpy(ssid->ssid, bss->ssid, tmplen);
1595 pos += sizeof(ssid->header) + tmplen;
1596
1597 phy = (struct mrvlietypes_phyparamset *) pos;
1598 phy->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
1599 tmplen = sizeof(phy->fh_ds.dsparamset);
1600 phy->header.len = cpu_to_le16(tmplen);
1601 memcpy(&phy->fh_ds.dsparamset,
1602 &bss->phyparamset.dsparamset.currentchan,
1603 tmplen);
1604 pos += sizeof(phy->header) + tmplen;
1605
1606 ss = (struct mrvlietypes_ssparamset *) pos;
1607 ss->header.type = cpu_to_le16(TLV_TYPE_CF);
1608 tmplen = sizeof(ss->cf_ibss.cfparamset);
1609 ss->header.len = cpu_to_le16(tmplen);
1610 pos += sizeof(ss->header) + tmplen;
1611
1612 rates = (struct mrvlietypes_ratesparamset *) pos;
1613 rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
1614 memcpy(&rates->rates, &bss->rates, MAX_RATES);
1615 tmplen = MAX_RATES;
1616 if (get_common_rates(priv, rates->rates, &tmplen)) {
1617 ret = -1;
1618 goto done;
1619 }
1620 pos += sizeof(rates->header) + tmplen;
1621 rates->header.len = cpu_to_le16(tmplen);
1622 lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen);
1623
1624 /* Copy the infra. association rates into Current BSS state structure */
1625 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1626 memcpy(&priv->curbssparams.rates, &rates->rates, tmplen);
1627
1628 /* Set MSB on basic rates as the firmware requires, but _after_
1629 * copying to current bss rates.
1630 */
1631 lbs_set_basic_rate_flags(rates->rates, tmplen);
1632
1633 if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
1634 rsn = (struct mrvlietypes_rsnparamset *) pos;
1635 /* WPA_IE or WPA2_IE */
1636 rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]);
1637 tmplen = (u16) assoc_req->wpa_ie[1];
1638 rsn->header.len = cpu_to_le16(tmplen);
1639 memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen);
1640 lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: RSN IE", (u8 *) rsn,
1641 sizeof(rsn->header) + tmplen);
1642 pos += sizeof(rsn->header) + tmplen;
1643 }
1644
1645 /* update curbssparams */
1646 priv->curbssparams.channel = bss->phyparamset.dsparamset.currentchan;
1647
1648 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
1649 ret = -1;
1650 goto done;
1651 }
1652
1653 cmd->size = cpu_to_le16((u16) (pos - (u8 *) passo) + S_DS_GEN);
1654
1655 /* set the capability info */
1656 tmpcap = (bss->capability & CAPINFO_MASK);
1657 if (bss->mode == IW_MODE_INFRA)
1658 tmpcap |= WLAN_CAPABILITY_ESS;
1659 passo->capability = cpu_to_le16(tmpcap);
1660 lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap);
1661
1662done:
1663 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1664 return ret;
1665}
1666
1667int lbs_ret_80211_associate(struct lbs_private *priv,
1668 struct cmd_ds_command *resp)
1669{
1670 int ret = 0;
1671 union iwreq_data wrqu;
1672 struct ieeetypes_assocrsp *passocrsp;
1673 struct bss_descriptor *bss;
1674 u16 status_code;
1675
1676 lbs_deb_enter(LBS_DEB_ASSOC);
1677
1678 if (!priv->in_progress_assoc_req) {
1679 lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n");
1680 ret = -1;
1681 goto done;
1682 }
1683 bss = &priv->in_progress_assoc_req->bss;
1684
1685 passocrsp = (struct ieeetypes_assocrsp *) &resp->params;
1686
1687 /*
1688 * Older FW versions map the IEEE 802.11 Status Code in the association
1689 * response to the following values returned in passocrsp->statuscode:
1690 *
1691 * IEEE Status Code Marvell Status Code
1692 * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
1693 * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1694 * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1695 * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1696 * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1697 * others -> 0x0003 ASSOC_RESULT_REFUSED
1698 *
1699 * Other response codes:
1700 * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
1701 * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
1702 * association response from the AP)
1703 */
1704
1705 status_code = le16_to_cpu(passocrsp->statuscode);
1706 switch (status_code) {
1707 case 0x00:
1708 break;
1709 case 0x01:
1710 lbs_deb_assoc("ASSOC_RESP: invalid parameters\n");
1711 break;
1712 case 0x02:
1713 lbs_deb_assoc("ASSOC_RESP: internal timer "
1714 "expired while waiting for the AP\n");
1715 break;
1716 case 0x03:
1717 lbs_deb_assoc("ASSOC_RESP: association "
1718 "refused by AP\n");
1719 break;
1720 case 0x04:
1721 lbs_deb_assoc("ASSOC_RESP: authentication "
1722 "refused by AP\n");
1723 break;
1724 default:
1725 lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x "
1726 " unknown\n", status_code);
1727 break;
1728 }
1729
1730 if (status_code) {
1731 lbs_mac_event_disconnected(priv);
1732 ret = -1;
1733 goto done;
1734 }
1735
1736 lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP", (void *)&resp->params,
1737 le16_to_cpu(resp->size) - S_DS_GEN);
1738
1739 /* Send a Media Connected event, according to the Spec */
1740 priv->connect_status = LBS_CONNECTED;
1741
1742 /* Update current SSID and BSSID */
1743 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
1744 priv->curbssparams.ssid_len = bss->ssid_len;
1745 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
1746
1747 priv->SNR[TYPE_RXPD][TYPE_AVG] = 0;
1748 priv->NF[TYPE_RXPD][TYPE_AVG] = 0;
1749
1750 memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
1751 memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
1752 priv->nextSNRNF = 0;
1753 priv->numSNRNF = 0;
1754
1755 netif_carrier_on(priv->dev);
1756 if (!priv->tx_pending_len)
1757 netif_wake_queue(priv->dev);
1758
1759 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
1760 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1761 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
1762
1763done:
1764 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1765 return ret;
1766}
1767
1768static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp)
1769{
1770 int ret = 0;
1771 u16 command = le16_to_cpu(resp->command);
1772 u16 result = le16_to_cpu(resp->result);
1773 struct cmd_ds_802_11_ad_hoc_result *adhoc_resp;
1774 union iwreq_data wrqu;
1775 struct bss_descriptor *bss;
1776 DECLARE_SSID_BUF(ssid);
1777
1778 lbs_deb_enter(LBS_DEB_JOIN);
1779
1780 adhoc_resp = (struct cmd_ds_802_11_ad_hoc_result *) resp;
1781
1782 if (!priv->in_progress_assoc_req) {
1783 lbs_deb_join("ADHOC_RESP: no in-progress association "
1784 "request\n");
1785 ret = -1;
1786 goto done;
1787 }
1788 bss = &priv->in_progress_assoc_req->bss;
1789
1790 /*
1791 * Join result code 0 --> SUCCESS
1792 */
1793 if (result) {
1794 lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
1795 if (priv->connect_status == LBS_CONNECTED)
1796 lbs_mac_event_disconnected(priv);
1797 ret = -1;
1798 goto done;
1799 }
1800
1801 /* Send a Media Connected event, according to the Spec */
1802 priv->connect_status = LBS_CONNECTED;
1803
1804 if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
1805 /* Update the created network descriptor with the new BSSID */
1806 memcpy(bss->bssid, adhoc_resp->bssid, ETH_ALEN);
1807 }
1808
1809 /* Set the BSSID from the joined/started descriptor */
1810 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
1811
1812 /* Set the new SSID to current SSID */
1813 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
1814 priv->curbssparams.ssid_len = bss->ssid_len;
1815
1816 netif_carrier_on(priv->dev);
1817 if (!priv->tx_pending_len)
1818 netif_wake_queue(priv->dev);
1819
1820 memset(&wrqu, 0, sizeof(wrqu));
1821 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
1822 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1823 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
1824
1825 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
1826 print_ssid(ssid, bss->ssid, bss->ssid_len),
1827 priv->curbssparams.bssid,
1828 priv->curbssparams.channel);
1829
1830done:
1831 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1832 return ret;
1833}
1834
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index 8b7336dd02a3..6e765e9f91a3 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -8,22 +8,9 @@
8void lbs_association_worker(struct work_struct *work); 8void lbs_association_worker(struct work_struct *work);
9struct assoc_request *lbs_get_association_request(struct lbs_private *priv); 9struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
10 10
11struct cmd_ds_command;
12int lbs_cmd_80211_authenticate(struct lbs_private *priv,
13 struct cmd_ds_command *cmd,
14 void *pdata_buf);
15
16int lbs_adhoc_stop(struct lbs_private *priv); 11int lbs_adhoc_stop(struct lbs_private *priv);
17 12
18int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, 13int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
19 u8 bssid[ETH_ALEN], u16 reason); 14 u8 bssid[ETH_ALEN], u16 reason);
20int lbs_cmd_80211_associate(struct lbs_private *priv,
21 struct cmd_ds_command *cmd,
22 void *pdata_buf);
23
24int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
25 struct cmd_ds_command *resp);
26int lbs_ret_80211_associate(struct lbs_private *priv,
27 struct cmd_ds_command *resp);
28 15
29#endif /* _LBS_ASSOC_H */ 16#endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index c455b9abbfc0..01db705a38ec 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1220,8 +1220,7 @@ static void lbs_submit_command(struct lbs_private *priv,
1220 command = le16_to_cpu(cmd->command); 1220 command = le16_to_cpu(cmd->command);
1221 1221
1222 /* These commands take longer */ 1222 /* These commands take longer */
1223 if (command == CMD_802_11_SCAN || command == CMD_802_11_ASSOCIATE || 1223 if (command == CMD_802_11_SCAN || command == CMD_802_11_ASSOCIATE)
1224 command == CMD_802_11_AUTHENTICATE)
1225 timeo = 5 * HZ; 1224 timeo = 5 * HZ;
1226 1225
1227 lbs_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n", 1226 lbs_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
@@ -1415,15 +1414,6 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1415 ret = lbs_cmd_802_11_ps_mode(cmdptr, cmd_action); 1414 ret = lbs_cmd_802_11_ps_mode(cmdptr, cmd_action);
1416 break; 1415 break;
1417 1416
1418 case CMD_802_11_ASSOCIATE:
1419 case CMD_802_11_REASSOCIATE:
1420 ret = lbs_cmd_80211_associate(priv, cmdptr, pdata_buf);
1421 break;
1422
1423 case CMD_802_11_AUTHENTICATE:
1424 ret = lbs_cmd_80211_authenticate(priv, cmdptr, pdata_buf);
1425 break;
1426
1427 case CMD_MAC_REG_ACCESS: 1417 case CMD_MAC_REG_ACCESS:
1428 case CMD_BBP_REG_ACCESS: 1418 case CMD_BBP_REG_ACCESS:
1429 case CMD_RF_REG_ACCESS: 1419 case CMD_RF_REG_ACCESS:
@@ -1470,8 +1460,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1470 break; 1460 break;
1471 case CMD_802_11_LED_GPIO_CTRL: 1461 case CMD_802_11_LED_GPIO_CTRL:
1472 { 1462 {
1473 struct mrvlietypes_ledgpio *gpio = 1463 struct mrvl_ie_ledgpio *gpio =
1474 (struct mrvlietypes_ledgpio*) 1464 (struct mrvl_ie_ledgpio*)
1475 cmdptr->params.ledgpio.data; 1465 cmdptr->params.ledgpio.data;
1476 1466
1477 memmove(&cmdptr->params.ledgpio, 1467 memmove(&cmdptr->params.ledgpio,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index bcf2a9756fb6..c42d3faa2660 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -5,7 +5,7 @@
5#include <linux/delay.h> 5#include <linux/delay.h>
6#include <linux/if_arp.h> 6#include <linux/if_arp.h>
7#include <linux/netdevice.h> 7#include <linux/netdevice.h>
8 8#include <asm/unaligned.h>
9#include <net/iw_handler.h> 9#include <net/iw_handler.h>
10 10
11#include "host.h" 11#include "host.h"
@@ -154,11 +154,11 @@ static int lbs_ret_802_11_rssi(struct lbs_private *priv,
154 lbs_deb_enter(LBS_DEB_CMD); 154 lbs_deb_enter(LBS_DEB_CMD);
155 155
156 /* store the non average value */ 156 /* store the non average value */
157 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = le16_to_cpu(rssirsp->SNR); 157 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
158 priv->NF[TYPE_BEACON][TYPE_NOAVG] = le16_to_cpu(rssirsp->noisefloor); 158 priv->NF[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->noisefloor);
159 159
160 priv->SNR[TYPE_BEACON][TYPE_AVG] = le16_to_cpu(rssirsp->avgSNR); 160 priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
161 priv->NF[TYPE_BEACON][TYPE_AVG] = le16_to_cpu(rssirsp->avgnoisefloor); 161 priv->NF[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgnoisefloor);
162 162
163 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 163 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
164 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG], 164 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
@@ -210,12 +210,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
210 ret = lbs_ret_reg_access(priv, respcmd, resp); 210 ret = lbs_ret_reg_access(priv, respcmd, resp);
211 break; 211 break;
212 212
213 case CMD_RET_802_11_ASSOCIATE:
214 case CMD_RET(CMD_802_11_ASSOCIATE):
215 case CMD_RET(CMD_802_11_REASSOCIATE):
216 ret = lbs_ret_80211_associate(priv, resp);
217 break;
218
219 case CMD_RET(CMD_802_11_SET_AFC): 213 case CMD_RET(CMD_802_11_SET_AFC):
220 case CMD_RET(CMD_802_11_GET_AFC): 214 case CMD_RET(CMD_802_11_GET_AFC):
221 spin_lock_irqsave(&priv->driver_lock, flags); 215 spin_lock_irqsave(&priv->driver_lock, flags);
@@ -225,7 +219,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
225 219
226 break; 220 break;
227 221
228 case CMD_RET(CMD_802_11_AUTHENTICATE):
229 case CMD_RET(CMD_802_11_BEACON_STOP): 222 case CMD_RET(CMD_802_11_BEACON_STOP):
230 break; 223 break;
231 224
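
cmdresp.c now pulls in <asm/unaligned.h> and reads the RSSI response fields with get_unaligned_le16() instead of le16_to_cpu(). The helper performs the same little-endian conversion but also tolerates fields that are not naturally aligned inside packed firmware buffers. Roughly what it guarantees, as a standalone sketch (the real kernel helper lives in <asm/unaligned.h>; the implementation below is only illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Read two bytes at any address and interpret them as little-endian,
 * without relying on the pointer being 2-byte aligned. */
static uint16_t sketch_get_unaligned_le16(const void *p)
{
	uint8_t b[2];

	memcpy(b, p, 2);		/* byte-wise copy avoids alignment traps */
	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	/* Packed firmware responses can place 16-bit fields at odd offsets. */
	uint8_t resp[] = { 0x00, 0x34, 0x12 };	/* u8 pad, then LE16 0x1234 */

	printf("0x%04x\n", sketch_get_unaligned_le16(resp + 1));
	return 0;
}
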
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 50e28a0cdfee..811ffc3ef414 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -183,12 +183,12 @@ out_unlock:
183 */ 183 */
184static void *lbs_tlv_find(uint16_t tlv_type, const uint8_t *tlv, uint16_t size) 184static void *lbs_tlv_find(uint16_t tlv_type, const uint8_t *tlv, uint16_t size)
185{ 185{
186 struct mrvlietypesheader *tlv_h; 186 struct mrvl_ie_header *tlv_h;
187 uint16_t length; 187 uint16_t length;
188 ssize_t pos = 0; 188 ssize_t pos = 0;
189 189
190 while (pos < size) { 190 while (pos < size) {
191 tlv_h = (struct mrvlietypesheader *) tlv; 191 tlv_h = (struct mrvl_ie_header *) tlv;
192 if (!tlv_h->len) 192 if (!tlv_h->len)
193 return NULL; 193 return NULL;
194 if (tlv_h->type == cpu_to_le16(tlv_type)) 194 if (tlv_h->type == cpu_to_le16(tlv_type))
@@ -206,7 +206,7 @@ static ssize_t lbs_threshold_read(uint16_t tlv_type, uint16_t event_mask,
206 size_t count, loff_t *ppos) 206 size_t count, loff_t *ppos)
207{ 207{
208 struct cmd_ds_802_11_subscribe_event *subscribed; 208 struct cmd_ds_802_11_subscribe_event *subscribed;
209 struct mrvlietypes_thresholds *got; 209 struct mrvl_ie_thresholds *got;
210 struct lbs_private *priv = file->private_data; 210 struct lbs_private *priv = file->private_data;
211 ssize_t ret = 0; 211 ssize_t ret = 0;
212 size_t pos = 0; 212 size_t pos = 0;
@@ -259,7 +259,7 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask,
259 loff_t *ppos) 259 loff_t *ppos)
260{ 260{
261 struct cmd_ds_802_11_subscribe_event *events; 261 struct cmd_ds_802_11_subscribe_event *events;
262 struct mrvlietypes_thresholds *tlv; 262 struct mrvl_ie_thresholds *tlv;
263 struct lbs_private *priv = file->private_data; 263 struct lbs_private *priv = file->private_data;
264 ssize_t buf_size; 264 ssize_t buf_size;
265 int value, freq, new_mask; 265 int value, freq, new_mask;
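
The mrvlietypes* structures are renamed to mrvl_ie_* here and in types.h below; lbs_tlv_find() walks a flat buffer of such TLVs, each a 16-bit little-endian type and length followed by the value. The same walk as a standalone sketch, using plain uint16_t plus an explicit little-endian read where the driver uses __le16 and le16_to_cpu():

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Mirrors struct mrvl_ie_header from types.h: LE16 type, LE16 length,
 * followed by 'len' bytes of value. */
struct tlv_header {
	uint16_t type;
	uint16_t len;
} __attribute__ ((packed));

static uint16_t rd_le16(const void *p)
{
	uint8_t b[2];

	memcpy(b, p, 2);
	return (uint16_t)(b[0] | (b[1] << 8));
}

/* Same shape as lbs_tlv_find(): step from header to header until the
 * requested type is found or the buffer is exhausted. */
static const void *tlv_find(uint16_t wanted, const uint8_t *buf, size_t size)
{
	size_t pos = 0;

	while (pos + sizeof(struct tlv_header) <= size) {
		uint16_t type = rd_le16(buf + pos);
		uint16_t len  = rd_le16(buf + pos + 2);

		if (!len)			/* zero-length TLV: malformed, stop */
			return NULL;
		if (type == wanted)
			return buf + pos;
		pos += sizeof(struct tlv_header) + len;
	}
	return NULL;
}
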
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index a4455ec7c354..f9ec69e04734 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -321,8 +321,6 @@ struct lbs_private {
321 321
322 u32 monitormode; 322 u32 monitormode;
323 u8 fw_ready; 323 u8 fw_ready;
324 u8 fn_init_required;
325 u8 fn_shutdown_required;
326}; 324};
327 325
328extern struct cmd_confirm_sleep confirm_sleep; 326extern struct cmd_confirm_sleep confirm_sleep;
@@ -340,7 +338,7 @@ struct bss_descriptor {
340 u32 rssi; 338 u32 rssi;
341 u32 channel; 339 u32 channel;
342 u16 beaconperiod; 340 u16 beaconperiod;
343 u32 atimwindow; 341 __le16 atimwindow;
344 342
345 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */ 343 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
346 u8 mode; 344 u8 mode;
@@ -350,10 +348,10 @@ struct bss_descriptor {
350 348
351 unsigned long last_scanned; 349 unsigned long last_scanned;
352 350
353 union ieeetypes_phyparamset phyparamset; 351 union ieee_phy_param_set phy;
354 union IEEEtypes_ssparamset ssparamset; 352 union ieee_ss_param_set ss;
355 353
356 struct ieeetypes_countryinfofullset countryinfo; 354 struct ieee_ie_country_info_full_set countryinfo;
357 355
358 u8 wpa_ie[MAX_WPA_IE_LEN]; 356 u8 wpa_ie[MAX_WPA_IE_LEN];
359 size_t wpa_ie_len; 357 size_t wpa_ie_len;
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index 391c54ab2b09..0a2e29140add 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -250,7 +250,9 @@ struct cmd_ds_gspi_bus_config {
250} __attribute__ ((packed)); 250} __attribute__ ((packed));
251 251
252struct cmd_ds_802_11_authenticate { 252struct cmd_ds_802_11_authenticate {
253 u8 macaddr[ETH_ALEN]; 253 struct cmd_header hdr;
254
255 u8 bssid[ETH_ALEN];
254 u8 authtype; 256 u8 authtype;
255 u8 reserved[10]; 257 u8 reserved[10];
256} __attribute__ ((packed)); 258} __attribute__ ((packed));
@@ -263,22 +265,23 @@ struct cmd_ds_802_11_deauthenticate {
263} __attribute__ ((packed)); 265} __attribute__ ((packed));
264 266
265struct cmd_ds_802_11_associate { 267struct cmd_ds_802_11_associate {
266 u8 peerstaaddr[6]; 268 struct cmd_header hdr;
269
270 u8 bssid[6];
267 __le16 capability; 271 __le16 capability;
268 __le16 listeninterval; 272 __le16 listeninterval;
269 __le16 bcnperiod; 273 __le16 bcnperiod;
270 u8 dtimperiod; 274 u8 dtimperiod;
271 275 u8 iebuf[512]; /* Enough for required and most optional IEs */
272#if 0
273 mrvlietypes_ssidparamset_t ssidParamSet;
274 mrvlietypes_phyparamset_t phyparamset;
275 mrvlietypes_ssparamset_t ssparamset;
276 mrvlietypes_ratesparamset_t ratesParamSet;
277#endif
278} __attribute__ ((packed)); 276} __attribute__ ((packed));
279 277
280struct cmd_ds_802_11_associate_rsp { 278struct cmd_ds_802_11_associate_response {
281 struct ieeetypes_assocrsp assocRsp; 279 struct cmd_header hdr;
280
281 __le16 capability;
282 __le16 statuscode;
283 __le16 aid;
284 u8 iebuf[512];
282} __attribute__ ((packed)); 285} __attribute__ ((packed));
283 286
284struct cmd_ds_802_11_set_wep { 287struct cmd_ds_802_11_set_wep {
@@ -535,9 +538,11 @@ struct cmd_ds_802_11_ad_hoc_start {
535 u8 bsstype; 538 u8 bsstype;
536 __le16 beaconperiod; 539 __le16 beaconperiod;
537 u8 dtimperiod; /* Reserved on v9 and later */ 540 u8 dtimperiod; /* Reserved on v9 and later */
538 union IEEEtypes_ssparamset ssparamset; 541 struct ieee_ie_ibss_param_set ibss;
539 union ieeetypes_phyparamset phyparamset; 542 u8 reserved1[4];
540 __le16 probedelay; 543 struct ieee_ie_ds_param_set ds;
544 u8 reserved2[4];
545 __le16 probedelay; /* Reserved on v9 and later */
541 __le16 capability; 546 __le16 capability;
542 u8 rates[MAX_RATES]; 547 u8 rates[MAX_RATES];
543 u8 tlv_memory_size_pad[100]; 548 u8 tlv_memory_size_pad[100];
@@ -558,8 +563,10 @@ struct adhoc_bssdesc {
558 u8 dtimperiod; 563 u8 dtimperiod;
559 __le64 timestamp; 564 __le64 timestamp;
560 __le64 localtime; 565 __le64 localtime;
561 union ieeetypes_phyparamset phyparamset; 566 struct ieee_ie_ds_param_set ds;
562 union IEEEtypes_ssparamset ssparamset; 567 u8 reserved1[4];
568 struct ieee_ie_ibss_param_set ibss;
569 u8 reserved2[4];
563 __le16 capability; 570 __le16 capability;
564 u8 rates[MAX_RATES]; 571 u8 rates[MAX_RATES];
565 572
@@ -765,8 +772,6 @@ struct cmd_ds_command {
765 /* command Body */ 772 /* command Body */
766 union { 773 union {
767 struct cmd_ds_802_11_ps_mode psmode; 774 struct cmd_ds_802_11_ps_mode psmode;
768 struct cmd_ds_802_11_associate associate;
769 struct cmd_ds_802_11_authenticate auth;
770 struct cmd_ds_802_11_get_stat gstat; 775 struct cmd_ds_802_11_get_stat gstat;
771 struct cmd_ds_802_3_get_stat gstat_8023; 776 struct cmd_ds_802_3_get_stat gstat_8023;
772 struct cmd_ds_802_11_rf_antenna rant; 777 struct cmd_ds_802_11_rf_antenna rant;
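
struct cmd_ds_802_11_associate now embeds its own struct cmd_header and a 512-byte iebuf in place of the commented-out TLV members, and the association response gains explicit capability/statuscode/aid fields. A sketch of the sizing idiom this layout implies, assuming kernel context: the fixed fields are always sent, but only the used portion of iebuf is counted. append_assoc_tlvs() is a hypothetical placeholder for the TLV construction done in the driver's association code, and the listen interval value is an arbitrary example.

/* Hypothetical helper: appends SSID/rates/PHY TLVs at 'pos' and returns
 * the new tail pointer. Stands in for the real TLV building in assoc.c. */
static u8 *append_assoc_tlvs(struct lbs_private *priv, u8 *pos);

static int example_associate(struct lbs_private *priv,
			     const u8 *bssid, u16 capability)
{
	struct cmd_ds_802_11_associate cmd;
	u8 *pos = cmd.iebuf;
	int len;

	memset(&cmd, 0, sizeof(cmd));
	memcpy(cmd.bssid, bssid, sizeof(cmd.bssid));
	cmd.capability = cpu_to_le16(capability);
	cmd.listeninterval = cpu_to_le16(10);	/* example value */

	pos = append_assoc_tlvs(priv, pos);

	/* Send the fixed fields plus only the part of iebuf actually used;
	 * __lbs_cmd() writes this length into cmd.hdr.size. */
	len = sizeof(cmd) - sizeof(cmd.iebuf) + (pos - cmd.iebuf);

	return __lbs_cmd(priv, CMD_802_11_ASSOCIATE, &cmd.hdr, len,
			 lbs_cmd_copyback, (unsigned long) &cmd);
}
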
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index a7e3fc119b70..8cdb88c6ca28 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -39,8 +39,24 @@
39#include "decl.h" 39#include "decl.h"
40#include "defs.h" 40#include "defs.h"
41#include "dev.h" 41#include "dev.h"
42#include "cmd.h"
42#include "if_sdio.h" 43#include "if_sdio.h"
43 44
45/* The if_sdio_remove() callback is invoked both when the user
46 * removes this module from the kernel and when the card is
47 * ejected from the slot. The driver handles these two cases
48 * differently for the SD8688 combo chip: if the user is
49 * removing the module, the FUNC_SHUTDOWN command is sent to
50 * the SD8688 firmware; if the card has been ejected, there is
51 * no need to send this command.
52 *
53 * The variable 'user_rmmod' distinguishes the two scenarios.
54 * It is initialized to FALSE to cover the card-ejected case,
55 * and set to TRUE in the module_exit function when the module
56 * is being removed.
57 */
58static u8 user_rmmod;
59
44static char *lbs_helper_name = NULL; 60static char *lbs_helper_name = NULL;
45module_param_named(helper_name, lbs_helper_name, charp, 0644); 61module_param_named(helper_name, lbs_helper_name, charp, 0644);
46 62
@@ -61,7 +77,6 @@ struct if_sdio_model {
61 int model; 77 int model;
62 const char *helper; 78 const char *helper;
63 const char *firmware; 79 const char *firmware;
64 struct if_sdio_card *card;
65}; 80};
66 81
67static struct if_sdio_model if_sdio_models[] = { 82static struct if_sdio_model if_sdio_models[] = {
@@ -70,21 +85,18 @@ static struct if_sdio_model if_sdio_models[] = {
70 .model = IF_SDIO_MODEL_8385, 85 .model = IF_SDIO_MODEL_8385,
71 .helper = "sd8385_helper.bin", 86 .helper = "sd8385_helper.bin",
72 .firmware = "sd8385.bin", 87 .firmware = "sd8385.bin",
73 .card = NULL,
74 }, 88 },
75 { 89 {
76 /* 8686 */ 90 /* 8686 */
77 .model = IF_SDIO_MODEL_8686, 91 .model = IF_SDIO_MODEL_8686,
78 .helper = "sd8686_helper.bin", 92 .helper = "sd8686_helper.bin",
79 .firmware = "sd8686.bin", 93 .firmware = "sd8686.bin",
80 .card = NULL,
81 }, 94 },
82 { 95 {
83 /* 8688 */ 96 /* 8688 */
84 .model = IF_SDIO_MODEL_8688, 97 .model = IF_SDIO_MODEL_8688,
85 .helper = "sd8688_helper.bin", 98 .helper = "sd8688_helper.bin",
86 .firmware = "sd8688.bin", 99 .firmware = "sd8688.bin",
87 .card = NULL,
88 }, 100 },
89}; 101};
90 102
@@ -927,8 +939,6 @@ static int if_sdio_probe(struct sdio_func *func,
927 goto free; 939 goto free;
928 } 940 }
929 941
930 if_sdio_models[i].card = card;
931
932 card->helper = if_sdio_models[i].helper; 942 card->helper = if_sdio_models[i].helper;
933 card->firmware = if_sdio_models[i].firmware; 943 card->firmware = if_sdio_models[i].firmware;
934 944
@@ -1014,8 +1024,16 @@ static int if_sdio_probe(struct sdio_func *func,
1014 /* 1024 /*
1015 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions 1025 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
1016 */ 1026 */
1017 priv->fn_init_required = 1027 if (card->model == IF_SDIO_MODEL_8688) {
1018 (card->model == IF_SDIO_MODEL_8688) ? 1 : 0; 1028 struct cmd_header cmd;
1029
1030 memset(&cmd, 0, sizeof(cmd));
1031
1032 lbs_deb_sdio("send function INIT command\n");
1033 if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd),
1034 lbs_cmd_copyback, (unsigned long) &cmd))
1035 lbs_pr_alert("CMD_FUNC_INIT cmd failed\n");
1036 }
1019 1037
1020 ret = lbs_start_card(priv); 1038 ret = lbs_start_card(priv);
1021 if (ret) 1039 if (ret)
@@ -1057,30 +1075,39 @@ static void if_sdio_remove(struct sdio_func *func)
1057{ 1075{
1058 struct if_sdio_card *card; 1076 struct if_sdio_card *card;
1059 struct if_sdio_packet *packet; 1077 struct if_sdio_packet *packet;
1060 int ret;
1061 1078
1062 lbs_deb_enter(LBS_DEB_SDIO); 1079 lbs_deb_enter(LBS_DEB_SDIO);
1063 1080
1064 card = sdio_get_drvdata(func); 1081 card = sdio_get_drvdata(func);
1065 1082
1066 lbs_stop_card(card->priv); 1083 if (user_rmmod && (card->model == IF_SDIO_MODEL_8688)) {
1084 /*
1085 * FUNC_SHUTDOWN is required for SD8688 WLAN/BT
1086 * multiple functions
1087 */
1088 struct cmd_header cmd;
1089
1090 memset(&cmd, 0, sizeof(cmd));
1091
1092 lbs_deb_sdio("send function SHUTDOWN command\n");
1093 if (__lbs_cmd(card->priv, CMD_FUNC_SHUTDOWN,
1094 &cmd, sizeof(cmd), lbs_cmd_copyback,
1095 (unsigned long) &cmd))
1096 lbs_pr_alert("CMD_FUNC_SHUTDOWN cmd failed\n");
1097 }
1067 1098
1068 card->priv->surpriseremoved = 1; 1099 card->priv->surpriseremoved = 1;
1069 1100
1070 lbs_deb_sdio("call remove card\n"); 1101 lbs_deb_sdio("call remove card\n");
1102 lbs_stop_card(card->priv);
1071 lbs_remove_card(card->priv); 1103 lbs_remove_card(card->priv);
1072 1104
1073 flush_workqueue(card->workqueue); 1105 flush_workqueue(card->workqueue);
1074 destroy_workqueue(card->workqueue); 1106 destroy_workqueue(card->workqueue);
1075 1107
1076 sdio_claim_host(func); 1108 sdio_claim_host(func);
1077
1078 /* Disable interrupts */
1079 sdio_writeb(func, 0x00, IF_SDIO_H_INT_MASK, &ret);
1080
1081 sdio_release_irq(func); 1109 sdio_release_irq(func);
1082 sdio_disable_func(func); 1110 sdio_disable_func(func);
1083
1084 sdio_release_host(func); 1111 sdio_release_host(func);
1085 1112
1086 while (card->packets) { 1113 while (card->packets) {
@@ -1116,6 +1143,9 @@ static int __init if_sdio_init_module(void)
1116 1143
1117 ret = sdio_register_driver(&if_sdio_driver); 1144 ret = sdio_register_driver(&if_sdio_driver);
1118 1145
1146 /* Clear the flag in case the user removes the card. */
1147 user_rmmod = 0;
1148
1119 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 1149 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
1120 1150
1121 return ret; 1151 return ret;
@@ -1123,22 +1153,10 @@ static int __init if_sdio_init_module(void)
1123 1153
1124static void __exit if_sdio_exit_module(void) 1154static void __exit if_sdio_exit_module(void)
1125{ 1155{
1126 int i;
1127 struct if_sdio_card *card;
1128
1129 lbs_deb_enter(LBS_DEB_SDIO); 1156 lbs_deb_enter(LBS_DEB_SDIO);
1130 1157
1131 for (i = 0; i < ARRAY_SIZE(if_sdio_models); i++) { 1158 /* Set the flag as the user is removing this module. */
1132 card = if_sdio_models[i].card; 1159 user_rmmod = 1;
1133
1134 /*
1135 * FUNC_SHUTDOWN is required for SD8688 WLAN/BT
1136 * multiple functions
1137 */
1138 if (card && card->priv)
1139 card->priv->fn_shutdown_required =
1140 (card->model == IF_SDIO_MODEL_8688) ? 1 : 0;
1141 }
1142 1160
1143 sdio_unregister_driver(&if_sdio_driver); 1161 sdio_unregister_driver(&if_sdio_driver);
1144 1162
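
The per-model 'card' backpointer existed only so module_exit could reach each priv and set fn_shutdown_required; it is replaced by a single module-wide user_rmmod flag (documented in the comment near the top of this file's diff), and FUNC_SHUTDOWN is now sent from if_sdio_remove() itself. The flag's life cycle, condensed into a sketch with the SD8688-specific command elided (kernel context assumed):

static u8 user_rmmod;	/* 0: card ejected, 1: module being unloaded */

static void example_remove(struct sdio_func *func)
{
	/* Only talk to the firmware when the card is still in the slot,
	 * i.e. the remove is happening because of rmmod. */
	if (user_rmmod) {
		/* ... send CMD_FUNC_SHUTDOWN as in if_sdio_remove() ... */
	}
}

static int __init example_init(void)
{
	user_rmmod = 0;		/* default: assume a surprise card removal */
	return sdio_register_driver(&if_sdio_driver);
}

static void __exit example_exit(void)
{
	user_rmmod = 1;		/* rmmod: the remove callbacks follow this */
	sdio_unregister_driver(&if_sdio_driver);
}
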
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 5fa55fe1f860..f8c2898d82b0 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -19,7 +19,6 @@
19 19
20#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include <linux/gpio.h>
23#include <linux/jiffies.h> 22#include <linux/jiffies.h>
24#include <linux/kthread.h> 23#include <linux/kthread.h>
25#include <linux/list.h> 24#include <linux/list.h>
@@ -51,13 +50,6 @@ struct if_spi_card {
51 u16 card_id; 50 u16 card_id;
52 u8 card_rev; 51 u8 card_rev;
53 52
54 /* Pin number for our GPIO chip-select. */
55 /* TODO: Once the generic SPI layer has some additional features, we
56 * should take this out and use the normal chip select here.
57 * We need support for chip select delays, and not dropping chipselect
58 * after each word. */
59 int gpio_cs;
60
61 /* The last time that we initiated an SPU operation */ 53 /* The last time that we initiated an SPU operation */
62 unsigned long prev_xfer_time; 54 unsigned long prev_xfer_time;
63 55
@@ -119,9 +111,6 @@ static struct chip_ident chip_id_to_device_name[] = {
119 * First we have to put a SPU register name on the bus. Then we can 111 * First we have to put a SPU register name on the bus. Then we can
120 * either read from or write to that register. 112 * either read from or write to that register.
121 * 113 *
122 * For 16-bit transactions, byte order on the bus is big-endian.
123 * We don't have to worry about that here, though.
124 * The translation takes place in the SPI routines.
125 */ 114 */
126 115
127static void spu_transaction_init(struct if_spi_card *card) 116static void spu_transaction_init(struct if_spi_card *card)
@@ -133,12 +122,10 @@ static void spu_transaction_init(struct if_spi_card *card)
133 * If not, we have to busy-wait to be on the safe side. */ 122 * If not, we have to busy-wait to be on the safe side. */
134 ndelay(400); 123 ndelay(400);
135 } 124 }
136 gpio_set_value(card->gpio_cs, 0); /* assert CS */
137} 125}
138 126
139static void spu_transaction_finish(struct if_spi_card *card) 127static void spu_transaction_finish(struct if_spi_card *card)
140{ 128{
141 gpio_set_value(card->gpio_cs, 1); /* drop CS */
142 card->prev_xfer_time = jiffies; 129 card->prev_xfer_time = jiffies;
143} 130}
144 131
@@ -147,7 +134,14 @@ static void spu_transaction_finish(struct if_spi_card *card)
147static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len) 134static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
148{ 135{
149 int err = 0; 136 int err = 0;
150 u16 reg_out = reg | IF_SPI_WRITE_OPERATION_MASK; 137 u16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK);
138 struct spi_message m;
139 struct spi_transfer reg_trans;
140 struct spi_transfer data_trans;
141
142 spi_message_init(&m);
143 memset(&reg_trans, 0, sizeof(reg_trans));
144 memset(&data_trans, 0, sizeof(data_trans));
151 145
152 /* You must give an even number of bytes to the SPU, even if it 146 /* You must give an even number of bytes to the SPU, even if it
153 * doesn't care about the last one. */ 147 * doesn't care about the last one. */
@@ -156,29 +150,26 @@ static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
156 spu_transaction_init(card); 150 spu_transaction_init(card);
157 151
158 /* write SPU register index */ 152 /* write SPU register index */
159 err = spi_write(card->spi, (u8 *)&reg_out, sizeof(u16)); 153 reg_trans.tx_buf = &reg_out;
160 if (err) 154 reg_trans.len = sizeof(reg_out);
161 goto out;
162 155
163 err = spi_write(card->spi, buf, len); 156 data_trans.tx_buf = buf;
157 data_trans.len = len;
164 158
165out: 159 spi_message_add_tail(&reg_trans, &m);
160 spi_message_add_tail(&data_trans, &m);
161
162 err = spi_sync(card->spi, &m);
166 spu_transaction_finish(card); 163 spu_transaction_finish(card);
167 return err; 164 return err;
168} 165}
169 166
170static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val) 167static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val)
171{ 168{
172 return spu_write(card, reg, (u8 *)&val, sizeof(u16)); 169 u16 buff;
173}
174 170
175static inline int spu_write_u32(struct if_spi_card *card, u16 reg, u32 val) 171 buff = cpu_to_le16(val);
176{ 172 return spu_write(card, reg, (u8 *)&buff, sizeof(u16));
177 /* The lower 16 bits are written first. */
178 u16 out[2];
179 out[0] = val & 0xffff;
180 out[1] = (val & 0xffff0000) >> 16;
181 return spu_write(card, reg, (u8 *)&out, sizeof(u32));
182} 173}
183 174
184static inline int spu_reg_is_port_reg(u16 reg) 175static inline int spu_reg_is_port_reg(u16 reg)
@@ -195,10 +186,13 @@ static inline int spu_reg_is_port_reg(u16 reg)
195 186
196static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len) 187static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
197{ 188{
198 unsigned int i, delay; 189 unsigned int delay;
199 int err = 0; 190 int err = 0;
200 u16 zero = 0; 191 u16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK);
201 u16 reg_out = reg | IF_SPI_READ_OPERATION_MASK; 192 struct spi_message m;
193 struct spi_transfer reg_trans;
194 struct spi_transfer dummy_trans;
195 struct spi_transfer data_trans;
202 196
203 /* You must take an even number of bytes from the SPU, even if you 197 /* You must take an even number of bytes from the SPU, even if you
204 * don't care about the last one. */ 198 * don't care about the last one. */
@@ -206,29 +200,34 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
206 200
207 spu_transaction_init(card); 201 spu_transaction_init(card);
208 202
203 spi_message_init(&m);
204 memset(&reg_trans, 0, sizeof(reg_trans));
205 memset(&dummy_trans, 0, sizeof(dummy_trans));
206 memset(&data_trans, 0, sizeof(data_trans));
207
209 /* write SPU register index */ 208 /* write SPU register index */
210 err = spi_write(card->spi, (u8 *)&reg_out, sizeof(u16)); 209 reg_trans.tx_buf = &reg_out;
211 if (err) 210 reg_trans.len = sizeof(reg_out);
212 goto out; 211 spi_message_add_tail(&reg_trans, &m);
213 212
214 delay = spu_reg_is_port_reg(reg) ? card->spu_port_delay : 213 delay = spu_reg_is_port_reg(reg) ? card->spu_port_delay :
215 card->spu_reg_delay; 214 card->spu_reg_delay;
216 if (card->use_dummy_writes) { 215 if (card->use_dummy_writes) {
217 /* Clock in dummy cycles while the SPU fills the FIFO */ 216 /* Clock in dummy cycles while the SPU fills the FIFO */
218 for (i = 0; i < delay / 16; ++i) { 217 dummy_trans.len = delay / 8;
219 err = spi_write(card->spi, (u8 *)&zero, sizeof(u16)); 218 spi_message_add_tail(&dummy_trans, &m);
220 if (err)
221 return err;
222 }
223 } else { 219 } else {
224 /* Busy-wait while the SPU fills the FIFO */ 220 /* Busy-wait while the SPU fills the FIFO */
225 ndelay(100 + (delay * 10)); 221 reg_trans.delay_usecs =
222 DIV_ROUND_UP((100 + (delay * 10)), 1000);
226 } 223 }
227 224
228 /* read in data */ 225 /* read in data */
229 err = spi_read(card->spi, buf, len); 226 data_trans.rx_buf = buf;
227 data_trans.len = len;
228 spi_message_add_tail(&data_trans, &m);
230 229
231out: 230 err = spi_sync(card->spi, &m);
232 spu_transaction_finish(card); 231 spu_transaction_finish(card);
233 return err; 232 return err;
234} 233}
@@ -236,18 +235,25 @@ out:
236/* Read 16 bits from an SPI register */ 235/* Read 16 bits from an SPI register */
237static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val) 236static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
238{ 237{
239 return spu_read(card, reg, (u8 *)val, sizeof(u16)); 238 u16 buf;
239 int ret;
240
241 ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
242 if (ret == 0)
243 *val = le16_to_cpup(&buf);
244 return ret;
240} 245}
241 246
242/* Read 32 bits from an SPI register. 247/* Read 32 bits from an SPI register.
243 * The low 16 bits are read first. */ 248 * The low 16 bits are read first. */
244static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val) 249static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
245{ 250{
246 u16 buf[2]; 251 u32 buf;
247 int err; 252 int err;
248 err = spu_read(card, reg, (u8 *)buf, sizeof(u32)); 253
254 err = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
249 if (!err) 255 if (!err)
250 *val = buf[0] | (buf[1] << 16); 256 *val = le32_to_cpup(&buf);
251 return err; 257 return err;
252} 258}
253 259
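
spu_write() and spu_read() used to issue several independent spi_write()/spi_read() calls per register access; they now queue the register word, optional dummy clocks and the data as spi_transfers on one spi_message and submit it with spi_sync(). Since the SPI core keeps chip select asserted between the transfers of a single message (unless cs_change is set), this is presumably also why the hand-rolled GPIO chip-select handling is dropped elsewhere in this file. A minimal sketch of the write side, assuming kernel context:

/* One spi_message carrying the register index and the payload as two
 * chained transfers, so chip select stays asserted for the whole
 * exchange. Mirrors the new spu_write() above. */
static int example_spu_write(struct spi_device *spi, u16 reg,
			     const u8 *buf, int len)
{
	__le16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK);
	struct spi_transfer reg_trans = {
		.tx_buf = &reg_out,
		.len    = sizeof(reg_out),
	};
	struct spi_transfer data_trans = {
		.tx_buf = buf,
		.len    = len,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&reg_trans, &m);
	spi_message_add_tail(&data_trans, &m);

	/* spi_sync() submits the whole message and sleeps until it completes. */
	return spi_sync(spi, &m);
}

One small nit: the sketch declares the register word as __le16, whereas the patch stores the cpu_to_le16() result in a plain u16; that works, but endianness checkers such as sparse would typically warn about it.
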
@@ -1051,7 +1057,6 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1051 spi_set_drvdata(spi, card); 1057 spi_set_drvdata(spi, card);
1052 card->pdata = pdata; 1058 card->pdata = pdata;
1053 card->spi = spi; 1059 card->spi = spi;
1054 card->gpio_cs = pdata->gpio_cs;
1055 card->prev_xfer_time = jiffies; 1060 card->prev_xfer_time = jiffies;
1056 1061
1057 sema_init(&card->spi_ready, 0); 1062 sema_init(&card->spi_ready, 0);
@@ -1060,26 +1065,18 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1060 INIT_LIST_HEAD(&card->data_packet_list); 1065 INIT_LIST_HEAD(&card->data_packet_list);
1061 spin_lock_init(&card->buffer_lock); 1066 spin_lock_init(&card->buffer_lock);
1062 1067
1063 /* set up GPIO CS line. TODO: use regular CS line */
1064 err = gpio_request(card->gpio_cs, "if_spi_gpio_chip_select");
1065 if (err)
1066 goto free_card;
1067 err = gpio_direction_output(card->gpio_cs, 1);
1068 if (err)
1069 goto free_gpio;
1070
1071 /* Initialize the SPI Interface Unit */ 1068 /* Initialize the SPI Interface Unit */
1072 err = spu_init(card, pdata->use_dummy_writes); 1069 err = spu_init(card, pdata->use_dummy_writes);
1073 if (err) 1070 if (err)
1074 goto free_gpio; 1071 goto free_card;
1075 err = spu_get_chip_revision(card, &card->card_id, &card->card_rev); 1072 err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
1076 if (err) 1073 if (err)
1077 goto free_gpio; 1074 goto free_card;
1078 1075
1079 /* Firmware load */ 1076 /* Firmware load */
1080 err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch); 1077 err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
1081 if (err) 1078 if (err)
1082 goto free_gpio; 1079 goto free_card;
1083 if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC) 1080 if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
1084 lbs_deb_spi("Firmware is already loaded for " 1081 lbs_deb_spi("Firmware is already loaded for "
1085 "Marvell WLAN 802.11 adapter\n"); 1082 "Marvell WLAN 802.11 adapter\n");
@@ -1087,7 +1084,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1087 err = if_spi_calculate_fw_names(card->card_id, 1084 err = if_spi_calculate_fw_names(card->card_id,
1088 card->helper_fw_name, card->main_fw_name); 1085 card->helper_fw_name, card->main_fw_name);
1089 if (err) 1086 if (err)
1090 goto free_gpio; 1087 goto free_card;
1091 1088
1092 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter " 1089 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
1093 "(chip_id = 0x%04x, chip_rev = 0x%02x) " 1090 "(chip_id = 0x%04x, chip_rev = 0x%02x) "
@@ -1098,23 +1095,23 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1098 spi->max_speed_hz); 1095 spi->max_speed_hz);
1099 err = if_spi_prog_helper_firmware(card); 1096 err = if_spi_prog_helper_firmware(card);
1100 if (err) 1097 if (err)
1101 goto free_gpio; 1098 goto free_card;
1102 err = if_spi_prog_main_firmware(card); 1099 err = if_spi_prog_main_firmware(card);
1103 if (err) 1100 if (err)
1104 goto free_gpio; 1101 goto free_card;
1105 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n"); 1102 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
1106 } 1103 }
1107 1104
1108 err = spu_set_interrupt_mode(card, 0, 1); 1105 err = spu_set_interrupt_mode(card, 0, 1);
1109 if (err) 1106 if (err)
1110 goto free_gpio; 1107 goto free_card;
1111 1108
1112 /* Register our card with libertas. 1109 /* Register our card with libertas.
1113 * This will call alloc_etherdev */ 1110 * This will call alloc_etherdev */
1114 priv = lbs_add_card(card, &spi->dev); 1111 priv = lbs_add_card(card, &spi->dev);
1115 if (!priv) { 1112 if (!priv) {
1116 err = -ENOMEM; 1113 err = -ENOMEM;
1117 goto free_gpio; 1114 goto free_card;
1118 } 1115 }
1119 card->priv = priv; 1116 card->priv = priv;
1120 priv->card = card; 1117 priv->card = card;
@@ -1159,8 +1156,6 @@ terminate_thread:
1159 if_spi_terminate_spi_thread(card); 1156 if_spi_terminate_spi_thread(card);
1160remove_card: 1157remove_card:
1161 lbs_remove_card(priv); /* will call free_netdev */ 1158 lbs_remove_card(priv); /* will call free_netdev */
1162free_gpio:
1163 gpio_free(card->gpio_cs);
1164free_card: 1159free_card:
1165 free_if_spi_card(card); 1160 free_if_spi_card(card);
1166out: 1161out:
@@ -1181,7 +1176,6 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
1181 free_irq(spi->irq, card); 1176 free_irq(spi->irq, card);
1182 if_spi_terminate_spi_thread(card); 1177 if_spi_terminate_spi_thread(card);
1183 lbs_remove_card(priv); /* will call free_netdev */ 1178 lbs_remove_card(priv); /* will call free_netdev */
1184 gpio_free(card->gpio_cs);
1185 if (card->pdata->teardown) 1179 if (card->pdata->teardown)
1186 card->pdata->teardown(spi); 1180 card->pdata->teardown(spi);
1187 free_if_spi_card(card); 1181 free_if_spi_card(card);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index a58a12352672..89575e448015 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -1002,17 +1002,9 @@ static int lbs_setup_firmware(struct lbs_private *priv)
1002{ 1002{
1003 int ret = -1; 1003 int ret = -1;
1004 s16 curlevel = 0, minlevel = 0, maxlevel = 0; 1004 s16 curlevel = 0, minlevel = 0, maxlevel = 0;
1005 struct cmd_header cmd;
1006 1005
1007 lbs_deb_enter(LBS_DEB_FW); 1006 lbs_deb_enter(LBS_DEB_FW);
1008 1007
1009 if (priv->fn_init_required) {
1010 memset(&cmd, 0, sizeof(cmd));
1011 if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd),
1012 lbs_cmd_copyback, (unsigned long) &cmd))
1013 lbs_pr_alert("CMD_FUNC_INIT command failed\n");
1014 }
1015
1016 /* Read MAC address from firmware */ 1008 /* Read MAC address from firmware */
1017 memset(priv->current_addr, 0xff, ETH_ALEN); 1009 memset(priv->current_addr, 0xff, ETH_ALEN);
1018 ret = lbs_update_hw_spec(priv); 1010 ret = lbs_update_hw_spec(priv);
@@ -1200,9 +1192,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1200 priv->mesh_open = 0; 1192 priv->mesh_open = 0;
1201 priv->infra_open = 0; 1193 priv->infra_open = 0;
1202 1194
1203 priv->fn_init_required = 0;
1204 priv->fn_shutdown_required = 0;
1205
1206 /* Setup the OS Interface to our functions */ 1195 /* Setup the OS Interface to our functions */
1207 dev->netdev_ops = &lbs_netdev_ops; 1196 dev->netdev_ops = &lbs_netdev_ops;
1208 dev->watchdog_timeo = 5 * HZ; 1197 dev->watchdog_timeo = 5 * HZ;
@@ -1384,20 +1373,11 @@ void lbs_stop_card(struct lbs_private *priv)
1384 struct net_device *dev; 1373 struct net_device *dev;
1385 struct cmd_ctrl_node *cmdnode; 1374 struct cmd_ctrl_node *cmdnode;
1386 unsigned long flags; 1375 unsigned long flags;
1387 struct cmd_header cmd;
1388 1376
1389 lbs_deb_enter(LBS_DEB_MAIN); 1377 lbs_deb_enter(LBS_DEB_MAIN);
1390 1378
1391 if (!priv) 1379 if (!priv)
1392 goto out; 1380 goto out;
1393
1394 if (priv->fn_shutdown_required) {
1395 memset(&cmd, 0, sizeof(cmd));
1396 if (__lbs_cmd(priv, CMD_FUNC_SHUTDOWN, &cmd, sizeof(cmd),
1397 lbs_cmd_copyback, (unsigned long) &cmd))
1398 lbs_pr_alert("CMD_FUNC_SHUTDOWN command failed\n");
1399 }
1400
1401 dev = priv->dev; 1381 dev = priv->dev;
1402 1382
1403 netif_stop_queue(dev); 1383 netif_stop_queue(dev);
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 8124db36aaff..601b54249677 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -27,12 +27,12 @@
27 + 40) /* 40 for WPAIE */ 27 + 40) /* 40 for WPAIE */
28 28
29//! Memory needed to store a max sized channel List TLV for a firmware scan 29//! Memory needed to store a max sized channel List TLV for a firmware scan
30#define CHAN_TLV_MAX_SIZE (sizeof(struct mrvlietypesheader) \ 30#define CHAN_TLV_MAX_SIZE (sizeof(struct mrvl_ie_header) \
31 + (MRVDRV_MAX_CHANNELS_PER_SCAN \ 31 + (MRVDRV_MAX_CHANNELS_PER_SCAN \
32 * sizeof(struct chanscanparamset))) 32 * sizeof(struct chanscanparamset)))
33 33
34//! Memory needed to store a max number/size SSID TLV for a firmware scan 34//! Memory needed to store a max number/size SSID TLV for a firmware scan
35#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvlietypes_ssidparamset)) 35#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvl_ie_ssid_param_set))
36 36
37//! Maximum memory needed for a cmd_ds_802_11_scan with all TLVs at max 37//! Maximum memory needed for a cmd_ds_802_11_scan with all TLVs at max
38#define MAX_SCAN_CFG_ALLOC (sizeof(struct cmd_ds_802_11_scan) \ 38#define MAX_SCAN_CFG_ALLOC (sizeof(struct cmd_ds_802_11_scan) \
@@ -211,7 +211,7 @@ static int lbs_scan_create_channel_list(struct lbs_private *priv,
211 */ 211 */
212static int lbs_scan_add_ssid_tlv(struct lbs_private *priv, u8 *tlv) 212static int lbs_scan_add_ssid_tlv(struct lbs_private *priv, u8 *tlv)
213{ 213{
214 struct mrvlietypes_ssidparamset *ssid_tlv = (void *)tlv; 214 struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv;
215 215
216 ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID); 216 ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID);
217 ssid_tlv->header.len = cpu_to_le16(priv->scan_ssid_len); 217 ssid_tlv->header.len = cpu_to_le16(priv->scan_ssid_len);
@@ -249,7 +249,7 @@ static int lbs_scan_add_chanlist_tlv(uint8_t *tlv,
249 int chan_count) 249 int chan_count)
250{ 250{
251 size_t size = sizeof(struct chanscanparamset) *chan_count; 251 size_t size = sizeof(struct chanscanparamset) *chan_count;
252 struct mrvlietypes_chanlistparamset *chan_tlv = (void *)tlv; 252 struct mrvl_ie_chanlist_param_set *chan_tlv = (void *)tlv;
253 253
254 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); 254 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
255 memcpy(chan_tlv->chanscanparam, chan_list, size); 255 memcpy(chan_tlv->chanscanparam, chan_list, size);
@@ -270,7 +270,7 @@ static int lbs_scan_add_chanlist_tlv(uint8_t *tlv,
270static int lbs_scan_add_rates_tlv(uint8_t *tlv) 270static int lbs_scan_add_rates_tlv(uint8_t *tlv)
271{ 271{
272 int i; 272 int i;
273 struct mrvlietypes_ratesparamset *rate_tlv = (void *)tlv; 273 struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
274 274
275 rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); 275 rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
276 tlv += sizeof(rate_tlv->header); 276 tlv += sizeof(rate_tlv->header);
@@ -513,12 +513,12 @@ void lbs_scan_worker(struct work_struct *work)
513static int lbs_process_bss(struct bss_descriptor *bss, 513static int lbs_process_bss(struct bss_descriptor *bss,
514 uint8_t **pbeaconinfo, int *bytesleft) 514 uint8_t **pbeaconinfo, int *bytesleft)
515{ 515{
516 struct ieeetypes_fhparamset *pFH; 516 struct ieee_ie_fh_param_set *fh;
517 struct ieeetypes_dsparamset *pDS; 517 struct ieee_ie_ds_param_set *ds;
518 struct ieeetypes_cfparamset *pCF; 518 struct ieee_ie_cf_param_set *cf;
519 struct ieeetypes_ibssparamset *pibss; 519 struct ieee_ie_ibss_param_set *ibss;
520 DECLARE_SSID_BUF(ssid); 520 DECLARE_SSID_BUF(ssid);
521 struct ieeetypes_countryinfoset *pcountryinfo; 521 struct ieee_ie_country_info_set *pcountryinfo;
522 uint8_t *pos, *end, *p; 522 uint8_t *pos, *end, *p;
523 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; 523 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
524 uint16_t beaconsize = 0; 524 uint16_t beaconsize = 0;
@@ -616,50 +616,49 @@ static int lbs_process_bss(struct bss_descriptor *bss,
616 break; 616 break;
617 617
618 case WLAN_EID_FH_PARAMS: 618 case WLAN_EID_FH_PARAMS:
619 pFH = (struct ieeetypes_fhparamset *) pos; 619 fh = (struct ieee_ie_fh_param_set *) pos;
620 memmove(&bss->phyparamset.fhparamset, pFH, 620 memcpy(&bss->phy.fh, fh, sizeof(*fh));
621 sizeof(struct ieeetypes_fhparamset));
622 lbs_deb_scan("got FH IE\n"); 621 lbs_deb_scan("got FH IE\n");
623 break; 622 break;
624 623
625 case WLAN_EID_DS_PARAMS: 624 case WLAN_EID_DS_PARAMS:
626 pDS = (struct ieeetypes_dsparamset *) pos; 625 ds = (struct ieee_ie_ds_param_set *) pos;
627 bss->channel = pDS->currentchan; 626 bss->channel = ds->channel;
628 memcpy(&bss->phyparamset.dsparamset, pDS, 627 memcpy(&bss->phy.ds, ds, sizeof(*ds));
629 sizeof(struct ieeetypes_dsparamset));
630 lbs_deb_scan("got DS IE, channel %d\n", bss->channel); 628 lbs_deb_scan("got DS IE, channel %d\n", bss->channel);
631 break; 629 break;
632 630
633 case WLAN_EID_CF_PARAMS: 631 case WLAN_EID_CF_PARAMS:
634 pCF = (struct ieeetypes_cfparamset *) pos; 632 cf = (struct ieee_ie_cf_param_set *) pos;
635 memcpy(&bss->ssparamset.cfparamset, pCF, 633 memcpy(&bss->ss.cf, cf, sizeof(*cf));
636 sizeof(struct ieeetypes_cfparamset));
637 lbs_deb_scan("got CF IE\n"); 634 lbs_deb_scan("got CF IE\n");
638 break; 635 break;
639 636
640 case WLAN_EID_IBSS_PARAMS: 637 case WLAN_EID_IBSS_PARAMS:
641 pibss = (struct ieeetypes_ibssparamset *) pos; 638 ibss = (struct ieee_ie_ibss_param_set *) pos;
642 bss->atimwindow = le16_to_cpu(pibss->atimwindow); 639 bss->atimwindow = ibss->atimwindow;
643 memmove(&bss->ssparamset.ibssparamset, pibss, 640 memcpy(&bss->ss.ibss, ibss, sizeof(*ibss));
644 sizeof(struct ieeetypes_ibssparamset));
645 lbs_deb_scan("got IBSS IE\n"); 641 lbs_deb_scan("got IBSS IE\n");
646 break; 642 break;
647 643
648 case WLAN_EID_COUNTRY: 644 case WLAN_EID_COUNTRY:
649 pcountryinfo = (struct ieeetypes_countryinfoset *) pos; 645 pcountryinfo = (struct ieee_ie_country_info_set *) pos;
650 lbs_deb_scan("got COUNTRY IE\n"); 646 lbs_deb_scan("got COUNTRY IE\n");
651 if (pcountryinfo->len < sizeof(pcountryinfo->countrycode) 647 if (pcountryinfo->header.len < sizeof(pcountryinfo->countrycode)
652 || pcountryinfo->len > 254) { 648 || pcountryinfo->header.len > 254) {
653 lbs_deb_scan("process_bss: 11D- Err CountryInfo len %d, min %zd, max 254\n", 649 lbs_deb_scan("%s: 11D- Err CountryInfo len %d, min %zd, max 254\n",
654 pcountryinfo->len, sizeof(pcountryinfo->countrycode)); 650 __func__,
651 pcountryinfo->header.len,
652 sizeof(pcountryinfo->countrycode));
655 ret = -1; 653 ret = -1;
656 goto done; 654 goto done;
657 } 655 }
658 656
659 memcpy(&bss->countryinfo, pcountryinfo, pcountryinfo->len + 2); 657 memcpy(&bss->countryinfo, pcountryinfo,
658 pcountryinfo->header.len + 2);
660 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo", 659 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo",
661 (uint8_t *) pcountryinfo, 660 (uint8_t *) pcountryinfo,
662 (int) (pcountryinfo->len + 2)); 661 (int) (pcountryinfo->header.len + 2));
663 break; 662 break;
664 663
665 case WLAN_EID_EXT_SUPP_RATES: 664 case WLAN_EID_EXT_SUPP_RATES:
@@ -1130,7 +1129,7 @@ static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
1130 goto done; 1129 goto done;
1131 } 1130 }
1132 1131
1133 bytesleft = le16_to_cpu(scanresp->bssdescriptsize); 1132 bytesleft = get_unaligned_le16(&scanresp->bssdescriptsize);
1134 lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft); 1133 lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft);
1135 1134
1136 scanrespsize = le16_to_cpu(resp->size); 1135 scanrespsize = le16_to_cpu(resp->size);
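
lbs_process_bss() now casts each beacon information element onto the ieee_ie_* structures defined in types.h below, all of which begin with the two-byte ieee_ie_header (id, len). The underlying walk, as a standalone sketch that extracts the channel from a DS Parameter Set element (WLAN_EID_DS_PARAMS is 3 per the 802.11 element IDs):

#include <stdint.h>
#include <stddef.h>

/* Two-byte header every 802.11 information element starts with,
 * as struct ieee_ie_header in types.h. */
struct ie_header {
	uint8_t id;
	uint8_t len;
} __attribute__ ((packed));

#define WLAN_EID_DS_PARAMS 3	/* DS Parameter Set: current channel */

/* Walk the IE area of a beacon/probe response and pull out the channel,
 * using the same id/len stepping as lbs_process_bss(). Returns 0 if the
 * element is absent or truncated. */
static uint8_t find_channel(const uint8_t *ies, size_t len)
{
	const uint8_t *pos = ies;
	const uint8_t *end = ies + len;

	while (pos + sizeof(struct ie_header) <= end) {
		const struct ie_header *hdr = (const struct ie_header *)pos;

		if (pos + sizeof(*hdr) + hdr->len > end)
			break;				/* truncated element */
		if (hdr->id == WLAN_EID_DS_PARAMS && hdr->len >= 1)
			return pos[sizeof(*hdr)];	/* first payload byte */
		pos += sizeof(*hdr) + hdr->len;
	}
	return 0;
}
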
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index de03b9c9c204..99905df65b25 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -8,9 +8,14 @@
8#include <asm/byteorder.h> 8#include <asm/byteorder.h>
9#include <linux/wireless.h> 9#include <linux/wireless.h>
10 10
11struct ieeetypes_cfparamset { 11struct ieee_ie_header {
12 u8 elementid; 12 u8 id;
13 u8 len; 13 u8 len;
14} __attribute__ ((packed));
15
16struct ieee_ie_cf_param_set {
17 struct ieee_ie_header header;
18
14 u8 cfpcnt; 19 u8 cfpcnt;
15 u8 cfpperiod; 20 u8 cfpperiod;
16 __le16 cfpmaxduration; 21 __le16 cfpmaxduration;
@@ -18,42 +23,35 @@ struct ieeetypes_cfparamset {
18} __attribute__ ((packed)); 23} __attribute__ ((packed));
19 24
20 25
21struct ieeetypes_ibssparamset { 26struct ieee_ie_ibss_param_set {
22 u8 elementid; 27 struct ieee_ie_header header;
23 u8 len; 28
24 __le16 atimwindow; 29 __le16 atimwindow;
25} __attribute__ ((packed)); 30} __attribute__ ((packed));
26 31
27union IEEEtypes_ssparamset { 32union ieee_ss_param_set {
28 struct ieeetypes_cfparamset cfparamset; 33 struct ieee_ie_cf_param_set cf;
29 struct ieeetypes_ibssparamset ibssparamset; 34 struct ieee_ie_ibss_param_set ibss;
30} __attribute__ ((packed)); 35} __attribute__ ((packed));
31 36
32struct ieeetypes_fhparamset { 37struct ieee_ie_fh_param_set {
33 u8 elementid; 38 struct ieee_ie_header header;
34 u8 len; 39
35 __le16 dwelltime; 40 __le16 dwelltime;
36 u8 hopset; 41 u8 hopset;
37 u8 hoppattern; 42 u8 hoppattern;
38 u8 hopindex; 43 u8 hopindex;
39} __attribute__ ((packed)); 44} __attribute__ ((packed));
40 45
41struct ieeetypes_dsparamset { 46struct ieee_ie_ds_param_set {
42 u8 elementid; 47 struct ieee_ie_header header;
43 u8 len;
44 u8 currentchan;
45} __attribute__ ((packed));
46 48
47union ieeetypes_phyparamset { 49 u8 channel;
48 struct ieeetypes_fhparamset fhparamset;
49 struct ieeetypes_dsparamset dsparamset;
50} __attribute__ ((packed)); 50} __attribute__ ((packed));
51 51
52struct ieeetypes_assocrsp { 52union ieee_phy_param_set {
53 __le16 capability; 53 struct ieee_ie_fh_param_set fh;
54 __le16 statuscode; 54 struct ieee_ie_ds_param_set ds;
55 __le16 aid;
56 u8 iebuffer[1];
57} __attribute__ ((packed)); 55} __attribute__ ((packed));
58 56
59/** TLV type ID definition */ 57/** TLV type ID definition */
@@ -94,32 +92,33 @@ struct ieeetypes_assocrsp {
94#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19) 92#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
95#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22) 93#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
96#define TLV_TYPE_SNR_HIGH (PROPRIETARY_TLV_BASE_ID + 23) 94#define TLV_TYPE_SNR_HIGH (PROPRIETARY_TLV_BASE_ID + 23)
95#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
97#define TLV_TYPE_MESH_ID (PROPRIETARY_TLV_BASE_ID + 37) 96#define TLV_TYPE_MESH_ID (PROPRIETARY_TLV_BASE_ID + 37)
98#define TLV_TYPE_OLD_MESH_ID (PROPRIETARY_TLV_BASE_ID + 291) 97#define TLV_TYPE_OLD_MESH_ID (PROPRIETARY_TLV_BASE_ID + 291)
99 98
100/** TLV related data structures*/ 99/** TLV related data structures*/
101struct mrvlietypesheader { 100struct mrvl_ie_header {
102 __le16 type; 101 __le16 type;
103 __le16 len; 102 __le16 len;
104} __attribute__ ((packed)); 103} __attribute__ ((packed));
105 104
106struct mrvlietypes_data { 105struct mrvl_ie_data {
107 struct mrvlietypesheader header; 106 struct mrvl_ie_header header;
108 u8 Data[1]; 107 u8 Data[1];
109} __attribute__ ((packed)); 108} __attribute__ ((packed));
110 109
111struct mrvlietypes_ratesparamset { 110struct mrvl_ie_rates_param_set {
112 struct mrvlietypesheader header; 111 struct mrvl_ie_header header;
113 u8 rates[1]; 112 u8 rates[1];
114} __attribute__ ((packed)); 113} __attribute__ ((packed));
115 114
116struct mrvlietypes_ssidparamset { 115struct mrvl_ie_ssid_param_set {
117 struct mrvlietypesheader header; 116 struct mrvl_ie_header header;
118 u8 ssid[1]; 117 u8 ssid[1];
119} __attribute__ ((packed)); 118} __attribute__ ((packed));
120 119
121struct mrvlietypes_wildcardssidparamset { 120struct mrvl_ie_wildcard_ssid_param_set {
122 struct mrvlietypesheader header; 121 struct mrvl_ie_header header;
123 u8 MaxSsidlength; 122 u8 MaxSsidlength;
124 u8 ssid[1]; 123 u8 ssid[1];
125} __attribute__ ((packed)); 124} __attribute__ ((packed));
@@ -144,91 +143,72 @@ struct chanscanparamset {
144 __le16 maxscantime; 143 __le16 maxscantime;
145} __attribute__ ((packed)); 144} __attribute__ ((packed));
146 145
147struct mrvlietypes_chanlistparamset { 146struct mrvl_ie_chanlist_param_set {
148 struct mrvlietypesheader header; 147 struct mrvl_ie_header header;
149 struct chanscanparamset chanscanparam[1]; 148 struct chanscanparamset chanscanparam[1];
150} __attribute__ ((packed)); 149} __attribute__ ((packed));
151 150
152struct cfparamset { 151struct mrvl_ie_cf_param_set {
152 struct mrvl_ie_header header;
153 u8 cfpcnt; 153 u8 cfpcnt;
154 u8 cfpperiod; 154 u8 cfpperiod;
155 __le16 cfpmaxduration; 155 __le16 cfpmaxduration;
156 __le16 cfpdurationremaining; 156 __le16 cfpdurationremaining;
157} __attribute__ ((packed)); 157} __attribute__ ((packed));
158 158
159struct ibssparamset { 159struct mrvl_ie_ds_param_set {
160 __le16 atimwindow; 160 struct mrvl_ie_header header;
161} __attribute__ ((packed)); 161 u8 channel;
162
163struct mrvlietypes_ssparamset {
164 struct mrvlietypesheader header;
165 union {
166 struct cfparamset cfparamset[1];
167 struct ibssparamset ibssparamset[1];
168 } cf_ibss;
169} __attribute__ ((packed)); 162} __attribute__ ((packed));
170 163
171struct fhparamset { 164struct mrvl_ie_rsn_param_set {
172 __le16 dwelltime; 165 struct mrvl_ie_header header;
173 u8 hopset;
174 u8 hoppattern;
175 u8 hopindex;
176} __attribute__ ((packed));
177
178struct dsparamset {
179 u8 currentchan;
180} __attribute__ ((packed));
181
182struct mrvlietypes_phyparamset {
183 struct mrvlietypesheader header;
184 union {
185 struct fhparamset fhparamset[1];
186 struct dsparamset dsparamset[1];
187 } fh_ds;
188} __attribute__ ((packed));
189
190struct mrvlietypes_rsnparamset {
191 struct mrvlietypesheader header;
192 u8 rsnie[1]; 166 u8 rsnie[1];
193} __attribute__ ((packed)); 167} __attribute__ ((packed));
194 168
195struct mrvlietypes_tsftimestamp { 169struct mrvl_ie_tsf_timestamp {
196 struct mrvlietypesheader header; 170 struct mrvl_ie_header header;
197 __le64 tsftable[1]; 171 __le64 tsftable[1];
198} __attribute__ ((packed)); 172} __attribute__ ((packed));
199 173
174/* v9 and later firmware only */
175struct mrvl_ie_auth_type {
176 struct mrvl_ie_header header;
177 __le16 auth;
178} __attribute__ ((packed));
179
200/** Local Power capability */ 180/** Local Power capability */
201struct mrvlietypes_powercapability { 181struct mrvl_ie_power_capability {
202 struct mrvlietypesheader header; 182 struct mrvl_ie_header header;
203 s8 minpower; 183 s8 minpower;
204 s8 maxpower; 184 s8 maxpower;
205} __attribute__ ((packed)); 185} __attribute__ ((packed));
206 186
207/* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */ 187/* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */
208struct mrvlietypes_thresholds { 188struct mrvl_ie_thresholds {
209 struct mrvlietypesheader header; 189 struct mrvl_ie_header header;
210 u8 value; 190 u8 value;
211 u8 freq; 191 u8 freq;
212} __attribute__ ((packed)); 192} __attribute__ ((packed));
213 193
214struct mrvlietypes_beaconsmissed { 194struct mrvl_ie_beacons_missed {
215 struct mrvlietypesheader header; 195 struct mrvl_ie_header header;
216 u8 beaconmissed; 196 u8 beaconmissed;
217 u8 reserved; 197 u8 reserved;
218} __attribute__ ((packed)); 198} __attribute__ ((packed));
219 199
220struct mrvlietypes_numprobes { 200struct mrvl_ie_num_probes {
221 struct mrvlietypesheader header; 201 struct mrvl_ie_header header;
222 __le16 numprobes; 202 __le16 numprobes;
223} __attribute__ ((packed)); 203} __attribute__ ((packed));
224 204
225struct mrvlietypes_bcastprobe { 205struct mrvl_ie_bcast_probe {
226 struct mrvlietypesheader header; 206 struct mrvl_ie_header header;
227 __le16 bcastprobe; 207 __le16 bcastprobe;
228} __attribute__ ((packed)); 208} __attribute__ ((packed));
229 209
230struct mrvlietypes_numssidprobe { 210struct mrvl_ie_num_ssid_probe {
231 struct mrvlietypesheader header; 211 struct mrvl_ie_header header;
232 __le16 numssidprobe; 212 __le16 numssidprobe;
233} __attribute__ ((packed)); 213} __attribute__ ((packed));
234 214
@@ -237,8 +217,8 @@ struct led_pin {
237 u8 pin; 217 u8 pin;
238} __attribute__ ((packed)); 218} __attribute__ ((packed));
239 219
240struct mrvlietypes_ledgpio { 220struct mrvl_ie_ledgpio {
241 struct mrvlietypesheader header; 221 struct mrvl_ie_header header;
242 struct led_pin ledpin[1]; 222 struct led_pin ledpin[1];
243} __attribute__ ((packed)); 223} __attribute__ ((packed));
244 224
@@ -250,8 +230,8 @@ struct led_bhv {
250} __attribute__ ((packed)); 230} __attribute__ ((packed));
251 231
252 232
253struct mrvlietypes_ledbhv { 233struct mrvl_ie_ledbhv {
254 struct mrvlietypesheader header; 234 struct mrvl_ie_header header;
255 struct led_bhv ledbhv[1]; 235 struct led_bhv ledbhv[1];
256} __attribute__ ((packed)); 236} __attribute__ ((packed));
257 237
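
The renamed ieee_ie_*/mrvl_ie_* structures describe on-the-wire layouts, so their sizes and member offsets must match what the firmware and the 802.11 elements expect. A sketch of compile-time checks one could keep next to these definitions (kernel context assumed); the checks are illustrative and not part of the patch, but the expected values follow directly from the fields shown above:

static inline void example_check_ie_layout(void)
{
	/* id + len */
	BUILD_BUG_ON(sizeof(struct ieee_ie_header) != 2);
	/* header + channel */
	BUILD_BUG_ON(sizeof(struct ieee_ie_ds_param_set) != 3);
	/* header + atimwindow */
	BUILD_BUG_ON(sizeof(struct ieee_ie_ibss_param_set) != 4);
	/* LE16 type + LE16 len */
	BUILD_BUG_ON(sizeof(struct mrvl_ie_header) != 4);
	/* value follows the 4-byte TLV header */
	BUILD_BUG_ON(offsetof(struct mrvl_ie_thresholds, value) != 4);
}
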
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 574b8bb121e1..e789c6e9938c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -280,7 +280,6 @@ struct mac80211_hwsim_data {
280 struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; 280 struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
281 281
282 struct ieee80211_channel *channel; 282 struct ieee80211_channel *channel;
283 int radio_enabled;
284 unsigned long beacon_int; /* in jiffies unit */ 283 unsigned long beacon_int; /* in jiffies unit */
285 unsigned int rx_filter; 284 unsigned int rx_filter;
286 int started; 285 int started;
@@ -418,8 +417,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
418 if (data == data2) 417 if (data == data2)
419 continue; 418 continue;
420 419
421 if (!data2->started || !data2->radio_enabled || 420 if (!data2->started || !hwsim_ps_rx_ok(data2, skb) ||
422 !hwsim_ps_rx_ok(data2, skb) ||
423 data->channel->center_freq != data2->channel->center_freq || 421 data->channel->center_freq != data2->channel->center_freq ||
424 !(data->group & data2->group)) 422 !(data->group & data2->group))
425 continue; 423 continue;
@@ -441,7 +439,6 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
441 439
442static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 440static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
443{ 441{
444 struct mac80211_hwsim_data *data = hw->priv;
445 bool ack; 442 bool ack;
446 struct ieee80211_tx_info *txi; 443 struct ieee80211_tx_info *txi;
447 444
@@ -453,13 +450,6 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
453 return NETDEV_TX_OK; 450 return NETDEV_TX_OK;
454 } 451 }
455 452
456 if (!data->radio_enabled) {
457 printk(KERN_DEBUG "%s: dropped TX frame since radio "
458 "disabled\n", wiphy_name(hw->wiphy));
459 dev_kfree_skb(skb);
460 return NETDEV_TX_OK;
461 }
462
463 ack = mac80211_hwsim_tx_frame(hw, skb); 453 ack = mac80211_hwsim_tx_frame(hw, skb);
464 454
465 txi = IEEE80211_SKB_CB(skb); 455 txi = IEEE80211_SKB_CB(skb);
@@ -546,7 +536,7 @@ static void mac80211_hwsim_beacon(unsigned long arg)
546 struct ieee80211_hw *hw = (struct ieee80211_hw *) arg; 536 struct ieee80211_hw *hw = (struct ieee80211_hw *) arg;
547 struct mac80211_hwsim_data *data = hw->priv; 537 struct mac80211_hwsim_data *data = hw->priv;
548 538
549 if (!data->started || !data->radio_enabled) 539 if (!data->started)
550 return; 540 return;
551 541
552 ieee80211_iterate_active_interfaces_atomic( 542 ieee80211_iterate_active_interfaces_atomic(
@@ -562,15 +552,14 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
562 struct mac80211_hwsim_data *data = hw->priv; 552 struct mac80211_hwsim_data *data = hw->priv;
563 struct ieee80211_conf *conf = &hw->conf; 553 struct ieee80211_conf *conf = &hw->conf;
564 554
565 printk(KERN_DEBUG "%s:%s (freq=%d radio_enabled=%d idle=%d ps=%d)\n", 555 printk(KERN_DEBUG "%s:%s (freq=%d idle=%d ps=%d)\n",
566 wiphy_name(hw->wiphy), __func__, 556 wiphy_name(hw->wiphy), __func__,
567 conf->channel->center_freq, conf->radio_enabled, 557 conf->channel->center_freq,
568 !!(conf->flags & IEEE80211_CONF_IDLE), 558 !!(conf->flags & IEEE80211_CONF_IDLE),
569 !!(conf->flags & IEEE80211_CONF_PS)); 559 !!(conf->flags & IEEE80211_CONF_PS));
570 560
571 data->channel = conf->channel; 561 data->channel = conf->channel;
572 data->radio_enabled = conf->radio_enabled; 562 if (!data->started || !data->beacon_int)
573 if (!data->started || !data->radio_enabled || !data->beacon_int)
574 del_timer(&data->beacon_timer); 563 del_timer(&data->beacon_timer);
575 else 564 else
576 mod_timer(&data->beacon_timer, jiffies + data->beacon_int); 565 mod_timer(&data->beacon_timer, jiffies + data->beacon_int);
@@ -787,8 +776,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
787 pspoll->aid = cpu_to_le16(0xc000 | vp->aid); 776 pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
788 memcpy(pspoll->bssid, vp->bssid, ETH_ALEN); 777 memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
789 memcpy(pspoll->ta, mac, ETH_ALEN); 778 memcpy(pspoll->ta, mac, ETH_ALEN);
790 if (data->radio_enabled && 779 if (!mac80211_hwsim_tx_frame(data->hw, skb))
791 !mac80211_hwsim_tx_frame(data->hw, skb))
792 printk(KERN_DEBUG "%s: PS-Poll frame not ack'ed\n", __func__); 780 printk(KERN_DEBUG "%s: PS-Poll frame not ack'ed\n", __func__);
793 dev_kfree_skb(skb); 781 dev_kfree_skb(skb);
794} 782}
@@ -819,8 +807,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
819 memcpy(hdr->addr1, vp->bssid, ETH_ALEN); 807 memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
820 memcpy(hdr->addr2, mac, ETH_ALEN); 808 memcpy(hdr->addr2, mac, ETH_ALEN);
821 memcpy(hdr->addr3, vp->bssid, ETH_ALEN); 809 memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
822 if (data->radio_enabled && 810 if (!mac80211_hwsim_tx_frame(data->hw, skb))
823 !mac80211_hwsim_tx_frame(data->hw, skb))
824 printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__); 811 printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
825 dev_kfree_skb(skb); 812 dev_kfree_skb(skb);
826} 813}
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 48d81d98e12d..b618bd14583f 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -823,30 +823,30 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
823 struct p54_tx_info *range; 823 struct p54_tx_info *range;
824 unsigned long flags; 824 unsigned long flags;
825 825
826 if (unlikely(!skb || !dev || !skb_queue_len(&priv->tx_queue))) 826 if (unlikely(!skb || !dev || skb_queue_empty(&priv->tx_queue)))
827 return; 827 return;
828 828
829 /* 829 /* There used to be a check here to see if the SKB was on the
830 * don't try to free an already unlinked skb 830 * TX queue or not. This can never happen because all SKBs we
831 * see here successfully went through p54_assign_address()
832 * which means the SKB is on the ->tx_queue.
831 */ 833 */
832 if (unlikely((!skb->next) || (!skb->prev)))
833 return;
834 834
835 spin_lock_irqsave(&priv->tx_queue.lock, flags); 835 spin_lock_irqsave(&priv->tx_queue.lock, flags);
836 info = IEEE80211_SKB_CB(skb); 836 info = IEEE80211_SKB_CB(skb);
837 range = (void *)info->rate_driver_data; 837 range = (void *)info->rate_driver_data;
838 if (skb->prev != (struct sk_buff *)&priv->tx_queue) { 838 if (!skb_queue_is_first(&priv->tx_queue, skb)) {
839 struct ieee80211_tx_info *ni; 839 struct ieee80211_tx_info *ni;
840 struct p54_tx_info *mr; 840 struct p54_tx_info *mr;
841 841
842 ni = IEEE80211_SKB_CB(skb->prev); 842 ni = IEEE80211_SKB_CB(skb_queue_prev(&priv->tx_queue, skb));
843 mr = (struct p54_tx_info *)ni->rate_driver_data; 843 mr = (struct p54_tx_info *)ni->rate_driver_data;
844 } 844 }
845 if (skb->next != (struct sk_buff *)&priv->tx_queue) { 845 if (!skb_queue_is_last(&priv->tx_queue, skb)) {
846 struct ieee80211_tx_info *ni; 846 struct ieee80211_tx_info *ni;
847 struct p54_tx_info *mr; 847 struct p54_tx_info *mr;
848 848
849 ni = IEEE80211_SKB_CB(skb->next); 849 ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue, skb));
850 mr = (struct p54_tx_info *)ni->rate_driver_data; 850 mr = (struct p54_tx_info *)ni->rate_driver_data;
851 } 851 }
852 __skb_unlink(skb, &priv->tx_queue); 852 __skb_unlink(skb, &priv->tx_queue);
@@ -864,15 +864,13 @@ static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev,
864 unsigned long flags; 864 unsigned long flags;
865 865
866 spin_lock_irqsave(&priv->tx_queue.lock, flags); 866 spin_lock_irqsave(&priv->tx_queue.lock, flags);
867 entry = priv->tx_queue.next; 867 skb_queue_walk(&priv->tx_queue, entry) {
868 while (entry != (struct sk_buff *)&priv->tx_queue) {
869 struct p54_hdr *hdr = (struct p54_hdr *) entry->data; 868 struct p54_hdr *hdr = (struct p54_hdr *) entry->data;
870 869
871 if (hdr->req_id == req_id) { 870 if (hdr->req_id == req_id) {
872 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 871 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
873 return entry; 872 return entry;
874 } 873 }
875 entry = entry->next;
876 } 874 }
877 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 875 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
878 return NULL; 876 return NULL;
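
p54 stops walking its TX queue by following skb->next/prev against the queue-head sentinel and uses the sk_buff_head helpers instead: skb_queue_walk(), skb_queue_is_first()/skb_queue_is_last(), skb_queue_next()/skb_queue_prev(), skb_peek_tail(), and skb_queue_purge() in p54_stop() below in place of the manual dequeue-and-free loop. The locked-search pattern that p54_find_tx_entry() now follows, as a sketch assuming kernel context:

/* Iterate with skb_queue_walk() under the queue lock instead of chasing
 * ->next by hand; the macro stops at the queue head automatically. */
static struct sk_buff *example_find(struct sk_buff_head *queue, __le32 req_id)
{
	struct sk_buff *entry;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	skb_queue_walk(queue, entry) {
		struct p54_hdr *hdr = (struct p54_hdr *)entry->data;

		if (hdr->req_id == req_id) {
			spin_unlock_irqrestore(&queue->lock, flags);
			return entry;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return NULL;
}
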
@@ -890,24 +888,22 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
890 int count, idx; 888 int count, idx;
891 889
892 spin_lock_irqsave(&priv->tx_queue.lock, flags); 890 spin_lock_irqsave(&priv->tx_queue.lock, flags);
893 entry = (struct sk_buff *) priv->tx_queue.next; 891 skb_queue_walk(&priv->tx_queue, entry) {
894 while (entry != (struct sk_buff *)&priv->tx_queue) {
895 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 892 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
896 struct p54_hdr *entry_hdr; 893 struct p54_hdr *entry_hdr;
897 struct p54_tx_data *entry_data; 894 struct p54_tx_data *entry_data;
898 unsigned int pad = 0, frame_len; 895 unsigned int pad = 0, frame_len;
899 896
900 range = (void *)info->rate_driver_data; 897 range = (void *)info->rate_driver_data;
901 if (range->start_addr != addr) { 898 if (range->start_addr != addr)
902 entry = entry->next;
903 continue; 899 continue;
904 }
905 900
906 if (entry->next != (struct sk_buff *)&priv->tx_queue) { 901 if (!skb_queue_is_last(&priv->tx_queue, entry)) {
907 struct ieee80211_tx_info *ni; 902 struct ieee80211_tx_info *ni;
908 struct p54_tx_info *mr; 903 struct p54_tx_info *mr;
909 904
910 ni = IEEE80211_SKB_CB(entry->next); 905 ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue,
906 entry));
911 mr = (struct p54_tx_info *)ni->rate_driver_data; 907 mr = (struct p54_tx_info *)ni->rate_driver_data;
912 } 908 }
913 909
@@ -1168,23 +1164,21 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
1168 } 1164 }
1169 } 1165 }
1170 1166
1171 entry = priv->tx_queue.next; 1167 skb_queue_walk(&priv->tx_queue, entry) {
1172 while (left--) {
1173 u32 hole_size; 1168 u32 hole_size;
1174 info = IEEE80211_SKB_CB(entry); 1169 info = IEEE80211_SKB_CB(entry);
1175 range = (void *)info->rate_driver_data; 1170 range = (void *)info->rate_driver_data;
1176 hole_size = range->start_addr - last_addr; 1171 hole_size = range->start_addr - last_addr;
1177 if (!target_skb && hole_size >= len) { 1172 if (!target_skb && hole_size >= len) {
1178 target_skb = entry->prev; 1173 target_skb = skb_queue_prev(&priv->tx_queue, entry);
1179 hole_size -= len; 1174 hole_size -= len;
1180 target_addr = last_addr; 1175 target_addr = last_addr;
1181 } 1176 }
1182 largest_hole = max(largest_hole, hole_size); 1177 largest_hole = max(largest_hole, hole_size);
1183 last_addr = range->end_addr; 1178 last_addr = range->end_addr;
1184 entry = entry->next;
1185 } 1179 }
1186 if (!target_skb && priv->rx_end - last_addr >= len) { 1180 if (!target_skb && priv->rx_end - last_addr >= len) {
1187 target_skb = priv->tx_queue.prev; 1181 target_skb = skb_peek_tail(&priv->tx_queue);
1188 largest_hole = max(largest_hole, priv->rx_end - last_addr - len); 1182 largest_hole = max(largest_hole, priv->rx_end - last_addr - len);
1189 if (!skb_queue_empty(&priv->tx_queue)) { 1183 if (!skb_queue_empty(&priv->tx_queue)) {
1190 info = IEEE80211_SKB_CB(target_skb); 1184 info = IEEE80211_SKB_CB(target_skb);
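The p54_assign_address hunk names the frame a new entry should follow via skb_queue_prev()/skb_peek_tail(); the actual insertion is not visible in this excerpt. The sketch below only shows, under stated assumptions, how such an anchor is typically consumed with __skb_queue_after() while the queue lock is held; the names and the empty-queue fallback are illustrative guesses, not code taken from the driver:

#include <linux/skbuff.h>

/*
 * Sketch: link a new frame behind a chosen anchor.  __skb_queue_after()
 * and __skb_queue_head() assume the caller already holds q->lock.
 */
static void insert_after_anchor(struct sk_buff_head *q, struct sk_buff *newsk)
{
    struct sk_buff *anchor = skb_peek_tail(q);  /* NULL when queue is empty */

    if (anchor)
        __skb_queue_after(q, anchor, newsk);
    else
        __skb_queue_head(q, newsk);
}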
@@ -2090,7 +2084,6 @@ out:
2090static void p54_stop(struct ieee80211_hw *dev) 2084static void p54_stop(struct ieee80211_hw *dev)
2091{ 2085{
2092 struct p54_common *priv = dev->priv; 2086 struct p54_common *priv = dev->priv;
2093 struct sk_buff *skb;
2094 2087
2095 mutex_lock(&priv->conf_mutex); 2088 mutex_lock(&priv->conf_mutex);
2096 priv->mode = NL80211_IFTYPE_UNSPECIFIED; 2089 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2105,8 +2098,7 @@ static void p54_stop(struct ieee80211_hw *dev)
2105 p54_tx_cancel(dev, priv->cached_beacon); 2098 p54_tx_cancel(dev, priv->cached_beacon);
2106 2099
2107 priv->stop(dev); 2100 priv->stop(dev);
2108 while ((skb = skb_dequeue(&priv->tx_queue))) 2101 skb_queue_purge(&priv->tx_queue);
2109 kfree_skb(skb);
2110 priv->cached_beacon = NULL; 2102 priv->cached_beacon = NULL;
2111 priv->tsf_high32 = priv->tsf_low32 = 0; 2103 priv->tsf_high32 = priv->tsf_low32 = 0;
2112 mutex_unlock(&priv->conf_mutex); 2104 mutex_unlock(&priv->conf_mutex);
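The p54_stop hunk folds the dequeue-and-free loop into skb_queue_purge(), which walks the queue itself and frees every queued skb. A minimal before/after sketch of that equivalence:

#include <linux/skbuff.h>

/* The open-coded loop removed on the left-hand side of the hunk ... */
static void drain_by_hand(struct sk_buff_head *q)
{
    struct sk_buff *skb;

    while ((skb = skb_dequeue(q)) != NULL)
        kfree_skb(skb);
}

/* ... and its one-call replacement; skb_queue_purge() takes q->lock itself. */
static void drain_with_helper(struct sk_buff_head *q)
{
    skb_queue_purge(q);
}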
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index f40c0f468b27..0e877a104a89 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -84,8 +84,8 @@ MODULE_DEVICE_TABLE(usb, p54u_table);
84static const struct { 84static const struct {
85 u32 intf; 85 u32 intf;
86 enum p54u_hw_type type; 86 enum p54u_hw_type type;
87 char fw[FIRMWARE_NAME_MAX]; 87 const char *fw;
88 char fw_legacy[FIRMWARE_NAME_MAX]; 88 const char *fw_legacy;
89 char hw[20]; 89 char hw[20];
90} p54u_fwlist[__NUM_P54U_HWTYPES] = { 90} p54u_fwlist[__NUM_P54U_HWTYPES] = {
91 { 91 {
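The p54usb hunk turns the fixed FIRMWARE_NAME_MAX buffers into const char * members, so string literals can back the table directly. A small sketch of the resulting table shape and a typical consumer; the structure, file names and functions below are invented for illustration, and only request_firmware() is the real API:

#include <linux/firmware.h>

struct example_fw_entry {
    const char *fw;         /* points at a string literal; no copy, no fixed max length */
    const char *fw_legacy;
};

static const struct example_fw_entry example_fwlist[] = {
    { .fw = "example/fw-new.bin", .fw_legacy = "example/fw-old.bin" },
};

/* Typical consumer: the pointer is handed straight to request_firmware(). */
static int example_load_fw(struct device *dev, const struct firmware **out)
{
    return request_firmware(out, example_fwlist[0].fw, dev);
}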
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index c254fdf446fd..7441d5585110 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -157,55 +157,55 @@ MODULE_PARM_DESC(workaround_interval,
157#define NDIS_802_11_LENGTH_RATES_EX 16 157#define NDIS_802_11_LENGTH_RATES_EX 16
158 158
159enum ndis_80211_net_type { 159enum ndis_80211_net_type {
160 ndis_80211_type_freq_hop, 160 NDIS_80211_TYPE_FREQ_HOP,
161 ndis_80211_type_direct_seq, 161 NDIS_80211_TYPE_DIRECT_SEQ,
162 ndis_80211_type_ofdm_a, 162 NDIS_80211_TYPE_OFDM_A,
163 ndis_80211_type_ofdm_g 163 NDIS_80211_TYPE_OFDM_G
164}; 164};
165 165
166enum ndis_80211_net_infra { 166enum ndis_80211_net_infra {
167 ndis_80211_infra_adhoc, 167 NDIS_80211_INFRA_ADHOC,
168 ndis_80211_infra_infra, 168 NDIS_80211_INFRA_INFRA,
169 ndis_80211_infra_auto_unknown 169 NDIS_80211_INFRA_AUTO_UNKNOWN
170}; 170};
171 171
172enum ndis_80211_auth_mode { 172enum ndis_80211_auth_mode {
173 ndis_80211_auth_open, 173 NDIS_80211_AUTH_OPEN,
174 ndis_80211_auth_shared, 174 NDIS_80211_AUTH_SHARED,
175 ndis_80211_auth_auto_switch, 175 NDIS_80211_AUTH_AUTO_SWITCH,
176 ndis_80211_auth_wpa, 176 NDIS_80211_AUTH_WPA,
177 ndis_80211_auth_wpa_psk, 177 NDIS_80211_AUTH_WPA_PSK,
178 ndis_80211_auth_wpa_none, 178 NDIS_80211_AUTH_WPA_NONE,
179 ndis_80211_auth_wpa2, 179 NDIS_80211_AUTH_WPA2,
180 ndis_80211_auth_wpa2_psk 180 NDIS_80211_AUTH_WPA2_PSK
181}; 181};
182 182
183enum ndis_80211_encr_status { 183enum ndis_80211_encr_status {
184 ndis_80211_encr_wep_enabled, 184 NDIS_80211_ENCR_WEP_ENABLED,
185 ndis_80211_encr_disabled, 185 NDIS_80211_ENCR_DISABLED,
186 ndis_80211_encr_wep_key_absent, 186 NDIS_80211_ENCR_WEP_KEY_ABSENT,
187 ndis_80211_encr_not_supported, 187 NDIS_80211_ENCR_NOT_SUPPORTED,
188 ndis_80211_encr_tkip_enabled, 188 NDIS_80211_ENCR_TKIP_ENABLED,
189 ndis_80211_encr_tkip_key_absent, 189 NDIS_80211_ENCR_TKIP_KEY_ABSENT,
190 ndis_80211_encr_ccmp_enabled, 190 NDIS_80211_ENCR_CCMP_ENABLED,
191 ndis_80211_encr_ccmp_key_absent 191 NDIS_80211_ENCR_CCMP_KEY_ABSENT
192}; 192};
193 193
194enum ndis_80211_priv_filter { 194enum ndis_80211_priv_filter {
195 ndis_80211_priv_accept_all, 195 NDIS_80211_PRIV_ACCEPT_ALL,
196 ndis_80211_priv_8021x_wep 196 NDIS_80211_PRIV_8021X_WEP
197}; 197};
198 198
199enum ndis_80211_addkey_bits { 199enum ndis_80211_addkey_bits {
200 ndis_80211_addkey_8021x_auth = cpu_to_le32(1 << 28), 200 NDIS_80211_ADDKEY_8021X_AUTH = cpu_to_le32(1 << 28),
201 ndis_80211_addkey_set_init_recv_seq = cpu_to_le32(1 << 29), 201 NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ = cpu_to_le32(1 << 29),
202 ndis_80211_addkey_pairwise_key = cpu_to_le32(1 << 30), 202 NDIS_80211_ADDKEY_PAIRWISE_KEY = cpu_to_le32(1 << 30),
203 ndis_80211_addkey_transmit_key = cpu_to_le32(1 << 31), 203 NDIS_80211_ADDKEY_TRANSMIT_KEY = cpu_to_le32(1 << 31)
204}; 204};
205 205
206enum ndis_80211_addwep_bits { 206enum ndis_80211_addwep_bits {
207 ndis_80211_addwep_perclient_key = cpu_to_le32(1 << 30), 207 NDIS_80211_ADDWEP_PERCLIENT_KEY = cpu_to_le32(1 << 30),
208 ndis_80211_addwep_transmit_key = cpu_to_le32(1 << 31), 208 NDIS_80211_ADDWEP_TRANSMIT_KEY = cpu_to_le32(1 << 31)
209}; 209};
210 210
211struct ndis_80211_ssid { 211struct ndis_80211_ssid {
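The renamed ADDKEY/ADDWEP enumerators keep their cpu_to_le32() initialisers, so each flag is already in the little-endian layout of the NDIS request structures and can be OR-ed into a __le32 field with no byte swap at the use site (as the add_wep_key hunk later does with ndis_key.index). A minimal sketch of that idiom, with illustrative names:

#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_TRANSMIT_KEY    cpu_to_le32(1 << 31)    /* constant is already __le32 */

struct example_key_req {
    __le32 index;           /* sent to the device verbatim */
};

static void example_mark_transmit_key(struct example_key_req *req)
{
    /* Both operands are little-endian values, so a plain OR is correct
     * and no le32_to_cpu()/cpu_to_le32() is needed at the use site. */
    req->index |= EXAMPLE_TRANSMIT_KEY;
}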
@@ -361,7 +361,7 @@ static const struct ieee80211_rate rndis_rates[] = {
361}; 361};
362 362
363/* RNDIS device private data */ 363/* RNDIS device private data */
364struct rndis_wext_private { 364struct rndis_wlan_private {
365 struct usbnet *usbdev; 365 struct usbnet *usbdev;
366 366
367 struct wireless_dev wdev; 367 struct wireless_dev wdev;
@@ -441,13 +441,13 @@ static const unsigned char ffff_bssid[ETH_ALEN] = { 0xff, 0xff, 0xff,
441 0xff, 0xff, 0xff }; 441 0xff, 0xff, 0xff };
442 442
443 443
444static struct rndis_wext_private *get_rndis_wext_priv(struct usbnet *dev) 444static struct rndis_wlan_private *get_rndis_wlan_priv(struct usbnet *dev)
445{ 445{
446 return (struct rndis_wext_private *)dev->driver_priv; 446 return (struct rndis_wlan_private *)dev->driver_priv;
447} 447}
448 448
449 449
450static u32 get_bcm4320_power(struct rndis_wext_private *priv) 450static u32 get_bcm4320_power(struct rndis_wlan_private *priv)
451{ 451{
452 return BCM4320_DEFAULT_TXPOWER * 452 return BCM4320_DEFAULT_TXPOWER *
453 bcm4320_power_output[priv->param_power_output] / 100; 453 bcm4320_power_output[priv->param_power_output] / 100;
@@ -480,7 +480,7 @@ static int rndis_error_status(__le32 rndis_status)
480 480
481static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len) 481static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
482{ 482{
483 struct rndis_wext_private *priv = get_rndis_wext_priv(dev); 483 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
484 union { 484 union {
485 void *buf; 485 void *buf;
486 struct rndis_msg_hdr *header; 486 struct rndis_msg_hdr *header;
@@ -526,7 +526,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
526 526
527static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len) 527static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len)
528{ 528{
529 struct rndis_wext_private *priv = get_rndis_wext_priv(dev); 529 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
530 union { 530 union {
531 void *buf; 531 void *buf;
532 struct rndis_msg_hdr *header; 532 struct rndis_msg_hdr *header;
@@ -747,7 +747,7 @@ static int get_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
747 747
748static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid) 748static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
749{ 749{
750 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 750 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
751 int ret; 751 int ret;
752 752
753 ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid)); 753 ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid));
@@ -794,7 +794,7 @@ static int is_associated(struct usbnet *usbdev)
794 794
795static int disassociate(struct usbnet *usbdev, int reset_ssid) 795static int disassociate(struct usbnet *usbdev, int reset_ssid)
796{ 796{
797 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 797 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
798 struct ndis_80211_ssid ssid; 798 struct ndis_80211_ssid ssid;
799 int i, ret = 0; 799 int i, ret = 0;
800 800
@@ -826,7 +826,7 @@ static int disassociate(struct usbnet *usbdev, int reset_ssid)
826 826
827static int set_auth_mode(struct usbnet *usbdev, int wpa_version, int authalg) 827static int set_auth_mode(struct usbnet *usbdev, int wpa_version, int authalg)
828{ 828{
829 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 829 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
830 __le32 tmp; 830 __le32 tmp;
831 int auth_mode, ret; 831 int auth_mode, ret;
832 832
@@ -835,23 +835,23 @@ static int set_auth_mode(struct usbnet *usbdev, int wpa_version, int authalg)
835 835
836 if (wpa_version & IW_AUTH_WPA_VERSION_WPA2) { 836 if (wpa_version & IW_AUTH_WPA_VERSION_WPA2) {
837 if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X) 837 if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X)
838 auth_mode = ndis_80211_auth_wpa2; 838 auth_mode = NDIS_80211_AUTH_WPA2;
839 else 839 else
840 auth_mode = ndis_80211_auth_wpa2_psk; 840 auth_mode = NDIS_80211_AUTH_WPA2_PSK;
841 } else if (wpa_version & IW_AUTH_WPA_VERSION_WPA) { 841 } else if (wpa_version & IW_AUTH_WPA_VERSION_WPA) {
842 if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X) 842 if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X)
843 auth_mode = ndis_80211_auth_wpa; 843 auth_mode = NDIS_80211_AUTH_WPA;
844 else if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_PSK) 844 else if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_PSK)
845 auth_mode = ndis_80211_auth_wpa_psk; 845 auth_mode = NDIS_80211_AUTH_WPA_PSK;
846 else 846 else
847 auth_mode = ndis_80211_auth_wpa_none; 847 auth_mode = NDIS_80211_AUTH_WPA_NONE;
848 } else if (authalg & IW_AUTH_ALG_SHARED_KEY) { 848 } else if (authalg & IW_AUTH_ALG_SHARED_KEY) {
849 if (authalg & IW_AUTH_ALG_OPEN_SYSTEM) 849 if (authalg & IW_AUTH_ALG_OPEN_SYSTEM)
850 auth_mode = ndis_80211_auth_auto_switch; 850 auth_mode = NDIS_80211_AUTH_AUTO_SWITCH;
851 else 851 else
852 auth_mode = ndis_80211_auth_shared; 852 auth_mode = NDIS_80211_AUTH_SHARED;
853 } else 853 } else
854 auth_mode = ndis_80211_auth_open; 854 auth_mode = NDIS_80211_AUTH_OPEN;
855 855
856 tmp = cpu_to_le32(auth_mode); 856 tmp = cpu_to_le32(auth_mode);
857 ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp, 857 ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp,
@@ -869,16 +869,16 @@ static int set_auth_mode(struct usbnet *usbdev, int wpa_version, int authalg)
869 869
870static int set_priv_filter(struct usbnet *usbdev) 870static int set_priv_filter(struct usbnet *usbdev)
871{ 871{
872 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 872 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
873 __le32 tmp; 873 __le32 tmp;
874 874
875 devdbg(usbdev, "set_priv_filter: wpa_version=0x%x", priv->wpa_version); 875 devdbg(usbdev, "set_priv_filter: wpa_version=0x%x", priv->wpa_version);
876 876
877 if (priv->wpa_version & IW_AUTH_WPA_VERSION_WPA2 || 877 if (priv->wpa_version & IW_AUTH_WPA_VERSION_WPA2 ||
878 priv->wpa_version & IW_AUTH_WPA_VERSION_WPA) 878 priv->wpa_version & IW_AUTH_WPA_VERSION_WPA)
879 tmp = cpu_to_le32(ndis_80211_priv_8021x_wep); 879 tmp = cpu_to_le32(NDIS_80211_PRIV_8021X_WEP);
880 else 880 else
881 tmp = cpu_to_le32(ndis_80211_priv_accept_all); 881 tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL);
882 882
883 return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp, 883 return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp,
884 sizeof(tmp)); 884 sizeof(tmp));
@@ -887,7 +887,7 @@ static int set_priv_filter(struct usbnet *usbdev)
887 887
888static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise) 888static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
889{ 889{
890 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 890 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
891 __le32 tmp; 891 __le32 tmp;
892 int encr_mode, ret; 892 int encr_mode, ret;
893 893
@@ -896,18 +896,18 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
896 groupwise); 896 groupwise);
897 897
898 if (pairwise & IW_AUTH_CIPHER_CCMP) 898 if (pairwise & IW_AUTH_CIPHER_CCMP)
899 encr_mode = ndis_80211_encr_ccmp_enabled; 899 encr_mode = NDIS_80211_ENCR_CCMP_ENABLED;
900 else if (pairwise & IW_AUTH_CIPHER_TKIP) 900 else if (pairwise & IW_AUTH_CIPHER_TKIP)
901 encr_mode = ndis_80211_encr_tkip_enabled; 901 encr_mode = NDIS_80211_ENCR_TKIP_ENABLED;
902 else if (pairwise & 902 else if (pairwise &
903 (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104)) 903 (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
904 encr_mode = ndis_80211_encr_wep_enabled; 904 encr_mode = NDIS_80211_ENCR_WEP_ENABLED;
905 else if (groupwise & IW_AUTH_CIPHER_CCMP) 905 else if (groupwise & IW_AUTH_CIPHER_CCMP)
906 encr_mode = ndis_80211_encr_ccmp_enabled; 906 encr_mode = NDIS_80211_ENCR_CCMP_ENABLED;
907 else if (groupwise & IW_AUTH_CIPHER_TKIP) 907 else if (groupwise & IW_AUTH_CIPHER_TKIP)
908 encr_mode = ndis_80211_encr_tkip_enabled; 908 encr_mode = NDIS_80211_ENCR_TKIP_ENABLED;
909 else 909 else
910 encr_mode = ndis_80211_encr_disabled; 910 encr_mode = NDIS_80211_ENCR_DISABLED;
911 911
912 tmp = cpu_to_le32(encr_mode); 912 tmp = cpu_to_le32(encr_mode);
913 ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp, 913 ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp,
@@ -925,7 +925,7 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
925 925
926static int set_assoc_params(struct usbnet *usbdev) 926static int set_assoc_params(struct usbnet *usbdev)
927{ 927{
928 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 928 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
929 929
930 set_auth_mode(usbdev, priv->wpa_version, priv->wpa_authalg); 930 set_auth_mode(usbdev, priv->wpa_version, priv->wpa_authalg);
931 set_priv_filter(usbdev); 931 set_priv_filter(usbdev);
@@ -937,7 +937,7 @@ static int set_assoc_params(struct usbnet *usbdev)
937 937
938static int set_infra_mode(struct usbnet *usbdev, int mode) 938static int set_infra_mode(struct usbnet *usbdev, int mode)
939{ 939{
940 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 940 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
941 __le32 tmp; 941 __le32 tmp;
942 int ret, i; 942 int ret, i;
943 943
@@ -970,12 +970,12 @@ static int set_infra_mode(struct usbnet *usbdev, int mode)
970 970
971static void set_default_iw_params(struct usbnet *usbdev) 971static void set_default_iw_params(struct usbnet *usbdev)
972{ 972{
973 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 973 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
974 974
975 priv->wpa_keymgmt = 0; 975 priv->wpa_keymgmt = 0;
976 priv->wpa_version = 0; 976 priv->wpa_version = 0;
977 977
978 set_infra_mode(usbdev, ndis_80211_infra_infra); 978 set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
979 set_auth_mode(usbdev, IW_AUTH_WPA_VERSION_DISABLED, 979 set_auth_mode(usbdev, IW_AUTH_WPA_VERSION_DISABLED,
980 IW_AUTH_ALG_OPEN_SYSTEM); 980 IW_AUTH_ALG_OPEN_SYSTEM);
981 set_priv_filter(usbdev); 981 set_priv_filter(usbdev);
@@ -996,7 +996,7 @@ static int deauthenticate(struct usbnet *usbdev)
996/* index must be 0 - N, as per NDIS */ 996/* index must be 0 - N, as per NDIS */
997static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index) 997static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index)
998{ 998{
999 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 999 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1000 struct ndis_80211_wep_key ndis_key; 1000 struct ndis_80211_wep_key ndis_key;
1001 int ret; 1001 int ret;
1002 1002
@@ -1011,7 +1011,7 @@ static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index)
1011 memcpy(&ndis_key.material, key, key_len); 1011 memcpy(&ndis_key.material, key, key_len);
1012 1012
1013 if (index == priv->encr_tx_key_index) { 1013 if (index == priv->encr_tx_key_index) {
1014 ndis_key.index |= ndis_80211_addwep_transmit_key; 1014 ndis_key.index |= NDIS_80211_ADDWEP_TRANSMIT_KEY;
1015 ret = set_encr_mode(usbdev, IW_AUTH_CIPHER_WEP104, 1015 ret = set_encr_mode(usbdev, IW_AUTH_CIPHER_WEP104,
1016 IW_AUTH_CIPHER_NONE); 1016 IW_AUTH_CIPHER_NONE);
1017 if (ret) 1017 if (ret)
@@ -1039,7 +1039,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
1039 int index, const struct sockaddr *addr, 1039 int index, const struct sockaddr *addr,
1040 const u8 *rx_seq, int alg, int flags) 1040 const u8 *rx_seq, int alg, int flags)
1041{ 1041{
1042 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1042 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1043 struct ndis_80211_key ndis_key; 1043 struct ndis_80211_key ndis_key;
1044 int ret; 1044 int ret;
1045 1045
@@ -1047,15 +1047,15 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
1047 return -EINVAL; 1047 return -EINVAL;
1048 if (key_len > sizeof(ndis_key.material) || key_len < 0) 1048 if (key_len > sizeof(ndis_key.material) || key_len < 0)
1049 return -EINVAL; 1049 return -EINVAL;
1050 if ((flags & ndis_80211_addkey_set_init_recv_seq) && !rx_seq) 1050 if ((flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) && !rx_seq)
1051 return -EINVAL; 1051 return -EINVAL;
1052 if ((flags & ndis_80211_addkey_pairwise_key) && !addr) 1052 if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !addr)
1053 return -EINVAL; 1053 return -EINVAL;
1054 1054
1055 devdbg(usbdev, "add_wpa_key(%i): flags:%i%i%i", index, 1055 devdbg(usbdev, "add_wpa_key(%i): flags:%i%i%i", index,
1056 !!(flags & ndis_80211_addkey_transmit_key), 1056 !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY),
1057 !!(flags & ndis_80211_addkey_pairwise_key), 1057 !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY),
1058 !!(flags & ndis_80211_addkey_set_init_recv_seq)); 1058 !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ));
1059 1059
1060 memset(&ndis_key, 0, sizeof(ndis_key)); 1060 memset(&ndis_key, 0, sizeof(ndis_key));
1061 1061
@@ -1073,15 +1073,15 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
1073 } else 1073 } else
1074 memcpy(ndis_key.material, key, key_len); 1074 memcpy(ndis_key.material, key, key_len);
1075 1075
1076 if (flags & ndis_80211_addkey_set_init_recv_seq) 1076 if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ)
1077 memcpy(ndis_key.rsc, rx_seq, 6); 1077 memcpy(ndis_key.rsc, rx_seq, 6);
1078 1078
1079 if (flags & ndis_80211_addkey_pairwise_key) { 1079 if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) {
1080 /* pairwise key */ 1080 /* pairwise key */
1081 memcpy(ndis_key.bssid, addr->sa_data, ETH_ALEN); 1081 memcpy(ndis_key.bssid, addr->sa_data, ETH_ALEN);
1082 } else { 1082 } else {
1083 /* group key */ 1083 /* group key */
1084 if (priv->infra_mode == ndis_80211_infra_adhoc) 1084 if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
1085 memset(ndis_key.bssid, 0xff, ETH_ALEN); 1085 memset(ndis_key.bssid, 0xff, ETH_ALEN);
1086 else 1086 else
1087 get_bssid(usbdev, ndis_key.bssid); 1087 get_bssid(usbdev, ndis_key.bssid);
@@ -1096,7 +1096,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
1096 priv->encr_key_len[index] = key_len; 1096 priv->encr_key_len[index] = key_len;
1097 priv->encr_key_wpa[index] = 1; 1097 priv->encr_key_wpa[index] = 1;
1098 1098
1099 if (flags & ndis_80211_addkey_transmit_key) 1099 if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY)
1100 priv->encr_tx_key_index = index; 1100 priv->encr_tx_key_index = index;
1101 1101
1102 return 0; 1102 return 0;
@@ -1106,7 +1106,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
1106/* remove_key is for both wep and wpa */ 1106/* remove_key is for both wep and wpa */
1107static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN]) 1107static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
1108{ 1108{
1109 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1109 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1110 struct ndis_80211_remove_key remove_key; 1110 struct ndis_80211_remove_key remove_key;
1111 __le32 keyindex; 1111 __le32 keyindex;
1112 int ret; 1112 int ret;
@@ -1128,7 +1128,7 @@ static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
1128 /* pairwise key */ 1128 /* pairwise key */
1129 if (memcmp(bssid, ffff_bssid, ETH_ALEN) != 0) 1129 if (memcmp(bssid, ffff_bssid, ETH_ALEN) != 0)
1130 remove_key.index |= 1130 remove_key.index |=
1131 ndis_80211_addkey_pairwise_key; 1131 NDIS_80211_ADDKEY_PAIRWISE_KEY;
1132 memcpy(remove_key.bssid, bssid, 1132 memcpy(remove_key.bssid, bssid,
1133 sizeof(remove_key.bssid)); 1133 sizeof(remove_key.bssid));
1134 } else 1134 } else
@@ -1161,7 +1161,7 @@ static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
1161 1161
1162static void set_multicast_list(struct usbnet *usbdev) 1162static void set_multicast_list(struct usbnet *usbdev)
1163{ 1163{
1164 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1164 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1165 struct dev_mc_list *mclist; 1165 struct dev_mc_list *mclist;
1166 __le32 filter; 1166 __le32 filter;
1167 int ret, i, size; 1167 int ret, i, size;
@@ -1238,10 +1238,10 @@ static int rndis_change_virtual_intf(struct wiphy *wiphy, int ifindex,
1238 1238
1239 switch (type) { 1239 switch (type) {
1240 case NL80211_IFTYPE_ADHOC: 1240 case NL80211_IFTYPE_ADHOC:
1241 mode = ndis_80211_infra_adhoc; 1241 mode = NDIS_80211_INFRA_ADHOC;
1242 break; 1242 break;
1243 case NL80211_IFTYPE_STATION: 1243 case NL80211_IFTYPE_STATION:
1244 mode = ndis_80211_infra_infra; 1244 mode = NDIS_80211_INFRA_INFRA;
1245 break; 1245 break;
1246 default: 1246 default:
1247 return -EINVAL; 1247 return -EINVAL;
@@ -1256,7 +1256,7 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
1256 struct cfg80211_scan_request *request) 1256 struct cfg80211_scan_request *request)
1257{ 1257{
1258 struct usbnet *usbdev = netdev_priv(dev); 1258 struct usbnet *usbdev = netdev_priv(dev);
1259 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1259 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1260 int ret; 1260 int ret;
1261 __le32 tmp; 1261 __le32 tmp;
1262 1262
@@ -1286,7 +1286,7 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
1286static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev, 1286static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
1287 struct ndis_80211_bssid_ex *bssid) 1287 struct ndis_80211_bssid_ex *bssid)
1288{ 1288{
1289 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1289 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1290 struct ieee80211_channel *channel; 1290 struct ieee80211_channel *channel;
1291 s32 signal; 1291 s32 signal;
1292 u64 timestamp; 1292 u64 timestamp;
@@ -1371,8 +1371,8 @@ out:
1371 1371
1372static void rndis_get_scan_results(struct work_struct *work) 1372static void rndis_get_scan_results(struct work_struct *work)
1373{ 1373{
1374 struct rndis_wext_private *priv = 1374 struct rndis_wlan_private *priv =
1375 container_of(work, struct rndis_wext_private, scan_work.work); 1375 container_of(work, struct rndis_wlan_private, scan_work.work);
1376 struct usbnet *usbdev = priv->usbdev; 1376 struct usbnet *usbdev = priv->usbdev;
1377 int ret; 1377 int ret;
1378 1378
@@ -1497,7 +1497,7 @@ static int rndis_iw_set_auth(struct net_device *dev,
1497{ 1497{
1498 struct iw_param *p = &wrqu->param; 1498 struct iw_param *p = &wrqu->param;
1499 struct usbnet *usbdev = netdev_priv(dev); 1499 struct usbnet *usbdev = netdev_priv(dev);
1500 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1500 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1501 int ret = -ENOTSUPP; 1501 int ret = -ENOTSUPP;
1502 1502
1503 switch (p->flags & IW_AUTH_INDEX) { 1503 switch (p->flags & IW_AUTH_INDEX) {
@@ -1578,7 +1578,7 @@ static int rndis_iw_get_auth(struct net_device *dev,
1578{ 1578{
1579 struct iw_param *p = &wrqu->param; 1579 struct iw_param *p = &wrqu->param;
1580 struct usbnet *usbdev = netdev_priv(dev); 1580 struct usbnet *usbdev = netdev_priv(dev);
1581 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1581 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1582 1582
1583 switch (p->flags & IW_AUTH_INDEX) { 1583 switch (p->flags & IW_AUTH_INDEX) {
1584 case IW_AUTH_WPA_VERSION: 1584 case IW_AUTH_WPA_VERSION:
@@ -1609,7 +1609,7 @@ static int rndis_iw_set_encode(struct net_device *dev,
1609 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1609 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1610{ 1610{
1611 struct usbnet *usbdev = netdev_priv(dev); 1611 struct usbnet *usbdev = netdev_priv(dev);
1612 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1612 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1613 int ret, index, key_len; 1613 int ret, index, key_len;
1614 u8 *key; 1614 u8 *key;
1615 1615
@@ -1672,7 +1672,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
1672{ 1672{
1673 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1673 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1674 struct usbnet *usbdev = netdev_priv(dev); 1674 struct usbnet *usbdev = netdev_priv(dev);
1675 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1675 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1676 int keyidx, flags; 1676 int keyidx, flags;
1677 1677
1678 keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX; 1678 keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX;
@@ -1698,11 +1698,11 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
1698 1698
1699 flags = 0; 1699 flags = 0;
1700 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) 1700 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
1701 flags |= ndis_80211_addkey_set_init_recv_seq; 1701 flags |= NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ;
1702 if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)) 1702 if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY))
1703 flags |= ndis_80211_addkey_pairwise_key; 1703 flags |= NDIS_80211_ADDKEY_PAIRWISE_KEY;
1704 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) 1704 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
1705 flags |= ndis_80211_addkey_transmit_key; 1705 flags |= NDIS_80211_ADDKEY_TRANSMIT_KEY;
1706 1706
1707 return add_wpa_key(usbdev, ext->key, ext->key_len, keyidx, &ext->addr, 1707 return add_wpa_key(usbdev, ext->key, ext->key_len, keyidx, &ext->addr,
1708 ext->rx_seq, ext->alg, flags); 1708 ext->rx_seq, ext->alg, flags);
@@ -1713,7 +1713,7 @@ static int rndis_iw_set_genie(struct net_device *dev,
1713 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1713 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1714{ 1714{
1715 struct usbnet *usbdev = netdev_priv(dev); 1715 struct usbnet *usbdev = netdev_priv(dev);
1716 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1716 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1717 int ret = 0; 1717 int ret = 0;
1718 1718
1719#ifdef DEBUG 1719#ifdef DEBUG
@@ -1747,7 +1747,7 @@ static int rndis_iw_get_genie(struct net_device *dev,
1747 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1747 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1748{ 1748{
1749 struct usbnet *usbdev = netdev_priv(dev); 1749 struct usbnet *usbdev = netdev_priv(dev);
1750 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1750 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1751 1751
1752 devdbg(usbdev, "SIOCGIWGENIE"); 1752 devdbg(usbdev, "SIOCGIWGENIE");
1753 1753
@@ -1886,7 +1886,7 @@ static int rndis_iw_get_txpower(struct net_device *dev,
1886 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1886 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1887{ 1887{
1888 struct usbnet *usbdev = netdev_priv(dev); 1888 struct usbnet *usbdev = netdev_priv(dev);
1889 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1889 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1890 __le32 tx_power; 1890 __le32 tx_power;
1891 1891
1892 if (priv->radio_on) { 1892 if (priv->radio_on) {
@@ -1912,7 +1912,7 @@ static int rndis_iw_set_txpower(struct net_device *dev,
1912 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1912 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1913{ 1913{
1914 struct usbnet *usbdev = netdev_priv(dev); 1914 struct usbnet *usbdev = netdev_priv(dev);
1915 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1915 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1916 __le32 tx_power = 0; 1916 __le32 tx_power = 0;
1917 1917
1918 if (!wrqu->txpower.disabled) { 1918 if (!wrqu->txpower.disabled) {
@@ -1969,7 +1969,7 @@ static int rndis_iw_set_mlme(struct net_device *dev,
1969 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1969 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1970{ 1970{
1971 struct usbnet *usbdev = netdev_priv(dev); 1971 struct usbnet *usbdev = netdev_priv(dev);
1972 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1972 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1973 struct iw_mlme *mlme = (struct iw_mlme *)extra; 1973 struct iw_mlme *mlme = (struct iw_mlme *)extra;
1974 unsigned char bssid[ETH_ALEN]; 1974 unsigned char bssid[ETH_ALEN];
1975 1975
@@ -1994,7 +1994,7 @@ static int rndis_iw_set_mlme(struct net_device *dev,
1994static struct iw_statistics *rndis_get_wireless_stats(struct net_device *dev) 1994static struct iw_statistics *rndis_get_wireless_stats(struct net_device *dev)
1995{ 1995{
1996 struct usbnet *usbdev = netdev_priv(dev); 1996 struct usbnet *usbdev = netdev_priv(dev);
1997 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1997 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1998 unsigned long flags; 1998 unsigned long flags;
1999 1999
2000 spin_lock_irqsave(&priv->stats_lock, flags); 2000 spin_lock_irqsave(&priv->stats_lock, flags);
@@ -2037,28 +2037,28 @@ static const iw_handler rndis_iw_handler[] =
2037 IW_IOCTL(SIOCSIWMLME) = rndis_iw_set_mlme, 2037 IW_IOCTL(SIOCSIWMLME) = rndis_iw_set_mlme,
2038}; 2038};
2039 2039
2040static const iw_handler rndis_wext_private_handler[] = { 2040static const iw_handler rndis_wlan_private_handler[] = {
2041}; 2041};
2042 2042
2043static const struct iw_priv_args rndis_wext_private_args[] = { 2043static const struct iw_priv_args rndis_wlan_private_args[] = {
2044}; 2044};
2045 2045
2046 2046
2047static const struct iw_handler_def rndis_iw_handlers = { 2047static const struct iw_handler_def rndis_iw_handlers = {
2048 .num_standard = ARRAY_SIZE(rndis_iw_handler), 2048 .num_standard = ARRAY_SIZE(rndis_iw_handler),
2049 .num_private = ARRAY_SIZE(rndis_wext_private_handler), 2049 .num_private = ARRAY_SIZE(rndis_wlan_private_handler),
2050 .num_private_args = ARRAY_SIZE(rndis_wext_private_args), 2050 .num_private_args = ARRAY_SIZE(rndis_wlan_private_args),
2051 .standard = (iw_handler *)rndis_iw_handler, 2051 .standard = (iw_handler *)rndis_iw_handler,
2052 .private = (iw_handler *)rndis_wext_private_handler, 2052 .private = (iw_handler *)rndis_wlan_private_handler,
2053 .private_args = (struct iw_priv_args *)rndis_wext_private_args, 2053 .private_args = (struct iw_priv_args *)rndis_wlan_private_args,
2054 .get_wireless_stats = rndis_get_wireless_stats, 2054 .get_wireless_stats = rndis_get_wireless_stats,
2055}; 2055};
2056 2056
2057 2057
2058static void rndis_wext_worker(struct work_struct *work) 2058static void rndis_wlan_worker(struct work_struct *work)
2059{ 2059{
2060 struct rndis_wext_private *priv = 2060 struct rndis_wlan_private *priv =
2061 container_of(work, struct rndis_wext_private, work); 2061 container_of(work, struct rndis_wlan_private, work);
2062 struct usbnet *usbdev = priv->usbdev; 2062 struct usbnet *usbdev = priv->usbdev;
2063 union iwreq_data evt; 2063 union iwreq_data evt;
2064 unsigned char bssid[ETH_ALEN]; 2064 unsigned char bssid[ETH_ALEN];
@@ -2119,10 +2119,10 @@ get_bssid:
2119 set_multicast_list(usbdev); 2119 set_multicast_list(usbdev);
2120} 2120}
2121 2121
2122static void rndis_wext_set_multicast_list(struct net_device *dev) 2122static void rndis_wlan_set_multicast_list(struct net_device *dev)
2123{ 2123{
2124 struct usbnet *usbdev = netdev_priv(dev); 2124 struct usbnet *usbdev = netdev_priv(dev);
2125 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2125 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2126 2126
2127 if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending)) 2127 if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending))
2128 return; 2128 return;
@@ -2131,9 +2131,9 @@ static void rndis_wext_set_multicast_list(struct net_device *dev)
2131 queue_work(priv->workqueue, &priv->work); 2131 queue_work(priv->workqueue, &priv->work);
2132} 2132}
2133 2133
2134static void rndis_wext_link_change(struct usbnet *usbdev, int state) 2134static void rndis_wlan_link_change(struct usbnet *usbdev, int state)
2135{ 2135{
2136 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2136 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2137 2137
2138 /* queue work to avoid recursive calls into rndis_command */ 2138 /* queue work to avoid recursive calls into rndis_command */
2139 set_bit(state ? WORK_LINK_UP : WORK_LINK_DOWN, &priv->work_pending); 2139 set_bit(state ? WORK_LINK_UP : WORK_LINK_DOWN, &priv->work_pending);
@@ -2141,14 +2141,14 @@ static void rndis_wext_link_change(struct usbnet *usbdev, int state)
2141} 2141}
2142 2142
2143 2143
2144static int rndis_wext_get_caps(struct usbnet *usbdev) 2144static int rndis_wlan_get_caps(struct usbnet *usbdev)
2145{ 2145{
2146 struct { 2146 struct {
2147 __le32 num_items; 2147 __le32 num_items;
2148 __le32 items[8]; 2148 __le32 items[8];
2149 } networks_supported; 2149 } networks_supported;
2150 int len, retval, i, n; 2150 int len, retval, i, n;
2151 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2151 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2152 2152
2153 /* determine supported modes */ 2153 /* determine supported modes */
2154 len = sizeof(networks_supported); 2154 len = sizeof(networks_supported);
@@ -2160,14 +2160,14 @@ static int rndis_wext_get_caps(struct usbnet *usbdev)
2160 n = 8; 2160 n = 8;
2161 for (i = 0; i < n; i++) { 2161 for (i = 0; i < n; i++) {
2162 switch (le32_to_cpu(networks_supported.items[i])) { 2162 switch (le32_to_cpu(networks_supported.items[i])) {
2163 case ndis_80211_type_freq_hop: 2163 case NDIS_80211_TYPE_FREQ_HOP:
2164 case ndis_80211_type_direct_seq: 2164 case NDIS_80211_TYPE_DIRECT_SEQ:
2165 priv->caps |= CAP_MODE_80211B; 2165 priv->caps |= CAP_MODE_80211B;
2166 break; 2166 break;
2167 case ndis_80211_type_ofdm_a: 2167 case NDIS_80211_TYPE_OFDM_A:
2168 priv->caps |= CAP_MODE_80211A; 2168 priv->caps |= CAP_MODE_80211A;
2169 break; 2169 break;
2170 case ndis_80211_type_ofdm_g: 2170 case NDIS_80211_TYPE_OFDM_G:
2171 priv->caps |= CAP_MODE_80211G; 2171 priv->caps |= CAP_MODE_80211G;
2172 break; 2172 break;
2173 } 2173 }
@@ -2181,8 +2181,8 @@ static int rndis_wext_get_caps(struct usbnet *usbdev)
2181#define STATS_UPDATE_JIFFIES (HZ) 2181#define STATS_UPDATE_JIFFIES (HZ)
2182static void rndis_update_wireless_stats(struct work_struct *work) 2182static void rndis_update_wireless_stats(struct work_struct *work)
2183{ 2183{
2184 struct rndis_wext_private *priv = 2184 struct rndis_wlan_private *priv =
2185 container_of(work, struct rndis_wext_private, stats_work.work); 2185 container_of(work, struct rndis_wlan_private, stats_work.work);
2186 struct usbnet *usbdev = priv->usbdev; 2186 struct usbnet *usbdev = priv->usbdev;
2187 struct iw_statistics iwstats; 2187 struct iw_statistics iwstats;
2188 __le32 rssi, tmp; 2188 __le32 rssi, tmp;
@@ -2297,7 +2297,7 @@ static int bcm4320a_early_init(struct usbnet *usbdev)
2297 2297
2298static int bcm4320b_early_init(struct usbnet *usbdev) 2298static int bcm4320b_early_init(struct usbnet *usbdev)
2299{ 2299{
2300 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2300 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2301 char buf[8]; 2301 char buf[8];
2302 2302
2303 /* Early initialization settings, setting these won't have effect 2303 /* Early initialization settings, setting these won't have effect
@@ -2363,21 +2363,21 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
2363} 2363}
2364 2364
2365/* same as rndis_netdev_ops but with local multicast handler */ 2365/* same as rndis_netdev_ops but with local multicast handler */
2366static const struct net_device_ops rndis_wext_netdev_ops = { 2366static const struct net_device_ops rndis_wlan_netdev_ops = {
2367 .ndo_open = usbnet_open, 2367 .ndo_open = usbnet_open,
2368 .ndo_stop = usbnet_stop, 2368 .ndo_stop = usbnet_stop,
2369 .ndo_start_xmit = usbnet_start_xmit, 2369 .ndo_start_xmit = usbnet_start_xmit,
2370 .ndo_tx_timeout = usbnet_tx_timeout, 2370 .ndo_tx_timeout = usbnet_tx_timeout,
2371 .ndo_set_mac_address = eth_mac_addr, 2371 .ndo_set_mac_address = eth_mac_addr,
2372 .ndo_validate_addr = eth_validate_addr, 2372 .ndo_validate_addr = eth_validate_addr,
2373 .ndo_set_multicast_list = rndis_wext_set_multicast_list, 2373 .ndo_set_multicast_list = rndis_wlan_set_multicast_list,
2374}; 2374};
2375 2375
2376 2376
2377static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf) 2377static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
2378{ 2378{
2379 struct wiphy *wiphy; 2379 struct wiphy *wiphy;
2380 struct rndis_wext_private *priv; 2380 struct rndis_wlan_private *priv;
2381 int retval, len; 2381 int retval, len;
2382 __le32 tmp; 2382 __le32 tmp;
2383 2383
@@ -2385,7 +2385,7 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
2385 * NOTE: We only support a single virtual interface, so wiphy 2385 * NOTE: We only support a single virtual interface, so wiphy
2386 * and wireless_dev are somewhat synonymous for this device. 2386 * and wireless_dev are somewhat synonymous for this device.
2387 */ 2387 */
2388 wiphy = wiphy_new(&rndis_config_ops, sizeof(struct rndis_wext_private)); 2388 wiphy = wiphy_new(&rndis_config_ops, sizeof(struct rndis_wlan_private));
2389 if (!wiphy) 2389 if (!wiphy)
2390 return -ENOMEM; 2390 return -ENOMEM;
2391 2391
@@ -2395,7 +2395,7 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
2395 priv->wdev.iftype = NL80211_IFTYPE_STATION; 2395 priv->wdev.iftype = NL80211_IFTYPE_STATION;
2396 2396
2397 /* These have to be initialized before calling generic_rndis_bind(). 2397 /* These have to be initialized before calling generic_rndis_bind().
2398 * Otherwise we'll be in big trouble in rndis_wext_early_init(). 2398 * Otherwise we'll be in big trouble in rndis_wlan_early_init().
2399 */ 2399 */
2400 usbdev->driver_priv = priv; 2400 usbdev->driver_priv = priv;
2401 usbdev->net->wireless_handlers = &rndis_iw_handlers; 2401 usbdev->net->wireless_handlers = &rndis_iw_handlers;
@@ -2406,7 +2406,7 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
2406 2406
2407 /* because rndis_command() sleeps we need to use workqueue */ 2407 /* because rndis_command() sleeps we need to use workqueue */
2408 priv->workqueue = create_singlethread_workqueue("rndis_wlan"); 2408 priv->workqueue = create_singlethread_workqueue("rndis_wlan");
2409 INIT_WORK(&priv->work, rndis_wext_worker); 2409 INIT_WORK(&priv->work, rndis_wlan_worker);
2410 INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats); 2410 INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats);
2411 INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results); 2411 INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results);
2412 2412
@@ -2420,9 +2420,9 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
2420 * picks up rssi to closest station instead of to access point). 2420 * picks up rssi to closest station instead of to access point).
2421 * 2421 *
2422 * rndis_host wants to avoid all OID as much as possible 2422 * rndis_host wants to avoid all OID as much as possible
2423 * so do promisc/multicast handling in rndis_wext. 2423 * so do promisc/multicast handling in rndis_wlan.
2424 */ 2424 */
2425 usbdev->net->netdev_ops = &rndis_wext_netdev_ops; 2425 usbdev->net->netdev_ops = &rndis_wlan_netdev_ops;
2426 2426
2427 tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST; 2427 tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST;
2428 retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp, 2428 retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp,
@@ -2455,7 +2455,7 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
2455 wiphy->max_scan_ssids = 1; 2455 wiphy->max_scan_ssids = 1;
2456 2456
2457 /* TODO: fill-out band information based on priv->caps */ 2457 /* TODO: fill-out band information based on priv->caps */
2458 rndis_wext_get_caps(usbdev); 2458 rndis_wlan_get_caps(usbdev);
2459 2459
2460 memcpy(priv->channels, rndis_channels, sizeof(rndis_channels)); 2460 memcpy(priv->channels, rndis_channels, sizeof(rndis_channels));
2461 memcpy(priv->rates, rndis_rates, sizeof(rndis_rates)); 2461 memcpy(priv->rates, rndis_rates, sizeof(rndis_rates));
@@ -2497,9 +2497,9 @@ fail:
2497} 2497}
2498 2498
2499 2499
2500static void rndis_wext_unbind(struct usbnet *usbdev, struct usb_interface *intf) 2500static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
2501{ 2501{
2502 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2502 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2503 2503
2504 /* turn radio off */ 2504 /* turn radio off */
2505 disassociate(usbdev, 0); 2505 disassociate(usbdev, 0);
@@ -2520,7 +2520,7 @@ static void rndis_wext_unbind(struct usbnet *usbdev, struct usb_interface *intf)
2520} 2520}
2521 2521
2522 2522
2523static int rndis_wext_reset(struct usbnet *usbdev) 2523static int rndis_wlan_reset(struct usbnet *usbdev)
2524{ 2524{
2525 return deauthenticate(usbdev); 2525 return deauthenticate(usbdev);
2526} 2526}
@@ -2529,40 +2529,40 @@ static int rndis_wext_reset(struct usbnet *usbdev)
2529static const struct driver_info bcm4320b_info = { 2529static const struct driver_info bcm4320b_info = {
2530 .description = "Wireless RNDIS device, BCM4320b based", 2530 .description = "Wireless RNDIS device, BCM4320b based",
2531 .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT, 2531 .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT,
2532 .bind = rndis_wext_bind, 2532 .bind = rndis_wlan_bind,
2533 .unbind = rndis_wext_unbind, 2533 .unbind = rndis_wlan_unbind,
2534 .status = rndis_status, 2534 .status = rndis_status,
2535 .rx_fixup = rndis_rx_fixup, 2535 .rx_fixup = rndis_rx_fixup,
2536 .tx_fixup = rndis_tx_fixup, 2536 .tx_fixup = rndis_tx_fixup,
2537 .reset = rndis_wext_reset, 2537 .reset = rndis_wlan_reset,
2538 .early_init = bcm4320b_early_init, 2538 .early_init = bcm4320b_early_init,
2539 .link_change = rndis_wext_link_change, 2539 .link_change = rndis_wlan_link_change,
2540}; 2540};
2541 2541
2542static const struct driver_info bcm4320a_info = { 2542static const struct driver_info bcm4320a_info = {
2543 .description = "Wireless RNDIS device, BCM4320a based", 2543 .description = "Wireless RNDIS device, BCM4320a based",
2544 .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT, 2544 .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT,
2545 .bind = rndis_wext_bind, 2545 .bind = rndis_wlan_bind,
2546 .unbind = rndis_wext_unbind, 2546 .unbind = rndis_wlan_unbind,
2547 .status = rndis_status, 2547 .status = rndis_status,
2548 .rx_fixup = rndis_rx_fixup, 2548 .rx_fixup = rndis_rx_fixup,
2549 .tx_fixup = rndis_tx_fixup, 2549 .tx_fixup = rndis_tx_fixup,
2550 .reset = rndis_wext_reset, 2550 .reset = rndis_wlan_reset,
2551 .early_init = bcm4320a_early_init, 2551 .early_init = bcm4320a_early_init,
2552 .link_change = rndis_wext_link_change, 2552 .link_change = rndis_wlan_link_change,
2553}; 2553};
2554 2554
2555static const struct driver_info rndis_wext_info = { 2555static const struct driver_info rndis_wlan_info = {
2556 .description = "Wireless RNDIS device", 2556 .description = "Wireless RNDIS device",
2557 .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT, 2557 .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT,
2558 .bind = rndis_wext_bind, 2558 .bind = rndis_wlan_bind,
2559 .unbind = rndis_wext_unbind, 2559 .unbind = rndis_wlan_unbind,
2560 .status = rndis_status, 2560 .status = rndis_status,
2561 .rx_fixup = rndis_rx_fixup, 2561 .rx_fixup = rndis_rx_fixup,
2562 .tx_fixup = rndis_tx_fixup, 2562 .tx_fixup = rndis_tx_fixup,
2563 .reset = rndis_wext_reset, 2563 .reset = rndis_wlan_reset,
2564 .early_init = bcm4320a_early_init, 2564 .early_init = bcm4320a_early_init,
2565 .link_change = rndis_wext_link_change, 2565 .link_change = rndis_wlan_link_change,
2566}; 2566};
2567 2567
2568/*-------------------------------------------------------------------------*/ 2568/*-------------------------------------------------------------------------*/
@@ -2672,11 +2672,11 @@ static const struct usb_device_id products [] = {
2672{ 2672{
2673 /* RNDIS is MSFT's un-official variant of CDC ACM */ 2673 /* RNDIS is MSFT's un-official variant of CDC ACM */
2674 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), 2674 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
2675 .driver_info = (unsigned long) &rndis_wext_info, 2675 .driver_info = (unsigned long) &rndis_wlan_info,
2676}, { 2676}, {
2677 /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ 2677 /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
2678 USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), 2678 USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
2679 .driver_info = (unsigned long) &rndis_wext_info, 2679 .driver_info = (unsigned long) &rndis_wlan_info,
2680}, 2680},
2681 { }, // END 2681 { }, // END
2682}; 2682};
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 0197531bd88c..435f945fe64d 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -520,7 +520,7 @@ static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
520 if (state == STATE_SLEEP) { 520 if (state == STATE_SLEEP) {
521 rt2x00pci_register_read(rt2x00dev, CSR20, &reg); 521 rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
522 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN, 522 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
523 (libconf->conf->beacon_int - 20) * 16); 523 (rt2x00dev->beacon_int - 20) * 16);
524 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP, 524 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
525 libconf->conf->listen_interval - 1); 525 libconf->conf->listen_interval - 1);
526 526
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index f95cb646f85a..08b30d01e67d 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -569,7 +569,7 @@ static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
569 if (state == STATE_SLEEP) { 569 if (state == STATE_SLEEP) {
570 rt2x00pci_register_read(rt2x00dev, CSR20, &reg); 570 rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
571 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN, 571 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
572 (libconf->conf->beacon_int - 20) * 16); 572 (rt2x00dev->beacon_int - 20) * 16);
573 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP, 573 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
574 libconf->conf->listen_interval - 1); 574 libconf->conf->listen_interval - 1);
575 575
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 69f966f1ce54..66daf68ff0ee 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -647,7 +647,7 @@ static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
647 if (state == STATE_SLEEP) { 647 if (state == STATE_SLEEP) {
648 rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg); 648 rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
649 rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, 649 rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON,
650 libconf->conf->beacon_int - 20); 650 rt2x00dev->beacon_int - 20);
651 rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP, 651 rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP,
652 libconf->conf->listen_interval - 1); 652 libconf->conf->listen_interval - 1);
653 653
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 142ad34fdc49..37561667925b 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -2927,12 +2927,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
2927 { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 2927 { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
2928 { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) }, 2928 { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
2929 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) }, 2929 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
2930 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
2931 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
2932 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
2930 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, 2933 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
2931 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 2934 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
2932 /* Edimax */ 2935 /* Edimax */
2933 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) }, 2936 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
2934 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) }, 2937 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
2935 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) }, 2938 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
2939 /* Encore */
2940 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
2936 /* EnGenius */ 2941 /* EnGenius */
2937 { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) }, 2942 { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
2938 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) }, 2943 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2951,6 +2956,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
2951 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) }, 2956 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
2952 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) }, 2957 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
2953 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) }, 2958 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
2959 /* I-O DATA */
2960 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
2954 /* LevelOne */ 2961 /* LevelOne */
2955 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) }, 2962 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
2956 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) }, 2963 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2970,6 +2977,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
2970 /* Pegatron */ 2977 /* Pegatron */
2971 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, 2978 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
2972 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, 2979 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
2980 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
2973 /* Philips */ 2981 /* Philips */
2974 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) }, 2982 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
2975 /* Planex */ 2983 /* Planex */
@@ -2981,6 +2989,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
2981 /* Quanta */ 2989 /* Quanta */
2982 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) }, 2990 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
2983 /* Ralink */ 2991 /* Ralink */
2992 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
2984 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) }, 2993 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
2985 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, 2994 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
2986 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 2995 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -3005,6 +3014,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
3005 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, 3014 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
3006 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 3015 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
3007 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) }, 3016 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
3017 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
3008 /* SMC */ 3018 /* SMC */
3009 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, 3019 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
3010 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) }, 3020 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -3029,6 +3039,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	/* Zinwell */
 	{ USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
 	/* Zyxel */
 	{ USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
 	{ USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 2b64a6198698..a498dde024e1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -802,6 +802,11 @@ struct rt2x00_dev {
 	u8 calibration[2];
 
 	/*
+	 * Beacon interval.
+	 */
+	u16 beacon_int;
+
+	/*
 	 * Low level statistics which will have
 	 * to be kept up to date while device is running.
 	 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index c5bbf0b6e207..3e019a12df2e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -108,6 +108,9 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
 	erp.basic_rates = bss_conf->basic_rates;
 	erp.beacon_int = bss_conf->beacon_int;
 
+	/* Update global beacon interval time, this is needed for PS support */
+	rt2x00dev->beacon_int = bss_conf->beacon_int;
+
 	rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp);
 }
 
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index a8bf5c432858..49b29ff90c47 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -956,7 +956,7 @@ static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev,
 	if (state == STATE_SLEEP) {
 		rt2x00pci_register_read(rt2x00dev, MAC_CSR11, &reg);
 		rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
-				   libconf->conf->beacon_int - 10);
+				   rt2x00dev->beacon_int - 10);
 		rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
 				   libconf->conf->listen_interval - 1);
 		rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 211a3d6bc054..c18848836f2d 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -852,7 +852,7 @@ static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
 	if (state == STATE_SLEEP) {
 		rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
 		rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
-				   libconf->conf->beacon_int - 10);
+				   rt2x00dev->beacon_int - 10);
 		rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
 				   libconf->conf->listen_interval - 1);
 		rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 6499ccc34c94..294250e294dd 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -74,6 +74,8 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
 	{USB_DEVICE(0x18E8, 0x6232), .driver_info = DEVICE_RTL8187},
 	/* AirLive */
 	{USB_DEVICE(0x1b75, 0x8187), .driver_info = DEVICE_RTL8187},
+	/* Linksys */
+	{USB_DEVICE(0x1737, 0x0073), .driver_info = DEVICE_RTL8187B},
 	{}
 };
 
@@ -321,12 +323,7 @@ static void rtl8187_rx_cb(struct urb *urb)
 	unsigned long f;
 
 	spin_lock_irqsave(&priv->rx_queue.lock, f);
-	if (skb->next)
-		__skb_unlink(skb, &priv->rx_queue);
-	else {
-		spin_unlock_irqrestore(&priv->rx_queue.lock, f);
-		return;
-	}
+	__skb_unlink(skb, &priv->rx_queue);
 	spin_unlock_irqrestore(&priv->rx_queue.lock, f);
 	skb_put(skb, urb->actual_length);
 
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 3ab3eb957189..25d27b64f528 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -2869,10 +2869,6 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
 		if (lp->tx_n_in_use == (NTXBLOCKS - 1))
 			return 1;
 	}
-#ifdef DEBUG_TX_ERROR
-	if (skb->next)
-		printk(KERN_INFO "skb has next\n");
-#endif
 
 	/* Do we need some padding? */
 	/* Note : on wireless the propagation time is in the order of 1us,
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index e55b33961aeb..1a90d69f18a9 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -3107,11 +3107,6 @@ wavelan_packet_xmit(struct sk_buff * skb,
 	 * so the Tx buffer is now free */
 	}
 
-#ifdef DEBUG_TX_ERROR
-	if (skb->next)
-		printk(KERN_INFO "skb has next\n");
-#endif
-
 	/* Check if we need some padding */
 	/* Note : on wireless the propagation time is in the order of 1us,
 	 * and we don't have the Ethernet specific requirement of beeing
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 7477ffdcddb4..3c7a5053f1da 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -717,7 +717,7 @@ static void yellowfin_tx_timeout(struct net_device *dev)
 	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 		netif_wake_queue (dev);		/* Typical path */
 
-	dev->trans_start = jiffies;
+	dev->trans_start = jiffies; /* prevent tx timeout */
 	dev->stats.tx_errors++;
 }
 
@@ -876,7 +876,6 @@ static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_start_queue (dev);		/* Typical path */
 	else
 		yp->tx_full = 1;
-	dev->trans_start = jiffies;
 
 	if (yellowfin_debug > 4) {
 		printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",