aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/fore200e.c4
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/isdn/gigaset/asyncdata.c624
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c68
-rw-r--r--drivers/isdn/gigaset/capi.c175
-rw-r--r--drivers/isdn/gigaset/common.c67
-rw-r--r--drivers/isdn/gigaset/ev-layer.c516
-rw-r--r--drivers/isdn/gigaset/gigaset.h81
-rw-r--r--drivers/isdn/gigaset/i4l.c57
-rw-r--r--drivers/isdn/gigaset/interface.c39
-rw-r--r--drivers/isdn/gigaset/isocdata.c113
-rw-r--r--drivers/isdn/gigaset/proc.c2
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c56
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c69
-rw-r--r--drivers/isdn/hardware/eicon/maintidi.c5
-rw-r--r--drivers/isdn/hardware/eicon/message.c18
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c1
-rw-r--r--drivers/isdn/hisax/diva.c2
-rw-r--r--drivers/isdn/hisax/hfc_usb.c4
-rw-r--r--drivers/isdn/hisax/hscx_irq.c2
-rw-r--r--drivers/isdn/hisax/icc.c1
-rw-r--r--drivers/isdn/i4l/isdn_net.h6
-rw-r--r--drivers/isdn/mISDN/socket.c2
-rw-r--r--drivers/isdn/mISDN/stack.c2
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/iwmc3200top/Kconfig20
-rw-r--r--drivers/misc/iwmc3200top/Makefile29
-rw-r--r--drivers/misc/iwmc3200top/debugfs.c133
-rw-r--r--drivers/misc/iwmc3200top/debugfs.h58
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c359
-rw-r--r--drivers/misc/iwmc3200top/fw-msg.h113
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h206
-rw-r--r--drivers/misc/iwmc3200top/log.c347
-rw-r--r--drivers/misc/iwmc3200top/log.h158
-rw-r--r--drivers/misc/iwmc3200top/main.c699
-rw-r--r--drivers/net/Kconfig5
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arm/ks8695net.c142
-rw-r--r--drivers/net/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/au1000_eth.c1
-rw-r--r--drivers/net/benet/be_cmds.c70
-rw-r--r--drivers/net/benet/be_cmds.h52
-rw-r--r--drivers/net/benet/be_ethtool.c36
-rw-r--r--drivers/net/benet/be_main.c34
-rw-r--r--drivers/net/bnx2.c4
-rw-r--r--drivers/net/bnx2.h9
-rw-r--r--drivers/net/bnx2x_hsi.h1
-rw-r--r--drivers/net/bnx2x_link.c317
-rw-r--r--drivers/net/bnx2x_link.h3
-rw-r--r--drivers/net/bnx2x_main.c20
-rw-r--r--drivers/net/bnx2x_reg.h23
-rw-r--r--drivers/net/bonding/bond_3ad.c14
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_ipv6.c7
-rw-r--r--drivers/net/bonding/bond_main.c259
-rw-r--r--drivers/net/bonding/bond_sysfs.c39
-rw-r--r--drivers/net/bonding/bonding.h20
-rw-r--r--drivers/net/can/at91_can.c32
-rw-r--r--drivers/net/can/dev.c42
-rw-r--r--drivers/net/can/sja1000/sja1000.c14
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c1
-rw-r--r--drivers/net/can/ti_hecc.c17
-rw-r--r--drivers/net/can/usb/ems_usb.c16
-rw-r--r--drivers/net/cnic.c14
-rw-r--r--drivers/net/cxgb3/sge.c1
-rw-r--r--drivers/net/davinci_emac.c32
-rw-r--r--drivers/net/dm9000.h2
-rw-r--r--drivers/net/e100.c46
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h26
-rw-r--r--drivers/net/e1000e/ethtool.c12
-rw-r--r--drivers/net/e1000e/hw.h3
-rw-r--r--drivers/net/e1000e/ich8lan.c628
-rw-r--r--drivers/net/e1000e/phy.c476
-rw-r--r--drivers/net/ethoc.c21
-rw-r--r--drivers/net/fec.c2
-rw-r--r--drivers/net/fec_mpc52xx.c6
-rw-r--r--drivers/net/fec_mpc52xx_phy.c1
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/fsl_pq_mdio.c60
-rw-r--r--drivers/net/fsl_pq_mdio.h11
-rw-r--r--drivers/net/gianfar.c1492
-rw-r--r--drivers/net/gianfar.h413
-rw-r--r--drivers/net/gianfar_ethtool.c376
-rw-r--r--drivers/net/gianfar_sysfs.c77
-rw-r--r--drivers/net/ibm_newemac/core.c2
-rw-r--r--drivers/net/ifb.c5
-rw-r--r--drivers/net/igb/e1000_82575.h25
-rw-r--r--drivers/net/igb/e1000_defines.h33
-rw-r--r--drivers/net/igb/e1000_mbx.h10
-rw-r--r--drivers/net/igb/e1000_regs.h74
-rw-r--r--drivers/net/igb/igb.h129
-rw-r--r--drivers/net/igb/igb_ethtool.c573
-rw-r--r--drivers/net/igb/igb_main.c2922
-rw-r--r--drivers/net/igbvf/ethtool.c30
-rw-r--r--drivers/net/ixgbe/ixgbe.h1
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c50
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c22
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c46
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h15
-rw-r--r--drivers/net/ks8851.c42
-rw-r--r--drivers/net/ks8851.h1
-rw-r--r--drivers/net/macvlan.c6
-rw-r--r--drivers/net/myri10ge/myri10ge.c17
-rw-r--r--drivers/net/netxen/netxen_nic.h46
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c4
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c89
-rw-r--r--drivers/net/netxen/netxen_nic_init.c319
-rw-r--r--drivers/net/netxen/netxen_nic_main.c38
-rw-r--r--drivers/net/niu.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c2
-rw-r--r--drivers/net/phy/broadcom.c208
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/pppoe.c148
-rw-r--r--drivers/net/pppox.c3
-rw-r--r--drivers/net/qlge/qlge.h203
-rw-r--r--drivers/net/qlge/qlge_dbg.c180
-rw-r--r--drivers/net/qlge/qlge_ethtool.c221
-rw-r--r--drivers/net/qlge/qlge_main.c503
-rw-r--r--drivers/net/qlge/qlge_mpi.c208
-rw-r--r--drivers/net/r8169.c15
-rw-r--r--drivers/net/sfc/Makefile4
-rw-r--r--drivers/net/sfc/bitfield.h13
-rw-r--r--drivers/net/sfc/boards.c328
-rw-r--r--drivers/net/sfc/boards.h28
-rw-r--r--drivers/net/sfc/efx.c89
-rw-r--r--drivers/net/sfc/efx.h11
-rw-r--r--drivers/net/sfc/ethtool.c8
-rw-r--r--drivers/net/sfc/falcon.c1123
-rw-r--r--drivers/net/sfc/falcon.h4
-rw-r--r--drivers/net/sfc/falcon_boards.c (renamed from drivers/net/sfc/sfe4001.c)367
-rw-r--r--drivers/net/sfc/falcon_gmac.c95
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h1333
-rw-r--r--drivers/net/sfc/falcon_io.h258
-rw-r--r--drivers/net/sfc/falcon_xmac.c178
-rw-r--r--drivers/net/sfc/gmii.h60
-rw-r--r--drivers/net/sfc/io.h256
-rw-r--r--drivers/net/sfc/mdio_10g.c121
-rw-r--r--drivers/net/sfc/mdio_10g.h1
-rw-r--r--drivers/net/sfc/net_driver.h56
-rw-r--r--drivers/net/sfc/phy.h6
-rw-r--r--drivers/net/sfc/qt202x_phy.c (renamed from drivers/net/sfc/xfp_phy.c)80
-rw-r--r--drivers/net/sfc/regs.h3180
-rw-r--r--drivers/net/sfc/rx.c40
-rw-r--r--drivers/net/sfc/selftest.c5
-rw-r--r--drivers/net/sfc/tenxpress.c4
-rw-r--r--drivers/net/sfc/tx.c83
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/sky2.c128
-rw-r--r--drivers/net/sky2.h185
-rw-r--r--drivers/net/stmmac/Kconfig53
-rw-r--r--drivers/net/stmmac/Makefile4
-rw-r--r--drivers/net/stmmac/common.h330
-rw-r--r--drivers/net/stmmac/descs.h163
-rw-r--r--drivers/net/stmmac/gmac.c693
-rw-r--r--drivers/net/stmmac/gmac.h204
-rw-r--r--drivers/net/stmmac/mac100.c517
-rw-r--r--drivers/net/stmmac/mac100.h116
-rw-r--r--drivers/net/stmmac/stmmac.h98
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c395
-rw-r--r--drivers/net/stmmac/stmmac_main.c2204
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c217
-rw-r--r--drivers/net/stmmac/stmmac_timer.c140
-rw-r--r--drivers/net/stmmac/stmmac_timer.h41
-rw-r--r--drivers/net/sungem.c4
-rw-r--r--drivers/net/tc35815.c290
-rw-r--r--drivers/net/tehuti.c2
-rw-r--r--drivers/net/tehuti.h2
-rw-r--r--drivers/net/tg3.c205
-rw-r--r--drivers/net/tg3.h39
-rw-r--r--drivers/net/tokenring/ibmtr.c11
-rw-r--r--drivers/net/usb/Kconfig2
-rw-r--r--drivers/net/usb/cdc_ether.c42
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/usb/kaweth.c11
-rw-r--r--drivers/net/veth.c6
-rw-r--r--drivers/net/via-velocity.c39
-rw-r--r--drivers/net/via-velocity.h2
-rw-r--r--drivers/net/virtio_net.c22
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c38
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h5
-rw-r--r--drivers/net/vxge/vxge-main.c5
-rw-r--r--drivers/net/vxge/vxge-version.h2
-rw-r--r--drivers/net/wimax/i2400m/Kconfig8
-rw-r--r--drivers/net/wireless/airo.c8
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h4
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c11
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c12
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h13
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c185
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h11
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/ath/regd.h8
-rw-r--r--drivers/net/wireless/ath/regd_common.h32
-rw-r--r--drivers/net/wireless/b43/b43.h16
-rw-r--r--drivers/net/wireless/b43/dma.c15
-rw-r--r--drivers/net/wireless/b43/leds.h1
-rw-r--r--drivers/net/wireless/b43/main.c5
-rw-r--r--drivers/net/wireless/b43/pio.c79
-rw-r--r--drivers/net/wireless/b43/rfkill.c3
-rw-r--r--drivers/net/wireless/b43/xmit.c7
-rw-r--r--drivers/net/wireless/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c5
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h1
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c237
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c172
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c219
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c172
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c108
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h101
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c154
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h79
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c260
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c676
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c83
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c177
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c266
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c47
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c31
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h70
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h6
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h8
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c46
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c84
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c10
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h5
-rw-r--r--drivers/net/wireless/libertas/11d.c696
-rw-r--r--drivers/net/wireless/libertas/11d.h105
-rw-r--r--drivers/net/wireless/libertas/Makefile1
-rw-r--r--drivers/net/wireless/libertas/assoc.c445
-rw-r--r--drivers/net/wireless/libertas/assoc.h141
-rw-r--r--drivers/net/wireless/libertas/cmd.c482
-rw-r--r--drivers/net/wireless/libertas/cmd.h127
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c104
-rw-r--r--drivers/net/wireless/libertas/debugfs.c27
-rw-r--r--drivers/net/wireless/libertas/decl.h62
-rw-r--r--drivers/net/wireless/libertas/defs.h1
-rw-r--r--drivers/net/wireless/libertas/dev.h424
-rw-r--r--drivers/net/wireless/libertas/host.h960
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h800
-rw-r--r--drivers/net/wireless/libertas/if_spi.c10
-rw-r--r--drivers/net/wireless/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/libertas/main.c202
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c8
-rw-r--r--drivers/net/wireless/libertas/rx.c2
-rw-r--r--drivers/net/wireless/libertas/scan.c250
-rw-r--r--drivers/net/wireless/libertas/scan.h30
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/types.h4
-rw-r--r--drivers/net/wireless/libertas/wext.c144
-rw-r--r--drivers/net/wireless/libertas/wext.h8
-rw-r--r--drivers/net/wireless/orinoco/hw.c33
-rw-r--r--drivers/net/wireless/orinoco/hw.h3
-rw-r--r--drivers/net/wireless/orinoco/main.c33
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h1
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig30
-rw-r--r--drivers/net/wireless/rt2x00/Makefile2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c3323
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h1960
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c159
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h52
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c7
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h92
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c369
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h586
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c215
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c329
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h115
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h911
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c64
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h30
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c155
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.h53
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c963
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c86
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c311
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h65
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c76
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h18
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h18
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c202
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h25
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c4
-rw-r--r--drivers/serial/serial_cs.c8
-rw-r--r--drivers/ssb/driver_pcicore.c4
331 files changed, 34490 insertions, 13954 deletions
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f766cc46b4c4..bc53fed89b1e 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2906,8 +2906,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2906 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type)); 2906 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2907 u32 oc3_index; 2907 u32 oc3_index;
2908 2908
2909 if ((media_index < 0) || (media_index > 4)) 2909 if (media_index > 4)
2910 media_index = 5; 2910 media_index = 5;
2911 2911
2912 switch (fore200e->loop_mode) { 2912 switch (fore200e->loop_mode) {
2913 case ATM_LM_NONE: oc3_index = 0; 2913 case ATM_LM_NONE: oc3_index = 0;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 70667033a568..e90665876c47 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2739,7 +2739,7 @@ he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2739 spin_lock_irqsave(&he_dev->global_lock, flags); 2739 spin_lock_irqsave(&he_dev->global_lock, flags);
2740 switch (reg.type) { 2740 switch (reg.type) {
2741 case HE_REGTYPE_PCI: 2741 case HE_REGTYPE_PCI:
2742 if (reg.addr < 0 || reg.addr >= HE_REGMAP_SIZE) { 2742 if (reg.addr >= HE_REGMAP_SIZE) {
2743 err = -EINVAL; 2743 err = -EINVAL;
2744 break; 2744 break;
2745 } 2745 }
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index a25216bf475e..ccb2a7b7c41d 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -19,7 +19,7 @@
19 19
20/* check if byte must be stuffed/escaped 20/* check if byte must be stuffed/escaped
21 * I'm not sure which data should be encoded. 21 * I'm not sure which data should be encoded.
22 * Therefore I will go the hard way and decode every value 22 * Therefore I will go the hard way and encode every value
23 * less than 0x20, the flag sequence and the control escape char. 23 * less than 0x20, the flag sequence and the control escape char.
24 */ 24 */
25static inline int muststuff(unsigned char c) 25static inline int muststuff(unsigned char c)
@@ -35,288 +35,383 @@ static inline int muststuff(unsigned char c)
35 35
36/* == data input =========================================================== */ 36/* == data input =========================================================== */
37 37
38/* process a block of received bytes in command mode (modem response) 38/* process a block of received bytes in command mode
39 * (mstate != MS_LOCKED && (inputstate & INS_command))
40 * Append received bytes to the command response buffer and forward them
41 * line by line to the response handler. Exit whenever a mode/state change
42 * might have occurred.
39 * Return value: 43 * Return value:
40 * number of processed bytes 44 * number of processed bytes
41 */ 45 */
42static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes, 46static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
43 struct inbuf_t *inbuf)
44{ 47{
48 unsigned char *src = inbuf->data + inbuf->head;
45 struct cardstate *cs = inbuf->cs; 49 struct cardstate *cs = inbuf->cs;
46 unsigned cbytes = cs->cbytes; 50 unsigned cbytes = cs->cbytes;
47 int inputstate = inbuf->inputstate; 51 unsigned procbytes = 0;
48 int startbytes = numbytes; 52 unsigned char c;
49 53
50 for (;;) { 54 while (procbytes < numbytes) {
51 cs->respdata[cbytes] = c; 55 c = *src++;
52 if (c == 10 || c == 13) { 56 procbytes++;
53 gig_dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)", 57
58 switch (c) {
59 case '\n':
60 if (cbytes == 0 && cs->respdata[0] == '\r') {
61 /* collapse LF with preceding CR */
62 cs->respdata[0] = 0;
63 break;
64 }
65 /* --v-- fall through --v-- */
66 case '\r':
67 /* end of message line, pass to response handler */
68 gig_dbg(DEBUG_TRANSCMD, "%s: End of Message (%d Bytes)",
54 __func__, cbytes); 69 __func__, cbytes);
70 if (cbytes >= MAX_RESP_SIZE) {
71 dev_warn(cs->dev, "response too large (%d)\n",
72 cbytes);
73 cbytes = MAX_RESP_SIZE;
74 }
55 cs->cbytes = cbytes; 75 cs->cbytes = cbytes;
56 gigaset_handle_modem_response(cs); /* can change 76 gigaset_handle_modem_response(cs);
57 cs->dle */
58 cbytes = 0; 77 cbytes = 0;
59 78
60 if (cs->dle && 79 /* store EOL byte for CRLF collapsing */
61 !(inputstate & INS_DLE_command)) { 80 cs->respdata[0] = c;
62 inputstate &= ~INS_command;
63 break;
64 }
65 } else {
66 /* advance in line buffer, checking for overflow */
67 if (cbytes < MAX_RESP_SIZE - 1)
68 cbytes++;
69 else
70 dev_warn(cs->dev, "response too large\n");
71 }
72 81
73 if (!numbytes) 82 /* cs->dle may have changed */
74 break; 83 if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
75 c = *src++; 84 inbuf->inputstate &= ~INS_command;
76 --numbytes; 85
77 if (c == DLE_FLAG && 86 /* return for reevaluating state */
78 (cs->dle || inputstate & INS_DLE_command)) { 87 goto exit;
79 inputstate |= INS_DLE_char; 88
80 break; 89 case DLE_FLAG:
90 if (inbuf->inputstate & INS_DLE_char) {
91 /* quoted DLE: clear quote flag */
92 inbuf->inputstate &= ~INS_DLE_char;
93 } else if (cs->dle ||
94 (inbuf->inputstate & INS_DLE_command)) {
95 /* DLE escape, pass up for handling */
96 inbuf->inputstate |= INS_DLE_char;
97 goto exit;
98 }
99 /* quoted or not in DLE mode: treat as regular data */
100 /* --v-- fall through --v-- */
101 default:
102 /* append to line buffer if possible */
103 if (cbytes < MAX_RESP_SIZE)
104 cs->respdata[cbytes] = c;
105 cbytes++;
81 } 106 }
82 } 107 }
83 108exit:
84 cs->cbytes = cbytes; 109 cs->cbytes = cbytes;
85 inbuf->inputstate = inputstate; 110 return procbytes;
86
87 return startbytes - numbytes;
88} 111}
89 112
90/* process a block of received bytes in lock mode (tty i/f) 113/* process a block of received bytes in lock mode
114 * All received bytes are passed unmodified to the tty i/f.
91 * Return value: 115 * Return value:
92 * number of processed bytes 116 * number of processed bytes
93 */ 117 */
94static inline int lock_loop(unsigned char *src, int numbytes, 118static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
95 struct inbuf_t *inbuf)
96{ 119{
97 struct cardstate *cs = inbuf->cs; 120 unsigned char *src = inbuf->data + inbuf->head;
98
99 gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
100 numbytes, src);
101 gigaset_if_receive(cs, src, numbytes);
102 121
122 gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
123 gigaset_if_receive(inbuf->cs, src, numbytes);
103 return numbytes; 124 return numbytes;
104} 125}
105 126
127/* set up next receive skb for data mode
128 */
129static void new_rcv_skb(struct bc_state *bcs)
130{
131 struct cardstate *cs = bcs->cs;
132 unsigned short hw_hdr_len = cs->hw_hdr_len;
133
134 if (bcs->ignore) {
135 bcs->skb = NULL;
136 return;
137 }
138
139 bcs->skb = dev_alloc_skb(SBUFSIZE + hw_hdr_len);
140 if (bcs->skb == NULL) {
141 dev_warn(cs->dev, "could not allocate new skb\n");
142 return;
143 }
144 skb_reserve(bcs->skb, hw_hdr_len);
145}
146
106/* process a block of received bytes in HDLC data mode 147/* process a block of received bytes in HDLC data mode
148 * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
107 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes. 149 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
108 * When a frame is complete, check the FCS and pass valid frames to the LL. 150 * When a frame is complete, check the FCS and pass valid frames to the LL.
109 * If DLE is encountered, return immediately to let the caller handle it. 151 * If DLE is encountered, return immediately to let the caller handle it.
110 * Return value: 152 * Return value:
111 * number of processed bytes 153 * number of processed bytes
112 * numbytes (all bytes processed) on error --FIXME
113 */ 154 */
114static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes, 155static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
115 struct inbuf_t *inbuf)
116{ 156{
117 struct cardstate *cs = inbuf->cs; 157 struct cardstate *cs = inbuf->cs;
118 struct bc_state *bcs = inbuf->bcs; 158 struct bc_state *bcs = cs->bcs;
119 int inputstate = bcs->inputstate; 159 int inputstate = bcs->inputstate;
120 __u16 fcs = bcs->fcs; 160 __u16 fcs = bcs->fcs;
121 struct sk_buff *skb = bcs->skb; 161 struct sk_buff *skb = bcs->skb;
122 int startbytes = numbytes; 162 unsigned char *src = inbuf->data + inbuf->head;
163 unsigned procbytes = 0;
164 unsigned char c;
123 165
124 if (unlikely(inputstate & INS_byte_stuff)) { 166 if (inputstate & INS_byte_stuff) {
167 if (!numbytes)
168 return 0;
125 inputstate &= ~INS_byte_stuff; 169 inputstate &= ~INS_byte_stuff;
126 goto byte_stuff; 170 goto byte_stuff;
127 } 171 }
128 for (;;) { 172
129 if (unlikely(c == PPP_ESCAPE)) { 173 while (procbytes < numbytes) {
130 if (unlikely(!numbytes)) { 174 c = *src++;
131 inputstate |= INS_byte_stuff; 175 procbytes++;
176 if (c == DLE_FLAG) {
177 if (inputstate & INS_DLE_char) {
178 /* quoted DLE: clear quote flag */
179 inputstate &= ~INS_DLE_char;
180 } else if (cs->dle || (inputstate & INS_DLE_command)) {
181 /* DLE escape, pass up for handling */
182 inputstate |= INS_DLE_char;
132 break; 183 break;
133 } 184 }
134 c = *src++; 185 }
135 --numbytes; 186
136 if (unlikely(c == DLE_FLAG && 187 if (c == PPP_ESCAPE) {
137 (cs->dle || 188 /* byte stuffing indicator: pull in next byte */
138 inbuf->inputstate & INS_DLE_command))) { 189 if (procbytes >= numbytes) {
139 inbuf->inputstate |= INS_DLE_char; 190 /* end of buffer, save for later processing */
140 inputstate |= INS_byte_stuff; 191 inputstate |= INS_byte_stuff;
141 break; 192 break;
142 } 193 }
143byte_stuff: 194byte_stuff:
195 c = *src++;
196 procbytes++;
197 if (c == DLE_FLAG) {
198 if (inputstate & INS_DLE_char) {
199 /* quoted DLE: clear quote flag */
200 inputstate &= ~INS_DLE_char;
201 } else if (cs->dle ||
202 (inputstate & INS_DLE_command)) {
203 /* DLE escape, pass up for handling */
204 inputstate |=
205 INS_DLE_char | INS_byte_stuff;
206 break;
207 }
208 }
144 c ^= PPP_TRANS; 209 c ^= PPP_TRANS;
145 if (unlikely(!muststuff(c)))
146 gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
147 } else if (unlikely(c == PPP_FLAG)) {
148 if (unlikely(inputstate & INS_skip_frame)) {
149#ifdef CONFIG_GIGASET_DEBUG 210#ifdef CONFIG_GIGASET_DEBUG
150 if (!(inputstate & INS_have_data)) { /* 7E 7E */ 211 if (!muststuff(c))
151 ++bcs->emptycount; 212 gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
152 } else
153 gig_dbg(DEBUG_HDLC,
154 "7e----------------------------");
155#endif
156
157 /* end of frame */
158 gigaset_isdn_rcv_err(bcs);
159 dev_kfree_skb(skb);
160 } else if (!(inputstate & INS_have_data)) { /* 7E 7E */
161#ifdef CONFIG_GIGASET_DEBUG
162 ++bcs->emptycount;
163#endif 213#endif
164 break; 214 } else if (c == PPP_FLAG) {
165 } else { 215 /* end of frame: process content if any */
216 if (inputstate & INS_have_data) {
166 gig_dbg(DEBUG_HDLC, 217 gig_dbg(DEBUG_HDLC,
167 "7e----------------------------"); 218 "7e----------------------------");
168 219
169 /* end of frame */ 220 /* check and pass received frame */
170 if (unlikely(fcs != PPP_GOODFCS)) { 221 if (!skb) {
222 /* skipped frame */
223 gigaset_isdn_rcv_err(bcs);
224 } else if (skb->len < 2) {
225 /* frame too short for FCS */
226 dev_warn(cs->dev,
227 "short frame (%d)\n",
228 skb->len);
229 gigaset_isdn_rcv_err(bcs);
230 dev_kfree_skb_any(skb);
231 } else if (fcs != PPP_GOODFCS) {
232 /* frame check error */
171 dev_err(cs->dev, 233 dev_err(cs->dev,
172 "Checksum failed, %u bytes corrupted!\n", 234 "Checksum failed, %u bytes corrupted!\n",
173 skb->len); 235 skb->len);
174 gigaset_isdn_rcv_err(bcs); 236 gigaset_isdn_rcv_err(bcs);
175 dev_kfree_skb(skb); 237 dev_kfree_skb_any(skb);
176 } else if (likely(skb->len > 2)) { 238 } else {
239 /* good frame */
177 __skb_trim(skb, skb->len - 2); 240 __skb_trim(skb, skb->len - 2);
178 gigaset_skb_rcvd(bcs, skb); 241 gigaset_skb_rcvd(bcs, skb);
179 } else {
180 if (skb->len) {
181 dev_err(cs->dev,
182 "invalid packet size (%d)\n", skb->len);
183 gigaset_isdn_rcv_err(bcs);
184 }
185 dev_kfree_skb(skb);
186 } 242 }
187 }
188 243
189 fcs = PPP_INITFCS; 244 /* prepare reception of next frame */
190 inputstate &= ~(INS_have_data | INS_skip_frame); 245 inputstate &= ~INS_have_data;
191 if (unlikely(bcs->ignore)) { 246 new_rcv_skb(bcs);
192 inputstate |= INS_skip_frame; 247 skb = bcs->skb;
193 skb = NULL;
194 } else { 248 } else {
195 skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len); 249 /* empty frame (7E 7E) */
196 if (skb != NULL) { 250#ifdef CONFIG_GIGASET_DEBUG
197 skb_reserve(skb, cs->hw_hdr_len); 251 ++bcs->emptycount;
198 } else { 252#endif
199 dev_warn(cs->dev, 253 if (!skb) {
200 "could not allocate new skb\n"); 254 /* skipped (?) */
201 inputstate |= INS_skip_frame; 255 gigaset_isdn_rcv_err(bcs);
256 new_rcv_skb(bcs);
257 skb = bcs->skb;
202 } 258 }
203 } 259 }
204 260
205 break; 261 fcs = PPP_INITFCS;
206 } else if (unlikely(muststuff(c))) { 262 continue;
263#ifdef CONFIG_GIGASET_DEBUG
264 } else if (muststuff(c)) {
207 /* Should not happen. Possible after ZDLE=1<CR><LF>. */ 265 /* Should not happen. Possible after ZDLE=1<CR><LF>. */
208 gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c); 266 gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
267#endif
209 } 268 }
210 269
211 /* add character */ 270 /* regular data byte, append to skb */
212
213#ifdef CONFIG_GIGASET_DEBUG 271#ifdef CONFIG_GIGASET_DEBUG
214 if (unlikely(!(inputstate & INS_have_data))) { 272 if (!(inputstate & INS_have_data)) {
215 gig_dbg(DEBUG_HDLC, "7e (%d x) ================", 273 gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
216 bcs->emptycount); 274 bcs->emptycount);
217 bcs->emptycount = 0; 275 bcs->emptycount = 0;
218 } 276 }
219#endif 277#endif
220
221 inputstate |= INS_have_data; 278 inputstate |= INS_have_data;
222 279 if (skb) {
223 if (likely(!(inputstate & INS_skip_frame))) { 280 if (skb->len == SBUFSIZE) {
224 if (unlikely(skb->len == SBUFSIZE)) {
225 dev_warn(cs->dev, "received packet too long\n"); 281 dev_warn(cs->dev, "received packet too long\n");
226 dev_kfree_skb_any(skb); 282 dev_kfree_skb_any(skb);
227 skb = NULL; 283 /* skip remainder of packet */
228 inputstate |= INS_skip_frame; 284 bcs->skb = skb = NULL;
229 break; 285 } else {
286 *__skb_put(skb, 1) = c;
287 fcs = crc_ccitt_byte(fcs, c);
230 } 288 }
231 *__skb_put(skb, 1) = c;
232 fcs = crc_ccitt_byte(fcs, c);
233 }
234
235 if (unlikely(!numbytes))
236 break;
237 c = *src++;
238 --numbytes;
239 if (unlikely(c == DLE_FLAG &&
240 (cs->dle ||
241 inbuf->inputstate & INS_DLE_command))) {
242 inbuf->inputstate |= INS_DLE_char;
243 break;
244 } 289 }
245 } 290 }
291
246 bcs->inputstate = inputstate; 292 bcs->inputstate = inputstate;
247 bcs->fcs = fcs; 293 bcs->fcs = fcs;
248 bcs->skb = skb; 294 return procbytes;
249 return startbytes - numbytes;
250} 295}
251 296
252/* process a block of received bytes in transparent data mode 297/* process a block of received bytes in transparent data mode
298 * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
253 * Invert bytes, undoing byte stuffing and watching for DLE escapes. 299 * Invert bytes, undoing byte stuffing and watching for DLE escapes.
254 * If DLE is encountered, return immediately to let the caller handle it. 300 * If DLE is encountered, return immediately to let the caller handle it.
255 * Return value: 301 * Return value:
256 * number of processed bytes 302 * number of processed bytes
257 * numbytes (all bytes processed) on error --FIXME
258 */ 303 */
259static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes, 304static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
260 struct inbuf_t *inbuf)
261{ 305{
262 struct cardstate *cs = inbuf->cs; 306 struct cardstate *cs = inbuf->cs;
263 struct bc_state *bcs = inbuf->bcs; 307 struct bc_state *bcs = cs->bcs;
264 int inputstate = bcs->inputstate; 308 int inputstate = bcs->inputstate;
265 struct sk_buff *skb = bcs->skb; 309 struct sk_buff *skb = bcs->skb;
266 int startbytes = numbytes; 310 unsigned char *src = inbuf->data + inbuf->head;
311 unsigned procbytes = 0;
312 unsigned char c;
267 313
268 for (;;) { 314 if (!skb) {
269 /* add character */ 315 /* skip this block */
270 inputstate |= INS_have_data; 316 new_rcv_skb(bcs);
317 return numbytes;
318 }
271 319
272 if (likely(!(inputstate & INS_skip_frame))) { 320 while (procbytes < numbytes && skb->len < SBUFSIZE) {
273 if (unlikely(skb->len == SBUFSIZE)) { 321 c = *src++;
274 //FIXME just pass skb up and allocate a new one 322 procbytes++;
275 dev_warn(cs->dev, "received packet too long\n"); 323
276 dev_kfree_skb_any(skb); 324 if (c == DLE_FLAG) {
277 skb = NULL; 325 if (inputstate & INS_DLE_char) {
278 inputstate |= INS_skip_frame; 326 /* quoted DLE: clear quote flag */
327 inputstate &= ~INS_DLE_char;
328 } else if (cs->dle || (inputstate & INS_DLE_command)) {
329 /* DLE escape, pass up for handling */
330 inputstate |= INS_DLE_char;
279 break; 331 break;
280 } 332 }
281 *__skb_put(skb, 1) = bitrev8(c);
282 } 333 }
283 334
284 if (unlikely(!numbytes)) 335 /* regular data byte: append to current skb */
285 break; 336 inputstate |= INS_have_data;
286 c = *src++; 337 *__skb_put(skb, 1) = bitrev8(c);
287 --numbytes;
288 if (unlikely(c == DLE_FLAG &&
289 (cs->dle ||
290 inbuf->inputstate & INS_DLE_command))) {
291 inbuf->inputstate |= INS_DLE_char;
292 break;
293 }
294 } 338 }
295 339
296 /* pass data up */ 340 /* pass data up */
297 if (likely(inputstate & INS_have_data)) { 341 if (inputstate & INS_have_data) {
298 if (likely(!(inputstate & INS_skip_frame))) { 342 gigaset_skb_rcvd(bcs, skb);
299 gigaset_skb_rcvd(bcs, skb); 343 inputstate &= ~INS_have_data;
300 } 344 new_rcv_skb(bcs);
301 inputstate &= ~(INS_have_data | INS_skip_frame); 345 }
302 if (unlikely(bcs->ignore)) { 346
303 inputstate |= INS_skip_frame; 347 bcs->inputstate = inputstate;
304 skb = NULL; 348 return procbytes;
305 } else { 349}
306 skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len); 350
307 if (skb != NULL) { 351/* process DLE escapes
308 skb_reserve(skb, cs->hw_hdr_len); 352 * Called whenever a DLE sequence might be encountered in the input stream.
309 } else { 353 * Either processes the entire DLE sequence or, if that isn't possible,
310 dev_warn(cs->dev, 354 * notes the fact that an initial DLE has been received in the INS_DLE_char
311 "could not allocate new skb\n"); 355 * inputstate flag and resumes processing of the sequence on the next call.
312 inputstate |= INS_skip_frame; 356 */
357static void handle_dle(struct inbuf_t *inbuf)
358{
359 struct cardstate *cs = inbuf->cs;
360
361 if (cs->mstate == MS_LOCKED)
362 return; /* no DLE processing in lock mode */
363
364 if (!(inbuf->inputstate & INS_DLE_char)) {
365 /* no DLE pending */
366 if (inbuf->data[inbuf->head] == DLE_FLAG &&
367 (cs->dle || inbuf->inputstate & INS_DLE_command)) {
368 /* start of DLE sequence */
369 inbuf->head++;
370 if (inbuf->head == inbuf->tail ||
371 inbuf->head == RBUFSIZE) {
372 /* end of buffer, save for later processing */
373 inbuf->inputstate |= INS_DLE_char;
374 return;
313 } 375 }
376 } else {
377 /* regular data byte */
378 return;
314 } 379 }
315 } 380 }
316 381
317 bcs->inputstate = inputstate; 382 /* consume pending DLE */
318 bcs->skb = skb; 383 inbuf->inputstate &= ~INS_DLE_char;
319 return startbytes - numbytes; 384
385 switch (inbuf->data[inbuf->head]) {
386 case 'X': /* begin of event message */
387 if (inbuf->inputstate & INS_command)
388 dev_notice(cs->dev,
389 "received <DLE>X in command mode\n");
390 inbuf->inputstate |= INS_command | INS_DLE_command;
391 inbuf->head++; /* byte consumed */
392 break;
393 case '.': /* end of event message */
394 if (!(inbuf->inputstate & INS_DLE_command))
395 dev_notice(cs->dev,
396 "received <DLE>. without <DLE>X\n");
397 inbuf->inputstate &= ~INS_DLE_command;
398 /* return to data mode if in DLE mode */
399 if (cs->dle)
400 inbuf->inputstate &= ~INS_command;
401 inbuf->head++; /* byte consumed */
402 break;
403 case DLE_FLAG: /* DLE in data stream */
404 /* mark as quoted */
405 inbuf->inputstate |= INS_DLE_char;
406 if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
407 dev_notice(cs->dev,
408 "received <DLE><DLE> not in DLE mode\n");
409 break; /* quoted byte left in buffer */
410 default:
411 dev_notice(cs->dev, "received <DLE><%02x>\n",
412 inbuf->data[inbuf->head]);
413 /* quoted byte left in buffer */
414 }
320} 415}
321 416
322/** 417/**
@@ -330,94 +425,39 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
330 */ 425 */
331void gigaset_m10x_input(struct inbuf_t *inbuf) 426void gigaset_m10x_input(struct inbuf_t *inbuf)
332{ 427{
333 struct cardstate *cs; 428 struct cardstate *cs = inbuf->cs;
334 unsigned tail, head, numbytes; 429 unsigned numbytes, procbytes;
335 unsigned char *src, c;
336 int procbytes;
337
338 head = inbuf->head;
339 tail = inbuf->tail;
340 gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
341
342 if (head != tail) {
343 cs = inbuf->cs;
344 src = inbuf->data + head;
345 numbytes = (head > tail ? RBUFSIZE : tail) - head;
346 gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
347 430
348 while (numbytes) { 431 gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
349 if (cs->mstate == MS_LOCKED) {
350 procbytes = lock_loop(src, numbytes, inbuf);
351 src += procbytes;
352 numbytes -= procbytes;
353 } else {
354 c = *src++;
355 --numbytes;
356 if (c == DLE_FLAG && (cs->dle ||
357 inbuf->inputstate & INS_DLE_command)) {
358 if (!(inbuf->inputstate & INS_DLE_char)) {
359 inbuf->inputstate |= INS_DLE_char;
360 goto nextbyte;
361 }
362 /* <DLE> <DLE> => <DLE> in data stream */
363 inbuf->inputstate &= ~INS_DLE_char;
364 }
365 432
366 if (!(inbuf->inputstate & INS_DLE_char)) { 433 while (inbuf->head != inbuf->tail) {
367 434 /* check for DLE escape */
368 /* FIXME use function pointers? */ 435 handle_dle(inbuf);
369 if (inbuf->inputstate & INS_command)
370 procbytes = cmd_loop(c, src, numbytes, inbuf);
371 else if (inbuf->bcs->proto2 == L2_HDLC)
372 procbytes = hdlc_loop(c, src, numbytes, inbuf);
373 else
374 procbytes = iraw_loop(c, src, numbytes, inbuf);
375
376 src += procbytes;
377 numbytes -= procbytes;
378 } else { /* DLE char */
379 inbuf->inputstate &= ~INS_DLE_char;
380 switch (c) {
381 case 'X': /*begin of command*/
382 if (inbuf->inputstate & INS_command)
383 dev_warn(cs->dev,
384 "received <DLE> 'X' in command mode\n");
385 inbuf->inputstate |=
386 INS_command | INS_DLE_command;
387 break;
388 case '.': /*end of command*/
389 if (!(inbuf->inputstate & INS_command))
390 dev_warn(cs->dev,
391 "received <DLE> '.' in hdlc mode\n");
392 inbuf->inputstate &= cs->dle ?
393 ~(INS_DLE_command|INS_command)
394 : ~INS_DLE_command;
395 break;
396 //case DLE_FLAG: /*DLE_FLAG in data stream*/ /* schon oben behandelt! */
397 default:
398 dev_err(cs->dev,
399 "received 0x10 0x%02x!\n",
400 (int) c);
401 /* FIXME: reset driver?? */
402 }
403 }
404 }
405nextbyte:
406 if (!numbytes) {
407 /* end of buffer, check for wrap */
408 if (head > tail) {
409 head = 0;
410 src = inbuf->data;
411 numbytes = tail;
412 } else {
413 head = tail;
414 break;
415 }
416 }
417 }
418 436
419 gig_dbg(DEBUG_INTR, "setting head to %u", head); 437 /* process a contiguous block of bytes */
420 inbuf->head = head; 438 numbytes = (inbuf->head > inbuf->tail ?
439 RBUFSIZE : inbuf->tail) - inbuf->head;
440 gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
441 /*
442 * numbytes may be 0 if handle_dle() ate the last byte.
443 * This does no harm, *_loop() will just return 0 immediately.
444 */
445
446 if (cs->mstate == MS_LOCKED)
447 procbytes = lock_loop(numbytes, inbuf);
448 else if (inbuf->inputstate & INS_command)
449 procbytes = cmd_loop(numbytes, inbuf);
450 else if (cs->bcs->proto2 == L2_HDLC)
451 procbytes = hdlc_loop(numbytes, inbuf);
452 else
453 procbytes = iraw_loop(numbytes, inbuf);
454 inbuf->head += procbytes;
455
456 /* check for buffer wraparound */
457 if (inbuf->head >= RBUFSIZE)
458 inbuf->head = 0;
459
460 gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
421 } 461 }
422} 462}
423EXPORT_SYMBOL_GPL(gigaset_m10x_input); 463EXPORT_SYMBOL_GPL(gigaset_m10x_input);
@@ -430,11 +470,11 @@ EXPORT_SYMBOL_GPL(gigaset_m10x_input);
430 * opening and closing flags, preserving headroom data. 470 * opening and closing flags, preserving headroom data.
431 * parameters: 471 * parameters:
432 * skb skb containing original packet (freed upon return) 472 * skb skb containing original packet (freed upon return)
433 * headroom number of headroom bytes to preserve
434 * Return value: 473 * Return value:
435 * pointer to newly allocated skb containing the result frame 474 * pointer to newly allocated skb containing the result frame
475 * and the original link layer header, NULL on error
436 */ 476 */
437static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int headroom) 477static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
438{ 478{
439 struct sk_buff *hdlc_skb; 479 struct sk_buff *hdlc_skb;
440 __u16 fcs; 480 __u16 fcs;
@@ -456,17 +496,19 @@ static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int headroom)
456 496
457 /* size of new buffer: original size + number of stuffing bytes 497 /* size of new buffer: original size + number of stuffing bytes
458 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes 498 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
459 * + room for acknowledgement header 499 * + room for link layer header
460 */ 500 */
461 hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + headroom); 501 hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
462 if (!hdlc_skb) { 502 if (!hdlc_skb) {
463 dev_kfree_skb(skb); 503 dev_kfree_skb_any(skb);
464 return NULL; 504 return NULL;
465 } 505 }
466 506
467 /* Copy acknowledgement header into new skb */ 507 /* Copy link layer header into new skb */
468 skb_reserve(hdlc_skb, headroom); 508 skb_reset_mac_header(hdlc_skb);
469 memcpy(hdlc_skb->head, skb->head, headroom); 509 skb_reserve(hdlc_skb, skb->mac_len);
510 memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
511 hdlc_skb->mac_len = skb->mac_len;
470 512
471 /* Add flag sequence in front of everything.. */ 513 /* Add flag sequence in front of everything.. */
472 *(skb_put(hdlc_skb, 1)) = PPP_FLAG; 514 *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
@@ -497,7 +539,7 @@ static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int headroom)
497 539
498 *(skb_put(hdlc_skb, 1)) = PPP_FLAG; 540 *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
499 541
500 dev_kfree_skb(skb); 542 dev_kfree_skb_any(skb);
501 return hdlc_skb; 543 return hdlc_skb;
502} 544}
503 545
@@ -506,28 +548,33 @@ static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int headroom)
506 * preserving headroom data. 548 * preserving headroom data.
507 * parameters: 549 * parameters:
508 * skb skb containing original packet (freed upon return) 550 * skb skb containing original packet (freed upon return)
509 * headroom number of headroom bytes to preserve
510 * Return value: 551 * Return value:
511 * pointer to newly allocated skb containing the result frame 552 * pointer to newly allocated skb containing the result frame
553 * and the original link layer header, NULL on error
512 */ 554 */
513static struct sk_buff *iraw_encode(struct sk_buff *skb, int headroom) 555static struct sk_buff *iraw_encode(struct sk_buff *skb)
514{ 556{
515 struct sk_buff *iraw_skb; 557 struct sk_buff *iraw_skb;
516 unsigned char c; 558 unsigned char c;
517 unsigned char *cp; 559 unsigned char *cp;
518 int len; 560 int len;
519 561
520 /* worst case: every byte must be stuffed */ 562 /* size of new buffer (worst case = every byte must be stuffed):
521 iraw_skb = dev_alloc_skb(2*skb->len + headroom); 563 * 2 * original size + room for link layer header
564 */
565 iraw_skb = dev_alloc_skb(2*skb->len + skb->mac_len);
522 if (!iraw_skb) { 566 if (!iraw_skb) {
523 dev_kfree_skb(skb); 567 dev_kfree_skb_any(skb);
524 return NULL; 568 return NULL;
525 } 569 }
526 570
527 /* Copy acknowledgement header into new skb */ 571 /* copy link layer header into new skb */
528 skb_reserve(iraw_skb, headroom); 572 skb_reset_mac_header(iraw_skb);
529 memcpy(iraw_skb->head, skb->head, headroom); 573 skb_reserve(iraw_skb, skb->mac_len);
574 memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
575 iraw_skb->mac_len = skb->mac_len;
530 576
577 /* copy and stuff data */
531 cp = skb->data; 578 cp = skb->data;
532 len = skb->len; 579 len = skb->len;
533 while (len--) { 580 while (len--) {
@@ -536,7 +583,7 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int headroom)
536 *(skb_put(iraw_skb, 1)) = c; 583 *(skb_put(iraw_skb, 1)) = c;
537 *(skb_put(iraw_skb, 1)) = c; 584 *(skb_put(iraw_skb, 1)) = c;
538 } 585 }
539 dev_kfree_skb(skb); 586 dev_kfree_skb_any(skb);
540 return iraw_skb; 587 return iraw_skb;
541} 588}
542 589
@@ -548,7 +595,7 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int headroom)
548 * Called by LL to encode and queue an skb for sending, and start 595 * Called by LL to encode and queue an skb for sending, and start
549 * transmission if necessary. 596 * transmission if necessary.
550 * Once the payload data has been transmitted completely, gigaset_skb_sent() 597 * Once the payload data has been transmitted completely, gigaset_skb_sent()
551 * will be called with the first cs->hw_hdr_len bytes of skb->head preserved. 598 * will be called with the skb's link layer header preserved.
552 * 599 *
553 * Return value: 600 * Return value:
554 * number of bytes accepted for sending (skb->len) if ok, 601 * number of bytes accepted for sending (skb->len) if ok,
@@ -556,24 +603,25 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int headroom)
556 */ 603 */
557int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) 604int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
558{ 605{
606 struct cardstate *cs = bcs->cs;
559 unsigned len = skb->len; 607 unsigned len = skb->len;
560 unsigned long flags; 608 unsigned long flags;
561 609
562 if (bcs->proto2 == L2_HDLC) 610 if (bcs->proto2 == L2_HDLC)
563 skb = HDLC_Encode(skb, bcs->cs->hw_hdr_len); 611 skb = HDLC_Encode(skb);
564 else 612 else
565 skb = iraw_encode(skb, bcs->cs->hw_hdr_len); 613 skb = iraw_encode(skb);
566 if (!skb) { 614 if (!skb) {
567 dev_err(bcs->cs->dev, 615 dev_err(cs->dev,
568 "unable to allocate memory for encoding!\n"); 616 "unable to allocate memory for encoding!\n");
569 return -ENOMEM; 617 return -ENOMEM;
570 } 618 }
571 619
572 skb_queue_tail(&bcs->squeue, skb); 620 skb_queue_tail(&bcs->squeue, skb);
573 spin_lock_irqsave(&bcs->cs->lock, flags); 621 spin_lock_irqsave(&cs->lock, flags);
574 if (bcs->cs->connected) 622 if (cs->connected)
575 tasklet_schedule(&bcs->cs->write_tasklet); 623 tasklet_schedule(&cs->write_tasklet);
576 spin_unlock_irqrestore(&bcs->cs->lock, flags); 624 spin_unlock_irqrestore(&cs->lock, flags);
577 625
578 return len; /* ok so far */ 626 return len; /* ok so far */
579} 627}
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 388e63a8ae94..9fd19db045fb 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -57,7 +57,7 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
57#define USB_SX353_PRODUCT_ID 0x0022 57#define USB_SX353_PRODUCT_ID 0x0022
58 58
59/* table of devices that work with this driver */ 59/* table of devices that work with this driver */
60static const struct usb_device_id gigaset_table [] = { 60static const struct usb_device_id gigaset_table[] = {
61 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) }, 61 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) },
62 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) }, 62 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) },
63 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) }, 63 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
@@ -137,7 +137,7 @@ struct bas_cardstate {
137#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */ 137#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
138 138
139 139
140static struct gigaset_driver *driver = NULL; 140static struct gigaset_driver *driver;
141 141
142/* usb specific object needed to register this driver with the usb subsystem */ 142/* usb specific object needed to register this driver with the usb subsystem */
143static struct usb_driver gigaset_usb_driver = { 143static struct usb_driver gigaset_usb_driver = {
@@ -601,11 +601,12 @@ static int atread_submit(struct cardstate *cs, int timeout)
601 ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size); 601 ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
602 usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev, 602 usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
603 usb_rcvctrlpipe(ucs->udev, 0), 603 usb_rcvctrlpipe(ucs->udev, 0),
604 (unsigned char*) & ucs->dr_cmd_in, 604 (unsigned char *) &ucs->dr_cmd_in,
605 ucs->rcvbuf, ucs->rcvbuf_size, 605 ucs->rcvbuf, ucs->rcvbuf_size,
606 read_ctrl_callback, cs->inbuf); 606 read_ctrl_callback, cs->inbuf);
607 607
608 if ((ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC)) != 0) { 608 ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC);
609 if (ret != 0) {
609 update_basstate(ucs, 0, BS_ATRDPEND); 610 update_basstate(ucs, 0, BS_ATRDPEND);
610 dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n", 611 dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
611 get_usb_rcmsg(ret)); 612 get_usb_rcmsg(ret));
@@ -652,13 +653,11 @@ static void read_int_callback(struct urb *urb)
652 return; 653 return;
653 case -ENODEV: /* device removed */ 654 case -ENODEV: /* device removed */
654 case -ESHUTDOWN: /* device shut down */ 655 case -ESHUTDOWN: /* device shut down */
655 //FIXME use this as disconnect indicator?
656 gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__); 656 gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__);
657 return; 657 return;
658 default: /* severe trouble */ 658 default: /* severe trouble */
659 dev_warn(cs->dev, "interrupt read: %s\n", 659 dev_warn(cs->dev, "interrupt read: %s\n",
660 get_usb_statmsg(status)); 660 get_usb_statmsg(status));
661 //FIXME corrective action? resubmission always ok?
662 goto resubmit; 661 goto resubmit;
663 } 662 }
664 663
@@ -742,7 +741,8 @@ static void read_int_callback(struct urb *urb)
742 kfree(ucs->rcvbuf); 741 kfree(ucs->rcvbuf);
743 ucs->rcvbuf_size = 0; 742 ucs->rcvbuf_size = 0;
744 } 743 }
745 if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) { 744 ucs->rcvbuf = kmalloc(l, GFP_ATOMIC);
745 if (ucs->rcvbuf == NULL) {
746 spin_unlock_irqrestore(&cs->lock, flags); 746 spin_unlock_irqrestore(&cs->lock, flags);
747 dev_err(cs->dev, "out of memory receiving AT data\n"); 747 dev_err(cs->dev, "out of memory receiving AT data\n");
748 error_reset(cs); 748 error_reset(cs);
@@ -750,12 +750,12 @@ static void read_int_callback(struct urb *urb)
750 } 750 }
751 ucs->rcvbuf_size = l; 751 ucs->rcvbuf_size = l;
752 ucs->retry_cmd_in = 0; 752 ucs->retry_cmd_in = 0;
753 if ((rc = atread_submit(cs, BAS_TIMEOUT)) < 0) { 753 rc = atread_submit(cs, BAS_TIMEOUT);
754 if (rc < 0) {
754 kfree(ucs->rcvbuf); 755 kfree(ucs->rcvbuf);
755 ucs->rcvbuf = NULL; 756 ucs->rcvbuf = NULL;
756 ucs->rcvbuf_size = 0; 757 ucs->rcvbuf_size = 0;
757 if (rc != -ENODEV) { 758 if (rc != -ENODEV) {
758 //FIXME corrective action?
759 spin_unlock_irqrestore(&cs->lock, flags); 759 spin_unlock_irqrestore(&cs->lock, flags);
760 error_reset(cs); 760 error_reset(cs);
761 break; 761 break;
@@ -940,7 +940,8 @@ static int starturbs(struct bc_state *bcs)
940 } 940 }
941 941
942 dump_urb(DEBUG_ISO, "Initial isoc read", urb); 942 dump_urb(DEBUG_ISO, "Initial isoc read", urb);
943 if ((rc = usb_submit_urb(urb, GFP_ATOMIC)) != 0) 943 rc = usb_submit_urb(urb, GFP_ATOMIC);
944 if (rc != 0)
944 goto error; 945 goto error;
945 } 946 }
946 947
@@ -1045,7 +1046,8 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1045 1046
1046 /* compute frame length according to flow control */ 1047 /* compute frame length according to flow control */
1047 ifd->length = BAS_NORMFRAME; 1048 ifd->length = BAS_NORMFRAME;
1048 if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) { 1049 corrbytes = atomic_read(&ubc->corrbytes);
1050 if (corrbytes != 0) {
1049 gig_dbg(DEBUG_ISO, "%s: corrbytes=%d", 1051 gig_dbg(DEBUG_ISO, "%s: corrbytes=%d",
1050 __func__, corrbytes); 1052 __func__, corrbytes);
1051 if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME) 1053 if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
@@ -1284,7 +1286,8 @@ static void read_iso_tasklet(unsigned long data)
1284 for (;;) { 1286 for (;;) {
1285 /* retrieve URB */ 1287 /* retrieve URB */
1286 spin_lock_irqsave(&ubc->isoinlock, flags); 1288 spin_lock_irqsave(&ubc->isoinlock, flags);
1287 if (!(urb = ubc->isoindone)) { 1289 urb = ubc->isoindone;
1290 if (!urb) {
1288 spin_unlock_irqrestore(&ubc->isoinlock, flags); 1291 spin_unlock_irqrestore(&ubc->isoinlock, flags);
1289 return; 1292 return;
1290 } 1293 }
@@ -1371,7 +1374,7 @@ static void read_iso_tasklet(unsigned long data)
1371 "isochronous read: %d data bytes missing\n", 1374 "isochronous read: %d data bytes missing\n",
1372 totleft); 1375 totleft);
1373 1376
1374 error: 1377error:
1375 /* URB processed, resubmit */ 1378 /* URB processed, resubmit */
1376 for (frame = 0; frame < BAS_NUMFRAMES; frame++) { 1379 for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
1377 urb->iso_frame_desc[frame].status = 0; 1380 urb->iso_frame_desc[frame].status = 0;
@@ -1568,7 +1571,7 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
1568 ucs->dr_ctrl.wLength = 0; 1571 ucs->dr_ctrl.wLength = 0;
1569 usb_fill_control_urb(ucs->urb_ctrl, ucs->udev, 1572 usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
1570 usb_sndctrlpipe(ucs->udev, 0), 1573 usb_sndctrlpipe(ucs->udev, 0),
1571 (unsigned char*) &ucs->dr_ctrl, NULL, 0, 1574 (unsigned char *) &ucs->dr_ctrl, NULL, 0,
1572 write_ctrl_callback, ucs); 1575 write_ctrl_callback, ucs);
1573 ucs->retry_ctrl = 0; 1576 ucs->retry_ctrl = 0;
1574 ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC); 1577 ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
@@ -1621,7 +1624,8 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
1621 return -EHOSTUNREACH; 1624 return -EHOSTUNREACH;
1622 } 1625 }
1623 1626
1624 if ((ret = starturbs(bcs)) < 0) { 1627 ret = starturbs(bcs);
1628 if (ret < 0) {
1625 dev_err(cs->dev, 1629 dev_err(cs->dev,
1626 "could not start isochronous I/O for channel B%d: %s\n", 1630 "could not start isochronous I/O for channel B%d: %s\n",
1627 bcs->channel + 1, 1631 bcs->channel + 1,
@@ -1633,7 +1637,8 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
1633 } 1637 }
1634 1638
1635 req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL; 1639 req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
1636 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) { 1640 ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
1641 if (ret < 0) {
1637 dev_err(cs->dev, "could not open channel B%d\n", 1642 dev_err(cs->dev, "could not open channel B%d\n",
1638 bcs->channel + 1); 1643 bcs->channel + 1);
1639 stopurbs(bcs->hw.bas); 1644 stopurbs(bcs->hw.bas);
@@ -1677,7 +1682,8 @@ static int gigaset_close_bchannel(struct bc_state *bcs)
1677 1682
1678 /* channel running: tell device to close it */ 1683 /* channel running: tell device to close it */
1679 req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL; 1684 req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
1680 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) 1685 ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
1686 if (ret < 0)
1681 dev_err(cs->dev, "closing channel B%d failed\n", 1687 dev_err(cs->dev, "closing channel B%d failed\n",
1682 bcs->channel + 1); 1688 bcs->channel + 1);
1683 1689
@@ -1703,10 +1709,12 @@ static void complete_cb(struct cardstate *cs)
1703 gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD, 1709 gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD,
1704 "write_command: sent %u bytes, %u left", 1710 "write_command: sent %u bytes, %u left",
1705 cs->curlen, cs->cmdbytes); 1711 cs->curlen, cs->cmdbytes);
1706 if ((cs->cmdbuf = cb->next) != NULL) { 1712 if (cb->next != NULL) {
1713 cs->cmdbuf = cb->next;
1707 cs->cmdbuf->prev = NULL; 1714 cs->cmdbuf->prev = NULL;
1708 cs->curlen = cs->cmdbuf->len; 1715 cs->curlen = cs->cmdbuf->len;
1709 } else { 1716 } else {
1717 cs->cmdbuf = NULL;
1710 cs->lastcmdbuf = NULL; 1718 cs->lastcmdbuf = NULL;
1711 cs->curlen = 0; 1719 cs->curlen = 0;
1712 } 1720 }
@@ -1833,7 +1841,7 @@ static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
1833 ucs->dr_cmd_out.wLength = cpu_to_le16(len); 1841 ucs->dr_cmd_out.wLength = cpu_to_le16(len);
1834 usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev, 1842 usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
1835 usb_sndctrlpipe(ucs->udev, 0), 1843 usb_sndctrlpipe(ucs->udev, 0),
1836 (unsigned char*) &ucs->dr_cmd_out, buf, len, 1844 (unsigned char *) &ucs->dr_cmd_out, buf, len,
1837 write_command_callback, cs); 1845 write_command_callback, cs);
1838 rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC); 1846 rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
1839 if (unlikely(rc)) { 1847 if (unlikely(rc)) {
@@ -1953,7 +1961,8 @@ static int gigaset_write_cmd(struct cardstate *cs,
1953 1961
1954 if (len > IF_WRITEBUF) 1962 if (len > IF_WRITEBUF)
1955 len = IF_WRITEBUF; 1963 len = IF_WRITEBUF;
1956 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 1964 cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
1965 if (!cb) {
1957 dev_err(cs->dev, "%s: out of memory\n", __func__); 1966 dev_err(cs->dev, "%s: out of memory\n", __func__);
1958 rc = -ENOMEM; 1967 rc = -ENOMEM;
1959 goto notqueued; 1968 goto notqueued;
@@ -2100,7 +2109,8 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2100 } 2109 }
2101 ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL; 2110 ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
2102 ubc->numsub = 0; 2111 ubc->numsub = 0;
2103 if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) { 2112 ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL);
2113 if (!ubc->isooutbuf) {
2104 pr_err("out of memory\n"); 2114 pr_err("out of memory\n");
2105 kfree(ubc); 2115 kfree(ubc);
2106 bcs->hw.bas = NULL; 2116 bcs->hw.bas = NULL;
@@ -2252,7 +2262,8 @@ static int gigaset_probe(struct usb_interface *interface,
2252 gig_dbg(DEBUG_ANY, 2262 gig_dbg(DEBUG_ANY,
2253 "%s: wrong alternate setting %d - trying to switch", 2263 "%s: wrong alternate setting %d - trying to switch",
2254 __func__, hostif->desc.bAlternateSetting); 2264 __func__, hostif->desc.bAlternateSetting);
2255 if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) { 2265 if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
2266 < 0) {
2256 dev_warn(&udev->dev, "usb_set_interface failed, " 2267 dev_warn(&udev->dev, "usb_set_interface failed, "
2257 "device %d interface %d altsetting %d\n", 2268 "device %d interface %d altsetting %d\n",
2258 udev->devnum, hostif->desc.bInterfaceNumber, 2269 udev->devnum, hostif->desc.bInterfaceNumber,
@@ -2321,14 +2332,16 @@ static int gigaset_probe(struct usb_interface *interface,
2321 (endpoint->bEndpointAddress) & 0x0f), 2332 (endpoint->bEndpointAddress) & 0x0f),
2322 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs, 2333 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
2323 endpoint->bInterval); 2334 endpoint->bInterval);
2324 if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) { 2335 rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
2336 if (rc != 0) {
2325 dev_err(cs->dev, "could not submit interrupt URB: %s\n", 2337 dev_err(cs->dev, "could not submit interrupt URB: %s\n",
2326 get_usb_rcmsg(rc)); 2338 get_usb_rcmsg(rc));
2327 goto error; 2339 goto error;
2328 } 2340 }
2329 2341
2330 /* tell the device that the driver is ready */ 2342 /* tell the device that the driver is ready */
2331 if ((rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0) 2343 rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
2344 if (rc != 0)
2332 goto error; 2345 goto error;
2333 2346
2334 /* tell common part that the device is ready */ 2347 /* tell common part that the device is ready */
@@ -2524,9 +2537,10 @@ static int __init bas_gigaset_init(void)
2524 int result; 2537 int result;
2525 2538
2526 /* allocate memory for our driver state and intialize it */ 2539 /* allocate memory for our driver state and intialize it */
2527 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 2540 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
2528 GIGASET_MODULENAME, GIGASET_DEVNAME, 2541 GIGASET_MODULENAME, GIGASET_DEVNAME,
2529 &gigops, THIS_MODULE)) == NULL) 2542 &gigops, THIS_MODULE);
2543 if (driver == NULL)
2530 goto error; 2544 goto error;
2531 2545
2532 /* register this driver with the USB subsystem */ 2546 /* register this driver with the USB subsystem */
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index c276a925b36f..3f5cd06af104 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -168,14 +168,6 @@ static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
168 msgname, paramname); 168 msgname, paramname);
169} 169}
170 170
171static inline void ignore_cmstruct_param(struct cardstate *cs, _cmstruct param,
172 char *msgname, char *paramname)
173{
174 if (param != CAPI_DEFAULT)
175 dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n",
176 msgname, paramname);
177}
178
179/* 171/*
180 * check for legal hex digit 172 * check for legal hex digit
181 */ 173 */
@@ -370,6 +362,7 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
370 struct cardstate *cs = bcs->cs; 362 struct cardstate *cs = bcs->cs;
371 struct gigaset_capi_ctr *iif = cs->iif; 363 struct gigaset_capi_ctr *iif = cs->iif;
372 struct gigaset_capi_appl *ap = bcs->ap; 364 struct gigaset_capi_appl *ap = bcs->ap;
365 unsigned char *req = skb_mac_header(dskb);
373 struct sk_buff *cskb; 366 struct sk_buff *cskb;
374 u16 flags; 367 u16 flags;
375 368
@@ -388,7 +381,7 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
388 } 381 }
389 382
390 /* ToDo: honor unset "delivery confirmation" bit */ 383 /* ToDo: honor unset "delivery confirmation" bit */
391 flags = CAPIMSG_FLAGS(dskb->head); 384 flags = CAPIMSG_FLAGS(req);
392 385
393 /* build DATA_B3_CONF message */ 386 /* build DATA_B3_CONF message */
394 cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC); 387 cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
@@ -401,11 +394,11 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
401 CAPIMSG_SETAPPID(cskb->data, ap->id); 394 CAPIMSG_SETAPPID(cskb->data, ap->id);
402 CAPIMSG_SETCOMMAND(cskb->data, CAPI_DATA_B3); 395 CAPIMSG_SETCOMMAND(cskb->data, CAPI_DATA_B3);
403 CAPIMSG_SETSUBCOMMAND(cskb->data, CAPI_CONF); 396 CAPIMSG_SETSUBCOMMAND(cskb->data, CAPI_CONF);
404 CAPIMSG_SETMSGID(cskb->data, CAPIMSG_MSGID(dskb->head)); 397 CAPIMSG_SETMSGID(cskb->data, CAPIMSG_MSGID(req));
405 CAPIMSG_SETCONTROLLER(cskb->data, iif->ctr.cnr); 398 CAPIMSG_SETCONTROLLER(cskb->data, iif->ctr.cnr);
406 CAPIMSG_SETPLCI_PART(cskb->data, bcs->channel + 1); 399 CAPIMSG_SETPLCI_PART(cskb->data, bcs->channel + 1);
407 CAPIMSG_SETNCCI_PART(cskb->data, 1); 400 CAPIMSG_SETNCCI_PART(cskb->data, 1);
408 CAPIMSG_SETHANDLE_CONF(cskb->data, CAPIMSG_HANDLE_REQ(dskb->head)); 401 CAPIMSG_SETHANDLE_CONF(cskb->data, CAPIMSG_HANDLE_REQ(req));
409 if (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION) 402 if (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION)
410 CAPIMSG_SETINFO_CONF(cskb->data, 403 CAPIMSG_SETINFO_CONF(cskb->data,
411 CapiFlagsNotSupportedByProtocol); 404 CapiFlagsNotSupportedByProtocol);
@@ -445,7 +438,7 @@ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
445 /* don't send further B3 messages if disconnected */ 438 /* don't send further B3 messages if disconnected */
446 if (ap->connected < APCONN_ACTIVE) { 439 if (ap->connected < APCONN_ACTIVE) {
447 gig_dbg(DEBUG_LLDATA, "disconnected, discarding data"); 440 gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
448 dev_kfree_skb(skb); 441 dev_kfree_skb_any(skb);
449 return; 442 return;
450 } 443 }
451 444
@@ -1062,6 +1055,7 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1062 struct sk_buff *skb) 1055 struct sk_buff *skb)
1063{ 1056{
1064 struct cardstate *cs = iif->ctr.driverdata; 1057 struct cardstate *cs = iif->ctr.driverdata;
1058 _cmsg *cmsg = &iif->acmsg;
1065 struct sk_buff *cskb; 1059 struct sk_buff *cskb;
1066 u8 *pparam; 1060 u8 *pparam;
1067 unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN; 1061 unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN;
@@ -1069,14 +1063,14 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1069 static u8 confparam[10]; /* max. 9 octets + length byte */ 1063 static u8 confparam[10]; /* max. 9 octets + length byte */
1070 1064
1071 /* decode message */ 1065 /* decode message */
1072 capi_message2cmsg(&iif->acmsg, skb->data); 1066 capi_message2cmsg(cmsg, skb->data);
1073 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1067 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1074 1068
1075 /* 1069 /*
1076 * Facility Request Parameter is not decoded by capi_message2cmsg() 1070 * Facility Request Parameter is not decoded by capi_message2cmsg()
1077 * encoding depends on Facility Selector 1071 * encoding depends on Facility Selector
1078 */ 1072 */
1079 switch (iif->acmsg.FacilitySelector) { 1073 switch (cmsg->FacilitySelector) {
1080 case CAPI_FACILITY_DTMF: /* ToDo */ 1074 case CAPI_FACILITY_DTMF: /* ToDo */
1081 info = CapiFacilityNotSupported; 1075 info = CapiFacilityNotSupported;
1082 confparam[0] = 2; /* length */ 1076 confparam[0] = 2; /* length */
@@ -1093,7 +1087,7 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1093 1087
1094 case CAPI_FACILITY_SUPPSVC: 1088 case CAPI_FACILITY_SUPPSVC:
1095 /* decode Function parameter */ 1089 /* decode Function parameter */
1096 pparam = iif->acmsg.FacilityRequestParameter; 1090 pparam = cmsg->FacilityRequestParameter;
1097 if (pparam == NULL || *pparam < 2) { 1091 if (pparam == NULL || *pparam < 2) {
1098 dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ", 1092 dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ",
1099 "Facility Request Parameter"); 1093 "Facility Request Parameter");
@@ -1141,18 +1135,18 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1141 } 1135 }
1142 1136
1143 /* send FACILITY_CONF with given Info and confirmation parameter */ 1137 /* send FACILITY_CONF with given Info and confirmation parameter */
1144 capi_cmsg_answer(&iif->acmsg); 1138 capi_cmsg_answer(cmsg);
1145 iif->acmsg.Info = info; 1139 cmsg->Info = info;
1146 iif->acmsg.FacilityConfirmationParameter = confparam; 1140 cmsg->FacilityConfirmationParameter = confparam;
1147 msgsize += confparam[0]; /* length */ 1141 msgsize += confparam[0]; /* length */
1148 cskb = alloc_skb(msgsize, GFP_ATOMIC); 1142 cskb = alloc_skb(msgsize, GFP_ATOMIC);
1149 if (!cskb) { 1143 if (!cskb) {
1150 dev_err(cs->dev, "%s: out of memory\n", __func__); 1144 dev_err(cs->dev, "%s: out of memory\n", __func__);
1151 return; 1145 return;
1152 } 1146 }
1153 capi_cmsg2message(&iif->acmsg, __skb_put(cskb, msgsize)); 1147 capi_cmsg2message(cmsg, __skb_put(cskb, msgsize));
1154 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1148 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1155 capi_ctr_handle_message(&iif->ctr, ap->id, cskb); 1149 capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
1156} 1150}
1157 1151
1158 1152
@@ -1207,8 +1201,8 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
1207 u16 info; 1201 u16 info;
1208 1202
1209 /* decode message */ 1203 /* decode message */
1210 capi_message2cmsg(&iif->acmsg, skb->data); 1204 capi_message2cmsg(cmsg, skb->data);
1211 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1205 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1212 1206
1213 /* get free B channel & construct PLCI */ 1207 /* get free B channel & construct PLCI */
1214 bcs = gigaset_get_free_channel(cs); 1208 bcs = gigaset_get_free_channel(cs);
@@ -1261,7 +1255,7 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
1261 commands[AT_DIAL] = kmalloc(l+3, GFP_KERNEL); 1255 commands[AT_DIAL] = kmalloc(l+3, GFP_KERNEL);
1262 if (!commands[AT_DIAL]) 1256 if (!commands[AT_DIAL])
1263 goto oom; 1257 goto oom;
1264 snprintf(commands[AT_DIAL], l+3, "D%*s\r", l, pp); 1258 snprintf(commands[AT_DIAL], l+3, "D%.*s\r", l, pp);
1265 1259
1266 /* encode parameter: Calling party number */ 1260 /* encode parameter: Calling party number */
1267 pp = cmsg->CallingPartyNumber; 1261 pp = cmsg->CallingPartyNumber;
@@ -1411,8 +1405,16 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
1411 "CONNECT_REQ", "Calling pty subaddr"); 1405 "CONNECT_REQ", "Calling pty subaddr");
1412 ignore_cstruct_param(cs, cmsg->LLC, 1406 ignore_cstruct_param(cs, cmsg->LLC,
1413 "CONNECT_REQ", "LLC"); 1407 "CONNECT_REQ", "LLC");
1414 ignore_cmstruct_param(cs, cmsg->AdditionalInfo, 1408 if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
1415 "CONNECT_REQ", "Additional Info"); 1409 ignore_cstruct_param(cs, cmsg->BChannelinformation,
1410 "CONNECT_REQ", "B Channel Information");
1411 ignore_cstruct_param(cs, cmsg->Keypadfacility,
1412 "CONNECT_REQ", "Keypad Facility");
1413 ignore_cstruct_param(cs, cmsg->Useruserdata,
1414 "CONNECT_REQ", "User-User Data");
1415 ignore_cstruct_param(cs, cmsg->Facilitydataarray,
1416 "CONNECT_REQ", "Facility Data Array");
1417 }
1416 1418
1417 /* encode parameter: B channel to use */ 1419 /* encode parameter: B channel to use */
1418 commands[AT_ISO] = kmalloc(9, GFP_KERNEL); 1420 commands[AT_ISO] = kmalloc(9, GFP_KERNEL);
@@ -1458,9 +1460,9 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
1458 int channel; 1460 int channel;
1459 1461
1460 /* decode message */ 1462 /* decode message */
1461 capi_message2cmsg(&iif->acmsg, skb->data); 1463 capi_message2cmsg(cmsg, skb->data);
1462 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1464 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1463 dev_kfree_skb(skb); 1465 dev_kfree_skb_any(skb);
1464 1466
1465 /* extract and check channel number from PLCI */ 1467 /* extract and check channel number from PLCI */
1466 channel = (cmsg->adr.adrPLCI >> 8) & 0xff; 1468 channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
@@ -1524,8 +1526,16 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
1524 "CONNECT_RESP", "Connected Subaddress"); 1526 "CONNECT_RESP", "Connected Subaddress");
1525 ignore_cstruct_param(cs, cmsg->LLC, 1527 ignore_cstruct_param(cs, cmsg->LLC,
1526 "CONNECT_RESP", "LLC"); 1528 "CONNECT_RESP", "LLC");
1527 ignore_cmstruct_param(cs, cmsg->AdditionalInfo, 1529 if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
1528 "CONNECT_RESP", "Additional Info"); 1530 ignore_cstruct_param(cs, cmsg->BChannelinformation,
1531 "CONNECT_RESP", "BChannel Information");
1532 ignore_cstruct_param(cs, cmsg->Keypadfacility,
1533 "CONNECT_RESP", "Keypad Facility");
1534 ignore_cstruct_param(cs, cmsg->Useruserdata,
1535 "CONNECT_RESP", "User-User Data");
1536 ignore_cstruct_param(cs, cmsg->Facilitydataarray,
1537 "CONNECT_RESP", "Facility Data Array");
1538 }
1529 1539
1530 /* Accept call */ 1540 /* Accept call */
1531 if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state, 1541 if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
@@ -1587,17 +1597,18 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
1587 struct sk_buff *skb) 1597 struct sk_buff *skb)
1588{ 1598{
1589 struct cardstate *cs = iif->ctr.driverdata; 1599 struct cardstate *cs = iif->ctr.driverdata;
1600 _cmsg *cmsg = &iif->acmsg;
1590 int channel; 1601 int channel;
1591 1602
1592 /* decode message */ 1603 /* decode message */
1593 capi_message2cmsg(&iif->acmsg, skb->data); 1604 capi_message2cmsg(cmsg, skb->data);
1594 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1605 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1595 1606
1596 /* extract and check channel number from PLCI */ 1607 /* extract and check channel number from PLCI */
1597 channel = (iif->acmsg.adr.adrPLCI >> 8) & 0xff; 1608 channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
1598 if (!channel || channel > cs->channels) { 1609 if (!channel || channel > cs->channels) {
1599 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", 1610 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1600 "CONNECT_B3_REQ", "PLCI", iif->acmsg.adr.adrPLCI); 1611 "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI);
1601 send_conf(iif, ap, skb, CapiIllContrPlciNcci); 1612 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1602 return; 1613 return;
1603 } 1614 }
@@ -1606,14 +1617,12 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
1606 ap->connected = APCONN_ACTIVE; 1617 ap->connected = APCONN_ACTIVE;
1607 1618
1608 /* build NCCI: always 1 (one B3 connection only) */ 1619 /* build NCCI: always 1 (one B3 connection only) */
1609 iif->acmsg.adr.adrNCCI |= 1 << 16; 1620 cmsg->adr.adrNCCI |= 1 << 16;
1610 1621
1611 /* NCPI parameter: not applicable for B3 Transparent */ 1622 /* NCPI parameter: not applicable for B3 Transparent */
1612 ignore_cstruct_param(cs, iif->acmsg.NCPI, 1623 ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
1613 "CONNECT_B3_REQ", "NCPI"); 1624 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
1614 send_conf(iif, ap, skb, 1625 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1615 (iif->acmsg.NCPI && iif->acmsg.NCPI[0]) ?
1616 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1617} 1626}
1618 1627
1619/* 1628/*
@@ -1628,27 +1637,28 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
1628 struct sk_buff *skb) 1637 struct sk_buff *skb)
1629{ 1638{
1630 struct cardstate *cs = iif->ctr.driverdata; 1639 struct cardstate *cs = iif->ctr.driverdata;
1631 struct bc_state *bcs = NULL; 1640 _cmsg *cmsg = &iif->acmsg;
1641 struct bc_state *bcs;
1632 int channel; 1642 int channel;
1633 unsigned int msgsize; 1643 unsigned int msgsize;
1634 u8 command; 1644 u8 command;
1635 1645
1636 /* decode message */ 1646 /* decode message */
1637 capi_message2cmsg(&iif->acmsg, skb->data); 1647 capi_message2cmsg(cmsg, skb->data);
1638 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1648 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1639 1649
1640 /* extract and check channel number and NCCI */ 1650 /* extract and check channel number and NCCI */
1641 channel = (iif->acmsg.adr.adrNCCI >> 8) & 0xff; 1651 channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
1642 if (!channel || channel > cs->channels || 1652 if (!channel || channel > cs->channels ||
1643 ((iif->acmsg.adr.adrNCCI >> 16) & 0xffff) != 1) { 1653 ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
1644 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", 1654 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1645 "CONNECT_B3_RESP", "NCCI", iif->acmsg.adr.adrNCCI); 1655 "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI);
1646 dev_kfree_skb(skb); 1656 dev_kfree_skb_any(skb);
1647 return; 1657 return;
1648 } 1658 }
1649 bcs = &cs->bcs[channel-1]; 1659 bcs = &cs->bcs[channel-1];
1650 1660
1651 if (iif->acmsg.Reject) { 1661 if (cmsg->Reject) {
1652 /* Reject: clear B3 connect received flag */ 1662 /* Reject: clear B3 connect received flag */
1653 ap->connected = APCONN_SETUP; 1663 ap->connected = APCONN_SETUP;
1654 1664
@@ -1656,7 +1666,7 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
1656 if (!gigaset_add_event(cs, &bcs->at_state, 1666 if (!gigaset_add_event(cs, &bcs->at_state,
1657 EV_HUP, NULL, 0, NULL)) { 1667 EV_HUP, NULL, 0, NULL)) {
1658 dev_err(cs->dev, "%s: out of memory\n", __func__); 1668 dev_err(cs->dev, "%s: out of memory\n", __func__);
1659 dev_kfree_skb(skb); 1669 dev_kfree_skb_any(skb);
1660 return; 1670 return;
1661 } 1671 }
1662 gig_dbg(DEBUG_CMD, "scheduling HUP"); 1672 gig_dbg(DEBUG_CMD, "scheduling HUP");
@@ -1673,11 +1683,11 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
1673 command = CAPI_CONNECT_B3_ACTIVE; 1683 command = CAPI_CONNECT_B3_ACTIVE;
1674 msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN; 1684 msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
1675 } 1685 }
1676 capi_cmsg_header(&iif->acmsg, ap->id, command, CAPI_IND, 1686 capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
1677 ap->nextMessageNumber++, iif->acmsg.adr.adrNCCI); 1687 ap->nextMessageNumber++, cmsg->adr.adrNCCI);
1678 __skb_trim(skb, msgsize); 1688 __skb_trim(skb, msgsize);
1679 capi_cmsg2message(&iif->acmsg, skb->data); 1689 capi_cmsg2message(cmsg, skb->data);
1680 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1690 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1681 capi_ctr_handle_message(&iif->ctr, ap->id, skb); 1691 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
1682} 1692}
1683 1693
@@ -1691,28 +1701,37 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1691 struct sk_buff *skb) 1701 struct sk_buff *skb)
1692{ 1702{
1693 struct cardstate *cs = iif->ctr.driverdata; 1703 struct cardstate *cs = iif->ctr.driverdata;
1704 _cmsg *cmsg = &iif->acmsg;
1694 struct bc_state *bcs; 1705 struct bc_state *bcs;
1695 _cmsg *b3cmsg; 1706 _cmsg *b3cmsg;
1696 struct sk_buff *b3skb; 1707 struct sk_buff *b3skb;
1697 int channel; 1708 int channel;
1698 1709
1699 /* decode message */ 1710 /* decode message */
1700 capi_message2cmsg(&iif->acmsg, skb->data); 1711 capi_message2cmsg(cmsg, skb->data);
1701 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1712 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1702 1713
1703 /* extract and check channel number from PLCI */ 1714 /* extract and check channel number from PLCI */
1704 channel = (iif->acmsg.adr.adrPLCI >> 8) & 0xff; 1715 channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
1705 if (!channel || channel > cs->channels) { 1716 if (!channel || channel > cs->channels) {
1706 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", 1717 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1707 "DISCONNECT_REQ", "PLCI", iif->acmsg.adr.adrPLCI); 1718 "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI);
1708 send_conf(iif, ap, skb, CapiIllContrPlciNcci); 1719 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1709 return; 1720 return;
1710 } 1721 }
1711 bcs = cs->bcs + channel - 1; 1722 bcs = cs->bcs + channel - 1;
1712 1723
1713 /* ToDo: process parameter: Additional info */ 1724 /* ToDo: process parameter: Additional info */
1714 ignore_cmstruct_param(cs, iif->acmsg.AdditionalInfo, 1725 if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
1715 "DISCONNECT_REQ", "Additional Info"); 1726 ignore_cstruct_param(cs, cmsg->BChannelinformation,
1727 "DISCONNECT_REQ", "B Channel Information");
1728 ignore_cstruct_param(cs, cmsg->Keypadfacility,
1729 "DISCONNECT_REQ", "Keypad Facility");
1730 ignore_cstruct_param(cs, cmsg->Useruserdata,
1731 "DISCONNECT_REQ", "User-User Data");
1732 ignore_cstruct_param(cs, cmsg->Facilitydataarray,
1733 "DISCONNECT_REQ", "Facility Data Array");
1734 }
1716 1735
1717 /* skip if DISCONNECT_IND already sent */ 1736 /* skip if DISCONNECT_IND already sent */
1718 if (!ap->connected) 1737 if (!ap->connected)
@@ -1733,7 +1752,7 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1733 } 1752 }
1734 capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND, 1753 capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
1735 ap->nextMessageNumber++, 1754 ap->nextMessageNumber++,
1736 iif->acmsg.adr.adrPLCI | (1 << 16)); 1755 cmsg->adr.adrPLCI | (1 << 16));
1737 b3cmsg->Reason_B3 = CapiProtocolErrorLayer1; 1756 b3cmsg->Reason_B3 = CapiProtocolErrorLayer1;
1738 b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL); 1757 b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL);
1739 if (b3skb == NULL) { 1758 if (b3skb == NULL) {
@@ -1769,18 +1788,19 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
1769 struct sk_buff *skb) 1788 struct sk_buff *skb)
1770{ 1789{
1771 struct cardstate *cs = iif->ctr.driverdata; 1790 struct cardstate *cs = iif->ctr.driverdata;
1791 _cmsg *cmsg = &iif->acmsg;
1772 int channel; 1792 int channel;
1773 1793
1774 /* decode message */ 1794 /* decode message */
1775 capi_message2cmsg(&iif->acmsg, skb->data); 1795 capi_message2cmsg(cmsg, skb->data);
1776 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1796 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1777 1797
1778 /* extract and check channel number and NCCI */ 1798 /* extract and check channel number and NCCI */
1779 channel = (iif->acmsg.adr.adrNCCI >> 8) & 0xff; 1799 channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
1780 if (!channel || channel > cs->channels || 1800 if (!channel || channel > cs->channels ||
1781 ((iif->acmsg.adr.adrNCCI >> 16) & 0xffff) != 1) { 1801 ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
1782 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", 1802 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1783 "DISCONNECT_B3_REQ", "NCCI", iif->acmsg.adr.adrNCCI); 1803 "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI);
1784 send_conf(iif, ap, skb, CapiIllContrPlciNcci); 1804 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1785 return; 1805 return;
1786 } 1806 }
@@ -1803,11 +1823,10 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
1803 gigaset_schedule_event(cs); 1823 gigaset_schedule_event(cs);
1804 1824
1805 /* NCPI parameter: not applicable for B3 Transparent */ 1825 /* NCPI parameter: not applicable for B3 Transparent */
1806 ignore_cstruct_param(cs, iif->acmsg.NCPI, 1826 ignore_cstruct_param(cs, cmsg->NCPI,
1807 "DISCONNECT_B3_REQ", "NCPI"); 1827 "DISCONNECT_B3_REQ", "NCPI");
1808 send_conf(iif, ap, skb, 1828 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
1809 (iif->acmsg.NCPI && iif->acmsg.NCPI[0]) ? 1829 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1810 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1811} 1830}
1812 1831
1813/* 1832/*
@@ -1862,12 +1881,12 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif,
1862 return; 1881 return;
1863 } 1882 }
1864 1883
1865 /* 1884 /* pull CAPI message into link layer header */
1866 * pull CAPI message from skb, 1885 skb_reset_mac_header(skb);
1867 * pass payload data to device-specific module 1886 skb->mac_len = msglen;
1868 * CAPI message will be preserved in headroom
1869 */
1870 skb_pull(skb, msglen); 1887 skb_pull(skb, msglen);
1888
1889 /* pass to device-specific module */
1871 if (cs->ops->send_skb(&cs->bcs[channel-1], skb) < 0) { 1890 if (cs->ops->send_skb(&cs->bcs[channel-1], skb) < 0) {
1872 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); 1891 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1873 return; 1892 return;
@@ -1928,7 +1947,7 @@ static void do_nothing(struct gigaset_capi_ctr *iif,
1928 capi_message2cmsg(&iif->acmsg, skb->data); 1947 capi_message2cmsg(&iif->acmsg, skb->data);
1929 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1948 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1930 } 1949 }
1931 dev_kfree_skb(skb); 1950 dev_kfree_skb_any(skb);
1932} 1951}
1933 1952
1934static void do_data_b3_resp(struct gigaset_capi_ctr *iif, 1953static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
@@ -1936,7 +1955,7 @@ static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
1936 struct sk_buff *skb) 1955 struct sk_buff *skb)
1937{ 1956{
1938 dump_rawmsg(DEBUG_LLDATA, __func__, skb->data); 1957 dump_rawmsg(DEBUG_LLDATA, __func__, skb->data);
1939 dev_kfree_skb(skb); 1958 dev_kfree_skb_any(skb);
1940} 1959}
1941 1960
1942/* table of outgoing CAPI message handlers with lookup function */ 1961/* table of outgoing CAPI message handlers with lookup function */
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 1d2ae2e05e0b..c438cfcb7c6d 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -108,7 +108,7 @@ int gigaset_enterconfigmode(struct cardstate *cs)
108{ 108{
109 int i, r; 109 int i, r;
110 110
111 cs->control_state = TIOCM_RTS; //FIXME 111 cs->control_state = TIOCM_RTS;
112 112
113 r = setflags(cs, TIOCM_DTR, 200); 113 r = setflags(cs, TIOCM_DTR, 200);
114 if (r < 0) 114 if (r < 0)
@@ -132,10 +132,10 @@ int gigaset_enterconfigmode(struct cardstate *cs)
132 132
133error: 133error:
134 dev_err(cs->dev, "error %d on setuartbits\n", -r); 134 dev_err(cs->dev, "error %d on setuartbits\n", -r);
135 cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value? 135 cs->control_state = TIOCM_RTS|TIOCM_DTR;
136 cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR); 136 cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR);
137 137
138 return -1; //r 138 return -1;
139} 139}
140 140
141static int test_timeout(struct at_state_t *at_state) 141static int test_timeout(struct at_state_t *at_state)
@@ -150,10 +150,9 @@ static int test_timeout(struct at_state_t *at_state)
150 } 150 }
151 151
152 if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL, 152 if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
153 at_state->timer_index, NULL)) { 153 at_state->timer_index, NULL))
154 //FIXME what should we do? 154 dev_err(at_state->cs->dev, "%s: out of memory\n",
155 } 155 __func__);
156
157 return 1; 156 return 1;
158} 157}
159 158
@@ -393,16 +392,15 @@ static void gigaset_freebcs(struct bc_state *bcs)
393 int i; 392 int i;
394 393
395 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel); 394 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
396 if (!bcs->cs->ops->freebcshw(bcs)) { 395 if (!bcs->cs->ops->freebcshw(bcs))
397 gig_dbg(DEBUG_INIT, "failed"); 396 gig_dbg(DEBUG_INIT, "failed");
398 }
399 397
400 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel); 398 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
401 clear_at_state(&bcs->at_state); 399 clear_at_state(&bcs->at_state);
402 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel); 400 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
401 dev_kfree_skb(bcs->skb);
402 bcs->skb = NULL;
403 403
404 if (bcs->skb)
405 dev_kfree_skb(bcs->skb);
406 for (i = 0; i < AT_NUM; ++i) { 404 for (i = 0; i < AT_NUM; ++i) {
407 kfree(bcs->commands[i]); 405 kfree(bcs->commands[i]);
408 bcs->commands[i] = NULL; 406 bcs->commands[i] = NULL;
@@ -503,8 +501,6 @@ void gigaset_freecs(struct cardstate *cs)
503 gig_dbg(DEBUG_INIT, "clearing hw"); 501 gig_dbg(DEBUG_INIT, "clearing hw");
504 cs->ops->freecshw(cs); 502 cs->ops->freecshw(cs);
505 503
506 //FIXME cmdbuf
507
508 /* fall through */ 504 /* fall through */
509 case 2: /* error in initcshw */ 505 case 2: /* error in initcshw */
510 /* Deregister from LL */ 506 /* Deregister from LL */
@@ -560,16 +556,13 @@ void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
560} 556}
561 557
562 558
563static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs, 559static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
564 struct cardstate *cs, int inputstate)
565/* inbuf->read must be allocated before! */ 560/* inbuf->read must be allocated before! */
566{ 561{
567 inbuf->head = 0; 562 inbuf->head = 0;
568 inbuf->tail = 0; 563 inbuf->tail = 0;
569 inbuf->cs = cs; 564 inbuf->cs = cs;
570 inbuf->bcs = bcs; /*base driver: NULL*/ 565 inbuf->inputstate = INS_command;
571 inbuf->rcvbuf = NULL;
572 inbuf->inputstate = inputstate;
573} 566}
574 567
575/** 568/**
@@ -625,7 +618,7 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
625{ 618{
626 int i; 619 int i;
627 620
628 bcs->tx_skb = NULL; //FIXME -> hw part 621 bcs->tx_skb = NULL;
629 622
630 skb_queue_head_init(&bcs->squeue); 623 skb_queue_head_init(&bcs->squeue);
631 624
@@ -644,16 +637,13 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
644 bcs->fcs = PPP_INITFCS; 637 bcs->fcs = PPP_INITFCS;
645 bcs->inputstate = 0; 638 bcs->inputstate = 0;
646 if (cs->ignoreframes) { 639 if (cs->ignoreframes) {
647 bcs->inputstate |= INS_skip_frame;
648 bcs->skb = NULL; 640 bcs->skb = NULL;
649 } else { 641 } else {
650 bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len); 642 bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
651 if (bcs->skb != NULL) 643 if (bcs->skb != NULL)
652 skb_reserve(bcs->skb, cs->hw_hdr_len); 644 skb_reserve(bcs->skb, cs->hw_hdr_len);
653 else { 645 else
654 pr_err("out of memory\n"); 646 pr_err("out of memory\n");
655 bcs->inputstate |= INS_skip_frame;
656 }
657 } 647 }
658 648
659 bcs->channel = channel; 649 bcs->channel = channel;
@@ -674,8 +664,8 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
674 gig_dbg(DEBUG_INIT, " failed"); 664 gig_dbg(DEBUG_INIT, " failed");
675 665
676 gig_dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel); 666 gig_dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel);
677 if (bcs->skb) 667 dev_kfree_skb(bcs->skb);
678 dev_kfree_skb(bcs->skb); 668 bcs->skb = NULL;
679 669
680 return NULL; 670 return NULL;
681} 671}
@@ -702,12 +692,13 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
702 int onechannel, int ignoreframes, 692 int onechannel, int ignoreframes,
703 int cidmode, const char *modulename) 693 int cidmode, const char *modulename)
704{ 694{
705 struct cardstate *cs = NULL; 695 struct cardstate *cs;
706 unsigned long flags; 696 unsigned long flags;
707 int i; 697 int i;
708 698
709 gig_dbg(DEBUG_INIT, "allocating cs"); 699 gig_dbg(DEBUG_INIT, "allocating cs");
710 if (!(cs = alloc_cs(drv))) { 700 cs = alloc_cs(drv);
701 if (!cs) {
711 pr_err("maximum number of devices exceeded\n"); 702 pr_err("maximum number of devices exceeded\n");
712 return NULL; 703 return NULL;
713 } 704 }
@@ -764,10 +755,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
764 cs->cbytes = 0; 755 cs->cbytes = 0;
765 756
766 gig_dbg(DEBUG_INIT, "setting up inbuf"); 757 gig_dbg(DEBUG_INIT, "setting up inbuf");
767 if (onechannel) { //FIXME distinction necessary? 758 gigaset_inbuf_init(cs->inbuf, cs);
768 gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command);
769 } else
770 gigaset_inbuf_init(cs->inbuf, NULL, cs, INS_command);
771 759
772 cs->connected = 0; 760 cs->connected = 0;
773 cs->isdn_up = 0; 761 cs->isdn_up = 0;
@@ -854,9 +842,10 @@ void gigaset_bcs_reinit(struct bc_state *bcs)
854 bcs->chstate = 0; 842 bcs->chstate = 0;
855 843
856 bcs->ignore = cs->ignoreframes; 844 bcs->ignore = cs->ignoreframes;
857 if (bcs->ignore) 845 if (bcs->ignore) {
858 bcs->inputstate |= INS_skip_frame; 846 dev_kfree_skb(bcs->skb);
859 847 bcs->skb = NULL;
848 }
860 849
861 cs->ops->reinitbcshw(bcs); 850 cs->ops->reinitbcshw(bcs);
862} 851}
@@ -877,8 +866,6 @@ static void cleanup_cs(struct cardstate *cs)
877 free_strings(&cs->at_state); 866 free_strings(&cs->at_state);
878 gigaset_at_init(&cs->at_state, NULL, cs, 0); 867 gigaset_at_init(&cs->at_state, NULL, cs, 0);
879 868
880 kfree(cs->inbuf->rcvbuf);
881 cs->inbuf->rcvbuf = NULL;
882 cs->inbuf->inputstate = INS_command; 869 cs->inbuf->inputstate = INS_command;
883 cs->inbuf->head = 0; 870 cs->inbuf->head = 0;
884 cs->inbuf->tail = 0; 871 cs->inbuf->tail = 0;
@@ -941,15 +928,13 @@ int gigaset_start(struct cardstate *cs)
941 cs->ops->baud_rate(cs, B115200); 928 cs->ops->baud_rate(cs, B115200);
942 cs->ops->set_line_ctrl(cs, CS8); 929 cs->ops->set_line_ctrl(cs, CS8);
943 cs->control_state = TIOCM_DTR|TIOCM_RTS; 930 cs->control_state = TIOCM_DTR|TIOCM_RTS;
944 } else {
945 //FIXME use some saved values?
946 } 931 }
947 932
948 cs->waiting = 1; 933 cs->waiting = 1;
949 934
950 if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) { 935 if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
951 cs->waiting = 0; 936 cs->waiting = 0;
952 //FIXME what should we do? 937 dev_err(cs->dev, "%s: out of memory\n", __func__);
953 goto error; 938 goto error;
954 } 939 }
955 940
@@ -989,7 +974,7 @@ int gigaset_shutdown(struct cardstate *cs)
989 cs->waiting = 1; 974 cs->waiting = 1;
990 975
991 if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) { 976 if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
992 //FIXME what should we do? 977 dev_err(cs->dev, "%s: out of memory\n", __func__);
993 goto exit; 978 goto exit;
994 } 979 }
995 980
@@ -1020,7 +1005,7 @@ void gigaset_stop(struct cardstate *cs)
1020 cs->waiting = 1; 1005 cs->waiting = 1;
1021 1006
1022 if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) { 1007 if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) {
1023 //FIXME what should we do? 1008 dev_err(cs->dev, "%s: out of memory\n", __func__);
1024 goto exit; 1009 goto exit;
1025 } 1010 }
1026 1011
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 369927f90729..ddeb0456d202 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -40,8 +40,8 @@
40 40
41/* Possible ASCII responses */ 41/* Possible ASCII responses */
42#define RSP_OK 0 42#define RSP_OK 0
43//#define RSP_BUSY 1 43#define RSP_BUSY 1
44//#define RSP_CONNECT 2 44#define RSP_CONNECT 2
45#define RSP_ZGCI 3 45#define RSP_ZGCI 3
46#define RSP_RING 4 46#define RSP_RING 4
47#define RSP_ZAOC 5 47#define RSP_ZAOC 5
@@ -68,7 +68,6 @@
68#define RSP_ZHLC (RSP_STR + STR_ZHLC) 68#define RSP_ZHLC (RSP_STR + STR_ZHLC)
69#define RSP_ERROR -1 /* ERROR */ 69#define RSP_ERROR -1 /* ERROR */
70#define RSP_WRONG_CID -2 /* unknown cid in cmd */ 70#define RSP_WRONG_CID -2 /* unknown cid in cmd */
71//#define RSP_EMPTY -3
72#define RSP_UNKNOWN -4 /* unknown response */ 71#define RSP_UNKNOWN -4 /* unknown response */
73#define RSP_FAIL -5 /* internal error */ 72#define RSP_FAIL -5 /* internal error */
74#define RSP_INVAL -6 /* invalid response */ 73#define RSP_INVAL -6 /* invalid response */
@@ -76,9 +75,9 @@
76#define RSP_NONE -19 75#define RSP_NONE -19
77#define RSP_STRING -20 76#define RSP_STRING -20
78#define RSP_NULL -21 77#define RSP_NULL -21
79//#define RSP_RETRYFAIL -22 78#define RSP_RETRYFAIL -22
80//#define RSP_RETRY -23 79#define RSP_RETRY -23
81//#define RSP_SKIP -24 80#define RSP_SKIP -24
82#define RSP_INIT -27 81#define RSP_INIT -27
83#define RSP_ANY -26 82#define RSP_ANY -26
84#define RSP_LAST -28 83#define RSP_LAST -28
@@ -158,229 +157,229 @@
158#define SEQ_UMMODE 11 157#define SEQ_UMMODE 11
159 158
160 159
161// 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring 160/* 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid),
161 * 400: hup, 500: reset, 600: dial, 700: ring */
162struct reply_t gigaset_tab_nocid[] = 162struct reply_t gigaset_tab_nocid[] =
163{ 163{
164 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ 164/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
165 165 * action, command */
166 /* initialize device, set cid mode if possible */ 166
167 //{RSP_INIT, -1, -1,100, 900, 0, {ACT_TEST}}, 167/* initialize device, set cid mode if possible */
168 //{RSP_ERROR, 900,900, -1, 0, 0, {ACT_FAILINIT}}, 168{RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} },
169 //{RSP_OK, 900,900, -1, 100, INIT_TIMEOUT, 169
170 // {ACT_TIMEOUT}}, 170{EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"},
171 171{RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING},
172 {RSP_INIT, -1, -1,SEQ_INIT, 100, INIT_TIMEOUT, 172 "+GMR\r"},
173 {ACT_TIMEOUT}}, /* wait until device is ready */ 173
174 174{EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"},
175 {EV_TIMEOUT, 100,100, -1, 101, 3, {0}, "Z\r"}, /* device in transparent mode? try to initialize it. */ 175{RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"},
176 {RSP_OK, 101,103, -1, 120, 5, {ACT_GETSTRING}, "+GMR\r"}, /* get version */ 176
177 177{EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1},
178 {EV_TIMEOUT, 101,101, -1, 102, 5, {0}, "Z\r"}, /* timeout => try once again. */ 178 "^SDLE=0\r"},
179 {RSP_ERROR, 101,101, -1, 102, 5, {0}, "Z\r"}, /* error => try once again. */ 179{RSP_OK, 108, 108, -1, 104, -1},
180 180{RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"},
181 {EV_TIMEOUT, 102,102, -1, 108, 5, {ACT_SETDLE1}, "^SDLE=0\r"}, /* timeout => try again in DLE mode. */ 181{EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} },
182 {RSP_OK, 108,108, -1, 104,-1}, 182{RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} },
183 {RSP_ZDLE, 104,104, 0, 103, 5, {0}, "Z\r"}, 183
184 {EV_TIMEOUT, 104,104, -1, 0, 0, {ACT_FAILINIT}}, 184{EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0,
185 {RSP_ERROR, 108,108, -1, 0, 0, {ACT_FAILINIT}}, 185 ACT_HUPMODEM,
186 186 ACT_TIMEOUT} },
187 {EV_TIMEOUT, 108,108, -1, 105, 2, {ACT_SETDLE0, 187{EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"},
188 ACT_HUPMODEM, 188
189 ACT_TIMEOUT}}, /* still timeout => connection in unimodem mode? */ 189{RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"},
190 {EV_TIMEOUT, 105,105, -1, 103, 5, {0}, "Z\r"}, 190{RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} },
191 191{RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
192 {RSP_ERROR, 102,102, -1, 107, 5, {0}, "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? */ 192{EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
193 {RSP_OK, 107,107, -1, 0, 0, {ACT_CONFIGMODE}}, 193
194 {RSP_ERROR, 107,107, -1, 0, 0, {ACT_FAILINIT}}, 194{RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
195 {EV_TIMEOUT, 107,107, -1, 0, 0, {ACT_FAILINIT}}, 195{EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
196 196
197 {RSP_ERROR, 103,103, -1, 0, 0, {ACT_FAILINIT}}, 197{RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} },
198 {EV_TIMEOUT, 103,103, -1, 0, 0, {ACT_FAILINIT}}, 198
199 199{EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER,
200 {RSP_STRING, 120,120, -1, 121,-1, {ACT_SETVER}}, 200 ACT_INIT} },
201 201{RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER,
202 {EV_TIMEOUT, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, 202 ACT_INIT} },
203 {RSP_ERROR, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, 203{RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
204 {RSP_OK, 121,121, -1, 0, 0, {ACT_GOTVER, ACT_INIT}}, 204 ACT_INIT} },
205 205
206 /* leave dle mode */ 206/* leave dle mode */
207 {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, 207{RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
208 {RSP_OK, 201,201, -1, 202,-1}, 208{RSP_OK, 201, 201, -1, 202, -1},
209 {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, 209{RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} },
210 {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, 210{RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} },
211 {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, 211{RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
212 {EV_TIMEOUT, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, 212{EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
213 213
214 /* enter dle mode */ 214/* enter dle mode */
215 {RSP_INIT, 0, 0,SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"}, 215{RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
216 {RSP_OK, 251,251, -1, 252,-1}, 216{RSP_OK, 251, 251, -1, 252, -1},
217 {RSP_ZDLE, 252,252, 1, 0, 0, {ACT_DLE1}}, 217{RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} },
218 {RSP_ERROR, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, 218{RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
219 {EV_TIMEOUT, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, 219{EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
220 220
221 /* incoming call */ 221/* incoming call */
222 {RSP_RING, -1, -1, -1, -1,-1, {ACT_RING}}, 222{RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} },
223 223
224 /* get cid */ 224/* get cid */
225 //{RSP_INIT, 0, 0,300, 901, 0, {ACT_TEST}}, 225{RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
226 //{RSP_ERROR, 901,901, -1, 0, 0, {ACT_FAILCID}}, 226{RSP_OK, 301, 301, -1, 302, -1},
227 //{RSP_OK, 901,901, -1, 301, 5, {0}, "^SGCI?\r"}, 227{RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} },
228 228{RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} },
229 {RSP_INIT, 0, 0,SEQ_CID, 301, 5, {0}, "^SGCI?\r"}, 229{EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} },
230 {RSP_OK, 301,301, -1, 302,-1}, 230
231 {RSP_ZGCI, 302,302, -1, 0, 0, {ACT_CID}}, 231/* enter cid mode */
232 {RSP_ERROR, 301,349, -1, 0, 0, {ACT_FAILCID}}, 232{RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
233 {EV_TIMEOUT, 301,349, -1, 0, 0, {ACT_FAILCID}}, 233{RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} },
234 234{RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
235 /* enter cid mode */ 235{EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
236 {RSP_INIT, 0, 0,SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"}, 236
237 {RSP_OK, 150,150, -1, 0, 0, {ACT_CMODESET}}, 237/* leave cid mode */
238 {RSP_ERROR, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, 238{RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"},
239 {EV_TIMEOUT, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, 239{RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} },
240 240{RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
241 /* leave cid mode */ 241{EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
242 //{RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "^SGCI=0\r"}, 242
243 {RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "Z\r"}, 243/* abort getting cid */
244 {RSP_OK, 160,160, -1, 0, 0, {ACT_UMODESET}}, 244{RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} },
245 {RSP_ERROR, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, 245
246 {EV_TIMEOUT, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, 246/* reset */
247 247{RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
248 /* abort getting cid */ 248{RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} },
249 {RSP_INIT, 0, 0,SEQ_NOCID, 0, 0, {ACT_ABORTCID}}, 249{RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
250 250{EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
251 /* reset */ 251{RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} },
252 {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"}, 252
253 {RSP_OK, 504,504, -1, 0, 0, {ACT_SDOWN}}, 253{EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} },
254 {RSP_ERROR, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, 254{EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} },
255 {EV_TIMEOUT, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, 255{EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} },
256 {RSP_NODEV, 501,599, -1, 0, 0, {ACT_FAKESDOWN}}, 256{EV_START, -1, -1, -1, -1, -1, {ACT_START} },
257 257{EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} },
258 {EV_PROC_CIDMODE,-1, -1, -1, -1,-1, {ACT_PROC_CIDMODE}}, //FIXME 258{EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} },
259 {EV_IF_LOCK, -1, -1, -1, -1,-1, {ACT_IF_LOCK}}, //FIXME 259
260 {EV_IF_VER, -1, -1, -1, -1,-1, {ACT_IF_VER}}, //FIXME 260/* misc. */
261 {EV_START, -1, -1, -1, -1,-1, {ACT_START}}, //FIXME 261{RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
262 {EV_STOP, -1, -1, -1, -1,-1, {ACT_STOP}}, //FIXME 262{RSP_ZCFGT, -1, -1, -1, -1, -1, {ACT_DEBUG} },
263 {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME 263{RSP_ZCFG, -1, -1, -1, -1, -1, {ACT_DEBUG} },
264 264{RSP_ZLOG, -1, -1, -1, -1, -1, {ACT_DEBUG} },
265 /* misc. */ 265{RSP_ZMWI, -1, -1, -1, -1, -1, {ACT_DEBUG} },
266 {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, 266{RSP_ZABINFO, -1, -1, -1, -1, -1, {ACT_DEBUG} },
267 {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 267{RSP_ZSMLSTCHG, -1, -1, -1, -1, -1, {ACT_DEBUG} },
268 {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 268
269 {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 269{RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
270 {RSP_ZLOG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 270{RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
271 {RSP_ZMWI, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 271{RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
272 {RSP_ZABINFO, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 272{RSP_LAST}
273 {RSP_ZSMLSTCHG,-1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
274
275 {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}},
276 {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}},
277 {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}},
278 {RSP_LAST}
279}; 273};
280 274
281// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall 275/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
276 * 400: hup, 750: accepted icall */
282struct reply_t gigaset_tab_cid[] = 277struct reply_t gigaset_tab_cid[] =
283{ 278{
284 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ 279/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
285 280 * action, command */
286 /* dial */ 281
287 {EV_DIAL, -1, -1, -1, -1,-1, {ACT_DIAL}}, //FIXME 282/* dial */
288 {RSP_INIT, 0, 0,SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC}}, 283{EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
289 {RSP_OK, 601,601, -1, 602, 5, {ACT_CMD+AT_HLC}}, 284{RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC} },
290 {RSP_NULL, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, 285{RSP_OK, 601, 601, -1, 602, 5, {ACT_CMD+AT_HLC} },
291 {RSP_OK, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, 286{RSP_NULL, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
292 {RSP_OK, 603,603, -1, 604, 5, {ACT_CMD+AT_TYPE}}, 287{RSP_OK, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
293 {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}}, 288{RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD+AT_TYPE} },
294 {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} }, 289{RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD+AT_MSN} },
295 {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} }, 290{RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} },
296 {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD+AT_ISO} }, 291{RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} },
297 {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD+AT_ISO} }, 292{RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD+AT_ISO} },
298 {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"}, 293{RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD+AT_ISO} },
299 {RSP_OK, 608, 608, -1, 609, -1}, 294{RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"},
300 {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD+AT_DIAL} }, 295{RSP_OK, 608, 608, -1, 609, -1},
301 {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} }, 296{RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD+AT_DIAL} },
302 297{RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} },
303 {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, 298
304 {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, 299{RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
305 300{EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
306 /* optional dialing responses */ 301
307 {EV_BC_OPEN, 650,650, -1, 651,-1}, 302/* optional dialing responses */
308 {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} }, 303{EV_BC_OPEN, 650, 650, -1, 651, -1},
309 {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} }, 304{RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} },
310 {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} }, 305{RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} },
311 {RSP_ZSAU, 650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, 306{RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} },
312 307{RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
313 /* connect */ 308
314 {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, 309/* connect */
315 {RSP_ZSAU, 651,651,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT, 310{RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
316 ACT_NOTIFY_BC_UP}}, 311{RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
317 {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, 312 ACT_NOTIFY_BC_UP} },
318 {RSP_ZSAU, 751,751,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT, 313{RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
319 ACT_NOTIFY_BC_UP}}, 314{RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
320 {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, 315 ACT_NOTIFY_BC_UP} },
321 316{EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} },
322 /* remote hangup */ 317
323 {RSP_ZSAU, 650,651,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}}, 318/* remote hangup */
324 {RSP_ZSAU, 750,751,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, 319{RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
325 {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, 320{RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
326 321{RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
327 /* hangup */ 322
328 {EV_HUP, -1, -1, -1, -1,-1, {ACT_HUP}}, //FIXME 323/* hangup */
329 {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? 324{EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} },
330 {RSP_OK, 401,401, -1, 402, 5}, 325{RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"},
331 {RSP_ZVLS, 402,402, 0, 403, 5}, 326{RSP_OK, 401, 401, -1, 402, 5},
332 {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, 327{RSP_ZVLS, 402, 402, 0, 403, 5},
333 {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, 328{RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
334 {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, 329{RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
335 {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, 330{RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
336 {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, 331{RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} },
337 332{EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} },
338 {EV_BC_CLOSED, 0, 0, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout 333
339 334{EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
340 /* ring */ 335
341 {RSP_ZBC, 700,700, -1, -1,-1, {0}}, 336/* ring */
342 {RSP_ZHLC, 700,700, -1, -1,-1, {0}}, 337{RSP_ZBC, 700, 700, -1, -1, -1, {0} },
343 {RSP_NMBR, 700,700, -1, -1,-1, {0}}, 338{RSP_ZHLC, 700, 700, -1, -1, -1, {0} },
344 {RSP_ZCPN, 700,700, -1, -1,-1, {0}}, 339{RSP_NMBR, 700, 700, -1, -1, -1, {0} },
345 {RSP_ZCTP, 700,700, -1, -1,-1, {0}}, 340{RSP_ZCPN, 700, 700, -1, -1, -1, {0} },
346 {EV_TIMEOUT, 700,700, -1, 720,720, {ACT_ICALL}}, 341{RSP_ZCTP, 700, 700, -1, -1, -1, {0} },
347 {EV_BC_CLOSED,720,720, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, 342{EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} },
348 343{EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
349 /*accept icall*/ 344
350 {EV_ACCEPT, -1, -1, -1, -1,-1, {ACT_ACCEPT}}, //FIXME 345/*accept icall*/
351 {RSP_INIT, 720,720,SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO}}, 346{EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} },
352 {RSP_OK, 721,721, -1, 722, 5, {ACT_CMD+AT_ISO}}, 347{RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO} },
353 {RSP_OK, 722,722, -1, 723, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */ 348{RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD+AT_ISO} },
354 {RSP_OK, 723,723, -1, 724, 5, {0}}, 349{RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"},
355 {RSP_ZVLS, 724,724, 17, 750,50, {ACT_ACCEPTED}}, 350{RSP_OK, 723, 723, -1, 724, 5, {0} },
356 {RSP_ERROR, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, 351{RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} },
357 {EV_TIMEOUT, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, 352{RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
358 {RSP_ZSAU, 700,729,ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT}}, 353{EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
359 {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}}, 354{RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} },
360 {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}}, 355{RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} },
361 356{RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
362 {EV_BC_OPEN, 750,750, -1, 751,-1}, 357
363 {EV_TIMEOUT, 750,751, -1, 0, 0, {ACT_CONNTIMEOUT}}, 358{EV_BC_OPEN, 750, 750, -1, 751, -1},
364 359{EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} },
365 /* B channel closed (general case) */ 360
366 {EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME 361/* B channel closed (general case) */
367 362{EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} },
368 /* misc. */ 363
369 {RSP_ZCON, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 364/* misc. */
370 {RSP_ZCCR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 365{RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} },
371 {RSP_ZAOC, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 366{RSP_ZCCR, -1, -1, -1, -1, -1, {ACT_DEBUG} },
372 {RSP_ZCSTR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 367{RSP_ZAOC, -1, -1, -1, -1, -1, {ACT_DEBUG} },
373 368{RSP_ZCSTR, -1, -1, -1, -1, -1, {ACT_DEBUG} },
374 {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}}, 369
375 {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}}, 370{RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
376 {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}}, 371{RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
377 {RSP_LAST} 372{RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
373{RSP_LAST}
378}; 374};
379 375
380 376
381static const struct resp_type_t resp_type[] = 377static const struct resp_type_t {
378 unsigned char *response;
379 int resp_code;
380 int type;
381} resp_type[] =
382{ 382{
383 /*{"", RSP_EMPTY, RT_NOTHING},*/
384 {"OK", RSP_OK, RT_NOTHING}, 383 {"OK", RSP_OK, RT_NOTHING},
385 {"ERROR", RSP_ERROR, RT_NOTHING}, 384 {"ERROR", RSP_ERROR, RT_NOTHING},
386 {"ZSAU", RSP_ZSAU, RT_ZSAU}, 385 {"ZSAU", RSP_ZSAU, RT_ZSAU},
@@ -404,7 +403,21 @@ static const struct resp_type_t resp_type[] =
404 {"ZLOG", RSP_ZLOG, RT_NOTHING}, 403 {"ZLOG", RSP_ZLOG, RT_NOTHING},
405 {"ZABINFO", RSP_ZABINFO, RT_NOTHING}, 404 {"ZABINFO", RSP_ZABINFO, RT_NOTHING},
406 {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING}, 405 {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING},
407 {NULL,0,0} 406 {NULL, 0, 0}
407};
408
409static const struct zsau_resp_t {
410 unsigned char *str;
411 int code;
412} zsau_resp[] =
413{
414 {"OUTGOING_CALL_PROCEEDING", ZSAU_OUTGOING_CALL_PROCEEDING},
415 {"CALL_DELIVERED", ZSAU_CALL_DELIVERED},
416 {"ACTIVE", ZSAU_ACTIVE},
417 {"DISCONNECT_IND", ZSAU_DISCONNECT_IND},
418 {"NULL", ZSAU_NULL},
419 {"DISCONNECT_REQ", ZSAU_DISCONNECT_REQ},
420 {NULL, ZSAU_UNKNOWN}
408}; 421};
409 422
410/* 423/*
@@ -469,7 +482,6 @@ static int cid_of_response(char *s)
469 if (cid < 1 || cid > 65535) 482 if (cid < 1 || cid > 65535)
470 return -1; /* CID out of range */ 483 return -1; /* CID out of range */
471 return cid; 484 return cid;
472 //FIXME is ;<digit>+ at end of non-CID response really impossible?
473} 485}
474 486
475/** 487/**
@@ -486,6 +498,7 @@ void gigaset_handle_modem_response(struct cardstate *cs)
486 int params; 498 int params;
487 int i, j; 499 int i, j;
488 const struct resp_type_t *rt; 500 const struct resp_type_t *rt;
501 const struct zsau_resp_t *zr;
489 int curarg; 502 int curarg;
490 unsigned long flags; 503 unsigned long flags;
491 unsigned next, tail, head; 504 unsigned next, tail, head;
@@ -612,24 +625,14 @@ void gigaset_handle_modem_response(struct cardstate *cs)
612 event->parameter = ZSAU_NONE; 625 event->parameter = ZSAU_NONE;
613 break; 626 break;
614 } 627 }
615 if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING")) 628 for (zr = zsau_resp; zr->str; ++zr)
616 event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING; 629 if (!strcmp(argv[curarg], zr->str))
617 else if (!strcmp(argv[curarg], "CALL_DELIVERED")) 630 break;
618 event->parameter = ZSAU_CALL_DELIVERED; 631 event->parameter = zr->code;
619 else if (!strcmp(argv[curarg], "ACTIVE")) 632 if (!zr->str)
620 event->parameter = ZSAU_ACTIVE;
621 else if (!strcmp(argv[curarg], "DISCONNECT_IND"))
622 event->parameter = ZSAU_DISCONNECT_IND;
623 else if (!strcmp(argv[curarg], "NULL"))
624 event->parameter = ZSAU_NULL;
625 else if (!strcmp(argv[curarg], "DISCONNECT_REQ"))
626 event->parameter = ZSAU_DISCONNECT_REQ;
627 else {
628 event->parameter = ZSAU_UNKNOWN;
629 dev_warn(cs->dev, 633 dev_warn(cs->dev,
630 "%s: unknown parameter %s after ZSAU\n", 634 "%s: unknown parameter %s after ZSAU\n",
631 __func__, argv[curarg]); 635 __func__, argv[curarg]);
632 }
633 ++curarg; 636 ++curarg;
634 break; 637 break;
635 case RT_STRING: 638 case RT_STRING:
@@ -896,7 +899,8 @@ static void bchannel_up(struct bc_state *bcs)
896 gigaset_isdn_connB(bcs); 899 gigaset_isdn_connB(bcs);
897} 900}
898 901
899static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_index) 902static void start_dial(struct at_state_t *at_state, void *data,
903 unsigned seq_index)
900{ 904{
901 struct bc_state *bcs = at_state->bcs; 905 struct bc_state *bcs = at_state->bcs;
902 struct cardstate *cs = at_state->cs; 906 struct cardstate *cs = at_state->cs;
@@ -973,8 +977,6 @@ static void do_start(struct cardstate *cs)
973 977
974 cs->isdn_up = 1; 978 cs->isdn_up = 1;
975 gigaset_isdn_start(cs); 979 gigaset_isdn_start(cs);
976 // FIXME: not in locked mode
977 // FIXME 2: only after init sequence
978 980
979 cs->waiting = 0; 981 cs->waiting = 0;
980 wake_up(&cs->waitqueue); 982 wake_up(&cs->waitqueue);
@@ -1128,7 +1130,6 @@ static int do_lock(struct cardstate *cs)
1128 1130
1129 break; 1131 break;
1130 case MS_LOCKED: 1132 case MS_LOCKED:
1131 //retval = -EACCES;
1132 break; 1133 break;
1133 default: 1134 default:
1134 return -EBUSY; 1135 return -EBUSY;
@@ -1384,7 +1385,7 @@ static void do_action(int action, struct cardstate *cs,
1384 cs->cur_at_seq = SEQ_NONE; 1385 cs->cur_at_seq = SEQ_NONE;
1385 break; 1386 break;
1386 1387
1387 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */ 1388 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL procssng */
1388 disconnect(p_at_state); 1389 disconnect(p_at_state);
1389 break; 1390 break;
1390 1391
@@ -1458,17 +1459,6 @@ static void do_action(int action, struct cardstate *cs,
1458 __func__, at_state->ConState); 1459 __func__, at_state->ConState);
1459 cs->cur_at_seq = SEQ_NONE; 1460 cs->cur_at_seq = SEQ_NONE;
1460 break; 1461 break;
1461#ifdef CONFIG_GIGASET_DEBUG
1462 case ACT_TEST:
1463 {
1464 static int count = 3; //2; //1;
1465 *p_genresp = 1;
1466 *p_resp_code = count ? RSP_ERROR : RSP_OK;
1467 if (count > 0)
1468 --count;
1469 }
1470 break;
1471#endif
1472 case ACT_DEBUG: 1462 case ACT_DEBUG:
1473 gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d", 1463 gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
1474 __func__, ev->type, at_state->ConState); 1464 __func__, ev->type, at_state->ConState);
@@ -1503,7 +1493,7 @@ static void do_action(int action, struct cardstate *cs,
1503 do_start(cs); 1493 do_start(cs);
1504 break; 1494 break;
1505 1495
1506 /* events from the interface */ // FIXME without ACT_xxxx? 1496 /* events from the interface */
1507 case ACT_IF_LOCK: 1497 case ACT_IF_LOCK:
1508 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs); 1498 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
1509 cs->waiting = 0; 1499 cs->waiting = 0;
@@ -1522,7 +1512,7 @@ static void do_action(int action, struct cardstate *cs,
1522 wake_up(&cs->waitqueue); 1512 wake_up(&cs->waitqueue);
1523 break; 1513 break;
1524 1514
1525 /* events from the proc file system */ // FIXME without ACT_xxxx? 1515 /* events from the proc file system */
1526 case ACT_PROC_CIDMODE: 1516 case ACT_PROC_CIDMODE:
1527 spin_lock_irqsave(&cs->lock, flags); 1517 spin_lock_irqsave(&cs->lock, flags);
1528 if (ev->parameter != cs->cidmode) { 1518 if (ev->parameter != cs->cidmode) {
@@ -1659,7 +1649,8 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
1659 for (curact = 0; curact < MAXACT; ++curact) { 1649 for (curact = 0; curact < MAXACT; ++curact) {
1660 /* The row tells us what we should do .. 1650 /* The row tells us what we should do ..
1661 */ 1651 */
1662 do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev); 1652 do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
1653 &genresp, &resp_code, ev);
1663 if (!at_state) 1654 if (!at_state)
1664 break; /* may be freed after disconnect */ 1655 break; /* may be freed after disconnect */
1665 } 1656 }
@@ -1671,13 +1662,14 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
1671 1662
1672 if (genresp) { 1663 if (genresp) {
1673 spin_lock_irqsave(&cs->lock, flags); 1664 spin_lock_irqsave(&cs->lock, flags);
1674 at_state->timer_expires = 0; //FIXME 1665 at_state->timer_expires = 0;
1675 at_state->timer_active = 0; //FIXME 1666 at_state->timer_active = 0;
1676 spin_unlock_irqrestore(&cs->lock, flags); 1667 spin_unlock_irqrestore(&cs->lock, flags);
1677 gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL); 1668 gigaset_add_event(cs, at_state, resp_code,
1669 NULL, 0, NULL);
1678 } else { 1670 } else {
1679 /* Send command to modem if not NULL... */ 1671 /* Send command to modem if not NULL... */
1680 if (p_command/*rep->command*/) { 1672 if (p_command) {
1681 if (cs->connected) 1673 if (cs->connected)
1682 send_command(cs, p_command, 1674 send_command(cs, p_command,
1683 sendcid, cs->dle, 1675 sendcid, cs->dle,
@@ -1764,7 +1756,8 @@ static void process_command_flags(struct cardstate *cs)
1764 } 1756 }
1765 } 1757 }
1766 1758
1767 /* only switch back to unimodem mode, if no commands are pending and no channels are up */ 1759 /* only switch back to unimodem mode if no commands are pending and
1760 * no channels are up */
1768 spin_lock_irqsave(&cs->lock, flags); 1761 spin_lock_irqsave(&cs->lock, flags);
1769 if (cs->at_state.pending_commands == PC_UMMODE 1762 if (cs->at_state.pending_commands == PC_UMMODE
1770 && !cs->cidmode 1763 && !cs->cidmode
@@ -1823,9 +1816,8 @@ static void process_command_flags(struct cardstate *cs)
1823 1816
1824 if (cs->at_state.pending_commands & PC_INIT) { 1817 if (cs->at_state.pending_commands & PC_INIT) {
1825 cs->at_state.pending_commands &= ~PC_INIT; 1818 cs->at_state.pending_commands &= ~PC_INIT;
1826 cs->dle = 0; //FIXME 1819 cs->dle = 0;
1827 cs->inbuf->inputstate = INS_command; 1820 cs->inbuf->inputstate = INS_command;
1828 //FIXME reset card state (or -> LOCK0)?
1829 schedule_sequence(cs, &cs->at_state, SEQ_INIT); 1821 schedule_sequence(cs, &cs->at_state, SEQ_INIT);
1830 return; 1822 return;
1831 } 1823 }
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 4749ef100fd3..e963a6c2e86d 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -34,8 +34,8 @@
34#include <linux/list.h> 34#include <linux/list.h>
35#include <asm/atomic.h> 35#include <asm/atomic.h>
36 36
37#define GIG_VERSION {0,5,0,0} 37#define GIG_VERSION {0, 5, 0, 0}
38#define GIG_COMPAT {0,4,0,0} 38#define GIG_COMPAT {0, 4, 0, 0}
39 39
40#define MAX_REC_PARAMS 10 /* Max. number of params in response string */ 40#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
41#define MAX_RESP_SIZE 512 /* Max. size of a response string */ 41#define MAX_RESP_SIZE 512 /* Max. size of a response string */
@@ -133,35 +133,32 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
133#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) 133#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
134#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) 134#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
135 135
136/* int-in-events 3070 */ 136/* interrupt pipe messages */
137#define HD_B1_FLOW_CONTROL 0x80 137#define HD_B1_FLOW_CONTROL 0x80
138#define HD_B2_FLOW_CONTROL 0x81 138#define HD_B2_FLOW_CONTROL 0x81
139#define HD_RECEIVEATDATA_ACK (0x35) // 3070 139#define HD_RECEIVEATDATA_ACK (0x35) /* 3070 */
140 // att: HD_RECEIVE>>AT<<DATA_ACK 140#define HD_READY_SEND_ATDATA (0x36) /* 3070 */
141#define HD_READY_SEND_ATDATA (0x36) // 3070 141#define HD_OPEN_ATCHANNEL_ACK (0x37) /* 3070 */
142#define HD_OPEN_ATCHANNEL_ACK (0x37) // 3070 142#define HD_CLOSE_ATCHANNEL_ACK (0x38) /* 3070 */
143#define HD_CLOSE_ATCHANNEL_ACK (0x38) // 3070 143#define HD_DEVICE_INIT_OK (0x11) /* ISurf USB + 3070 */
144#define HD_DEVICE_INIT_OK (0x11) // ISurf USB + 3070 144#define HD_OPEN_B1CHANNEL_ACK (0x51) /* ISurf USB + 3070 */
145#define HD_OPEN_B1CHANNEL_ACK (0x51) // ISurf USB + 3070 145#define HD_OPEN_B2CHANNEL_ACK (0x52) /* ISurf USB + 3070 */
146#define HD_OPEN_B2CHANNEL_ACK (0x52) // ISurf USB + 3070 146#define HD_CLOSE_B1CHANNEL_ACK (0x53) /* ISurf USB + 3070 */
147#define HD_CLOSE_B1CHANNEL_ACK (0x53) // ISurf USB + 3070 147#define HD_CLOSE_B2CHANNEL_ACK (0x54) /* ISurf USB + 3070 */
148#define HD_CLOSE_B2CHANNEL_ACK (0x54) // ISurf USB + 3070 148#define HD_SUSPEND_END (0x61) /* ISurf USB */
149// Powermangment 149#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) /* ISurf USB + 3070 */
150#define HD_SUSPEND_END (0x61) // ISurf USB 150
151// Configuration 151/* control requests */
152#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) // ISurf USB + 3070 152#define HD_OPEN_B1CHANNEL (0x23) /* ISurf USB + 3070 */
153 153#define HD_CLOSE_B1CHANNEL (0x24) /* ISurf USB + 3070 */
154/* control requests 3070 */ 154#define HD_OPEN_B2CHANNEL (0x25) /* ISurf USB + 3070 */
155#define HD_OPEN_B1CHANNEL (0x23) // ISurf USB + 3070 155#define HD_CLOSE_B2CHANNEL (0x26) /* ISurf USB + 3070 */
156#define HD_CLOSE_B1CHANNEL (0x24) // ISurf USB + 3070 156#define HD_RESET_INTERRUPT_PIPE (0x27) /* ISurf USB + 3070 */
157#define HD_OPEN_B2CHANNEL (0x25) // ISurf USB + 3070 157#define HD_DEVICE_INIT_ACK (0x34) /* ISurf USB + 3070 */
158#define HD_CLOSE_B2CHANNEL (0x26) // ISurf USB + 3070 158#define HD_WRITE_ATMESSAGE (0x12) /* 3070 */
159#define HD_RESET_INTERRUPT_PIPE (0x27) // ISurf USB + 3070 159#define HD_READ_ATMESSAGE (0x13) /* 3070 */
160#define HD_DEVICE_INIT_ACK (0x34) // ISurf USB + 3070 160#define HD_OPEN_ATCHANNEL (0x28) /* 3070 */
161#define HD_WRITE_ATMESSAGE (0x12) // 3070 161#define HD_CLOSE_ATCHANNEL (0x29) /* 3070 */
162#define HD_READ_ATMESSAGE (0x13) // 3070
163#define HD_OPEN_ATCHANNEL (0x28) // 3070
164#define HD_CLOSE_ATCHANNEL (0x29) // 3070
165 162
166/* number of B channels supported by base driver */ 163/* number of B channels supported by base driver */
167#define BAS_CHANNELS 2 164#define BAS_CHANNELS 2
@@ -223,12 +220,11 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
223#define EV_BC_CLOSED -118 220#define EV_BC_CLOSED -118
224 221
225/* input state */ 222/* input state */
226#define INS_command 0x0001 223#define INS_command 0x0001 /* receiving messages (not payload data) */
227#define INS_DLE_char 0x0002 224#define INS_DLE_char 0x0002 /* DLE flag received (in DLE mode) */
228#define INS_byte_stuff 0x0004 225#define INS_byte_stuff 0x0004
229#define INS_have_data 0x0008 226#define INS_have_data 0x0008
230#define INS_skip_frame 0x0010 227#define INS_DLE_command 0x0020 /* DLE message start (<DLE> X) received */
231#define INS_DLE_command 0x0020
232#define INS_flag_hunt 0x0040 228#define INS_flag_hunt 0x0040
233 229
234/* channel state */ 230/* channel state */
@@ -290,8 +286,6 @@ extern struct reply_t gigaset_tab_cid[];
290extern struct reply_t gigaset_tab_nocid[]; 286extern struct reply_t gigaset_tab_nocid[];
291 287
292struct inbuf_t { 288struct inbuf_t {
293 unsigned char *rcvbuf; /* usb-gigaset receive buffer */
294 struct bc_state *bcs;
295 struct cardstate *cs; 289 struct cardstate *cs;
296 int inputstate; 290 int inputstate;
297 int head, tail; 291 int head, tail;
@@ -363,12 +357,6 @@ struct at_state_t {
363 struct bc_state *bcs; 357 struct bc_state *bcs;
364}; 358};
365 359
366struct resp_type_t {
367 unsigned char *response;
368 int resp_code; /* RSP_XXXX */
369 int type; /* RT_XXXX */
370};
371
372struct event_t { 360struct event_t {
373 int type; 361 int type;
374 void *ptr, *arg; 362 void *ptr, *arg;
@@ -483,8 +471,8 @@ struct cardstate {
483 471
484 struct timer_list timer; 472 struct timer_list timer;
485 int retry_count; 473 int retry_count;
486 int dle; /* !=0 if modem commands/responses are 474 int dle; /* !=0 if DLE mode is active
487 dle encoded */ 475 (ZDLE=1 received -- M10x only) */
488 int cur_at_seq; /* sequence of AT commands being 476 int cur_at_seq; /* sequence of AT commands being
489 processed */ 477 processed */
490 int curchannel; /* channel those commands are meant 478 int curchannel; /* channel those commands are meant
@@ -625,7 +613,7 @@ struct gigaset_ops {
625 613
626 /* Called from LL interface to put an skb into the send-queue. 614 /* Called from LL interface to put an skb into the send-queue.
627 * After sending is completed, gigaset_skb_sent() must be called 615 * After sending is completed, gigaset_skb_sent() must be called
628 * with the first cs->hw_hdr_len bytes of skb->head preserved. */ 616 * with the skb's link layer header preserved. */
629 int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb); 617 int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
630 618
631 /* Called from ev-layer.c to process a block of data 619 /* Called from ev-layer.c to process a block of data
@@ -634,7 +622,8 @@ struct gigaset_ops {
634 622
635}; 623};
636 624
637/* = Common structures and definitions ======================================= */ 625/* = Common structures and definitions =======================================
626 */
638 627
639/* Parser states for DLE-Event: 628/* Parser states for DLE-Event:
640 * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "." 629 * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
@@ -779,7 +768,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
779 void *ptr, int parameter, void *arg); 768 void *ptr, int parameter, void *arg);
780 769
781/* Called on CONFIG1 command from frontend. */ 770/* Called on CONFIG1 command from frontend. */
782int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode 771int gigaset_enterconfigmode(struct cardstate *cs);
783 772
784/* cs->lock must not be locked */ 773/* cs->lock must not be locked */
785static inline void gigaset_schedule_event(struct cardstate *cs) 774static inline void gigaset_schedule_event(struct cardstate *cs)
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index aca72a06184e..c129ee47a8fb 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -39,12 +39,12 @@
39static int writebuf_from_LL(int driverID, int channel, int ack, 39static int writebuf_from_LL(int driverID, int channel, int ack,
40 struct sk_buff *skb) 40 struct sk_buff *skb)
41{ 41{
42 struct cardstate *cs; 42 struct cardstate *cs = gigaset_get_cs_by_id(driverID);
43 struct bc_state *bcs; 43 struct bc_state *bcs;
44 unsigned char *ack_header;
44 unsigned len; 45 unsigned len;
45 unsigned skblen;
46 46
47 if (!(cs = gigaset_get_cs_by_id(driverID))) { 47 if (!cs) {
48 pr_err("%s: invalid driver ID (%d)\n", __func__, driverID); 48 pr_err("%s: invalid driver ID (%d)\n", __func__, driverID);
49 return -ENODEV; 49 return -ENODEV;
50 } 50 }
@@ -78,11 +78,23 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
78 return -EINVAL; 78 return -EINVAL;
79 } 79 }
80 80
81 skblen = ack ? len : 0; 81 /* set up acknowledgement header */
82 skb->head[0] = skblen & 0xff; 82 if (skb_headroom(skb) < HW_HDR_LEN) {
83 skb->head[1] = skblen >> 8; 83 /* should never happen */
84 gig_dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x", 84 dev_err(cs->dev, "%s: insufficient skb headroom\n", __func__);
85 len, skblen, (unsigned) skb->head[0], (unsigned) skb->head[1]); 85 return -ENOMEM;
86 }
87 skb_set_mac_header(skb, -HW_HDR_LEN);
88 skb->mac_len = HW_HDR_LEN;
89 ack_header = skb_mac_header(skb);
90 if (ack) {
91 ack_header[0] = len & 0xff;
92 ack_header[1] = len >> 8;
93 } else {
94 ack_header[0] = ack_header[1] = 0;
95 }
96 gig_dbg(DEBUG_MCMD, "skb: len=%u, ack=%d: %02x %02x",
97 len, ack, ack_header[0], ack_header[1]);
86 98
87 /* pass to device-specific module */ 99 /* pass to device-specific module */
88 return cs->ops->send_skb(bcs, skb); 100 return cs->ops->send_skb(bcs, skb);
@@ -99,6 +111,7 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
99void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) 111void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
100{ 112{
101 isdn_if *iif = bcs->cs->iif; 113 isdn_if *iif = bcs->cs->iif;
114 unsigned char *ack_header = skb_mac_header(skb);
102 unsigned len; 115 unsigned len;
103 isdn_ctrl response; 116 isdn_ctrl response;
104 117
@@ -108,8 +121,7 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
108 dev_warn(bcs->cs->dev, "%s: skb->len==%d\n", 121 dev_warn(bcs->cs->dev, "%s: skb->len==%d\n",
109 __func__, skb->len); 122 __func__, skb->len);
110 123
111 len = (unsigned char) skb->head[0] | 124 len = ack_header[0] + ((unsigned) ack_header[1] << 8);
112 (unsigned) (unsigned char) skb->head[1] << 8;
113 if (len) { 125 if (len) {
114 gig_dbg(DEBUG_MCMD, "ACKing to LL (id: %d, ch: %d, sz: %u)", 126 gig_dbg(DEBUG_MCMD, "ACKing to LL (id: %d, ch: %d, sz: %u)",
115 bcs->cs->myid, bcs->channel, len); 127 bcs->cs->myid, bcs->channel, len);
@@ -379,22 +391,19 @@ static int command_from_LL(isdn_ctrl *cntrl)
379 391
380 break; 392 break;
381 case ISDN_CMD_PROCEED: 393 case ISDN_CMD_PROCEED:
382 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME 394 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED");
383 break; 395 break;
384 case ISDN_CMD_ALERT: 396 case ISDN_CMD_ALERT:
385 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME 397 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT");
386 if (cntrl->arg >= cs->channels) { 398 if (cntrl->arg >= cs->channels) {
387 dev_err(cs->dev, 399 dev_err(cs->dev,
388 "ISDN_CMD_ALERT: invalid channel (%d)\n", 400 "ISDN_CMD_ALERT: invalid channel (%d)\n",
389 (int) cntrl->arg); 401 (int) cntrl->arg);
390 return -EINVAL; 402 return -EINVAL;
391 } 403 }
392 //bcs = cs->bcs + cntrl->arg;
393 //bcs->proto2 = -1;
394 // FIXME
395 break; 404 break;
396 case ISDN_CMD_REDIR: 405 case ISDN_CMD_REDIR:
397 gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME 406 gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR");
398 break; 407 break;
399 case ISDN_CMD_PROT_IO: 408 case ISDN_CMD_PROT_IO:
400 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO"); 409 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO");
@@ -474,7 +483,7 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
474 /* fill ICALL structure */ 483 /* fill ICALL structure */
475 response.parm.setup.si1 = 0; /* default: unknown */ 484 response.parm.setup.si1 = 0; /* default: unknown */
476 response.parm.setup.si2 = 0; 485 response.parm.setup.si2 = 0;
477 response.parm.setup.screen = 0; //FIXME how to set these? 486 response.parm.setup.screen = 0;
478 response.parm.setup.plan = 0; 487 response.parm.setup.plan = 0;
479 if (!at_state->str_var[STR_ZBC]) { 488 if (!at_state->str_var[STR_ZBC]) {
480 /* no BC (internal call): assume speech, A-law */ 489 /* no BC (internal call): assume speech, A-law */
@@ -495,26 +504,24 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
495 return ICALL_IGNORE; 504 return ICALL_IGNORE;
496 } 505 }
497 if (at_state->str_var[STR_NMBR]) { 506 if (at_state->str_var[STR_NMBR]) {
498 strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR], 507 strlcpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
499 sizeof response.parm.setup.phone - 1); 508 sizeof response.parm.setup.phone);
500 response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0;
501 } else 509 } else
502 response.parm.setup.phone[0] = 0; 510 response.parm.setup.phone[0] = 0;
503 if (at_state->str_var[STR_ZCPN]) { 511 if (at_state->str_var[STR_ZCPN]) {
504 strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN], 512 strlcpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
505 sizeof response.parm.setup.eazmsn - 1); 513 sizeof response.parm.setup.eazmsn);
506 response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0;
507 } else 514 } else
508 response.parm.setup.eazmsn[0] = 0; 515 response.parm.setup.eazmsn[0] = 0;
509 516
510 if (!bcs) { 517 if (!bcs) {
511 dev_notice(cs->dev, "no channel for incoming call\n"); 518 dev_notice(cs->dev, "no channel for incoming call\n");
512 response.command = ISDN_STAT_ICALLW; 519 response.command = ISDN_STAT_ICALLW;
513 response.arg = 0; //FIXME 520 response.arg = 0;
514 } else { 521 } else {
515 gig_dbg(DEBUG_CMD, "Sending ICALL"); 522 gig_dbg(DEBUG_CMD, "Sending ICALL");
516 response.command = ISDN_STAT_ICALL; 523 response.command = ISDN_STAT_ICALL;
517 response.arg = bcs->channel; //FIXME 524 response.arg = bcs->channel;
518 } 525 }
519 response.driver = cs->myid; 526 response.driver = cs->myid;
520 retval = iif->statcallb(&response); 527 retval = iif->statcallb(&response);
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 6a8e1384e7bd..577809c03aed 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -162,7 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
162 return -ENODEV; 162 return -ENODEV;
163 163
164 if (mutex_lock_interruptible(&cs->mutex)) 164 if (mutex_lock_interruptible(&cs->mutex))
165 return -ERESTARTSYS; // FIXME -EINTR? 165 return -ERESTARTSYS;
166 tty->driver_data = cs; 166 tty->driver_data = cs;
167 167
168 ++cs->open_count; 168 ++cs->open_count;
@@ -171,7 +171,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
171 spin_lock_irqsave(&cs->lock, flags); 171 spin_lock_irqsave(&cs->lock, flags);
172 cs->tty = tty; 172 cs->tty = tty;
173 spin_unlock_irqrestore(&cs->lock, flags); 173 spin_unlock_irqrestore(&cs->lock, flags);
174 tty->low_latency = 1; //FIXME test 174 tty->low_latency = 1;
175 } 175 }
176 176
177 mutex_unlock(&cs->mutex); 177 mutex_unlock(&cs->mutex);
@@ -228,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
228 gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd); 228 gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
229 229
230 if (mutex_lock_interruptible(&cs->mutex)) 230 if (mutex_lock_interruptible(&cs->mutex))
231 return -ERESTARTSYS; // FIXME -EINTR? 231 return -ERESTARTSYS;
232 232
233 if (!cs->connected) { 233 if (!cs->connected) {
234 gig_dbg(DEBUG_IF, "not connected"); 234 gig_dbg(DEBUG_IF, "not connected");
@@ -299,9 +299,8 @@ static int if_tiocmget(struct tty_struct *tty, struct file *file)
299 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__); 299 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
300 300
301 if (mutex_lock_interruptible(&cs->mutex)) 301 if (mutex_lock_interruptible(&cs->mutex))
302 return -ERESTARTSYS; // FIXME -EINTR? 302 return -ERESTARTSYS;
303 303
304 // FIXME read from device?
305 retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR); 304 retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR);
306 305
307 mutex_unlock(&cs->mutex); 306 mutex_unlock(&cs->mutex);
@@ -326,7 +325,7 @@ static int if_tiocmset(struct tty_struct *tty, struct file *file,
326 cs->minor_index, __func__, set, clear); 325 cs->minor_index, __func__, set, clear);
327 326
328 if (mutex_lock_interruptible(&cs->mutex)) 327 if (mutex_lock_interruptible(&cs->mutex))
329 return -ERESTARTSYS; // FIXME -EINTR? 328 return -ERESTARTSYS;
330 329
331 if (!cs->connected) { 330 if (!cs->connected) {
332 gig_dbg(DEBUG_IF, "not connected"); 331 gig_dbg(DEBUG_IF, "not connected");
@@ -356,7 +355,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
356 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__); 355 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
357 356
358 if (mutex_lock_interruptible(&cs->mutex)) 357 if (mutex_lock_interruptible(&cs->mutex))
359 return -ERESTARTSYS; // FIXME -EINTR? 358 return -ERESTARTSYS;
360 359
361 if (!cs->connected) { 360 if (!cs->connected) {
362 gig_dbg(DEBUG_IF, "not connected"); 361 gig_dbg(DEBUG_IF, "not connected");
@@ -390,7 +389,7 @@ static int if_write_room(struct tty_struct *tty)
390 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__); 389 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
391 390
392 if (mutex_lock_interruptible(&cs->mutex)) 391 if (mutex_lock_interruptible(&cs->mutex))
393 return -ERESTARTSYS; // FIXME -EINTR? 392 return -ERESTARTSYS;
394 393
395 if (!cs->connected) { 394 if (!cs->connected) {
396 gig_dbg(DEBUG_IF, "not connected"); 395 gig_dbg(DEBUG_IF, "not connected");
@@ -455,9 +454,8 @@ static void if_throttle(struct tty_struct *tty)
455 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ 454 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
456 else if (!cs->open_count) 455 else if (!cs->open_count)
457 dev_warn(cs->dev, "%s: device not opened\n", __func__); 456 dev_warn(cs->dev, "%s: device not opened\n", __func__);
458 else { 457 else
459 //FIXME 458 gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
460 }
461 459
462 mutex_unlock(&cs->mutex); 460 mutex_unlock(&cs->mutex);
463} 461}
@@ -480,9 +478,8 @@ static void if_unthrottle(struct tty_struct *tty)
480 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ 478 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
481 else if (!cs->open_count) 479 else if (!cs->open_count)
482 dev_warn(cs->dev, "%s: device not opened\n", __func__); 480 dev_warn(cs->dev, "%s: device not opened\n", __func__);
483 else { 481 else
484 //FIXME 482 gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
485 }
486 483
487 mutex_unlock(&cs->mutex); 484 mutex_unlock(&cs->mutex);
488} 485}
@@ -515,10 +512,9 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
515 goto out; 512 goto out;
516 } 513 }
517 514
518 // stolen from mct_u232.c
519 iflag = tty->termios->c_iflag; 515 iflag = tty->termios->c_iflag;
520 cflag = tty->termios->c_cflag; 516 cflag = tty->termios->c_cflag;
521 old_cflag = old ? old->c_cflag : cflag; //FIXME? 517 old_cflag = old ? old->c_cflag : cflag;
522 gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x", 518 gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x",
523 cs->minor_index, iflag, cflag, old_cflag); 519 cs->minor_index, iflag, cflag, old_cflag);
524 520
@@ -632,7 +628,8 @@ void gigaset_if_receive(struct cardstate *cs,
632 struct tty_struct *tty; 628 struct tty_struct *tty;
633 629
634 spin_lock_irqsave(&cs->lock, flags); 630 spin_lock_irqsave(&cs->lock, flags);
635 if ((tty = cs->tty) == NULL) 631 tty = cs->tty;
632 if (tty == NULL)
636 gig_dbg(DEBUG_ANY, "receive on closed device"); 633 gig_dbg(DEBUG_ANY, "receive on closed device");
637 else { 634 else {
638 tty_buffer_request_room(tty, len); 635 tty_buffer_request_room(tty, len);
@@ -659,9 +656,9 @@ void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
659 656
660 drv->have_tty = 0; 657 drv->have_tty = 0;
661 658
662 if ((drv->tty = alloc_tty_driver(minors)) == NULL) 659 drv->tty = tty = alloc_tty_driver(minors);
660 if (tty == NULL)
663 goto enomem; 661 goto enomem;
664 tty = drv->tty;
665 662
666 tty->magic = TTY_DRIVER_MAGIC, 663 tty->magic = TTY_DRIVER_MAGIC,
667 tty->major = GIG_MAJOR, 664 tty->major = GIG_MAJOR,
@@ -676,8 +673,8 @@ void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
676 673
677 tty->owner = THIS_MODULE; 674 tty->owner = THIS_MODULE;
678 675
679 tty->init_termios = tty_std_termios; //FIXME 676 tty->init_termios = tty_std_termios;
680 tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME 677 tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
681 tty_set_operations(tty, &if_ops); 678 tty_set_operations(tty, &if_ops);
682 679
683 ret = tty_register_driver(tty); 680 ret = tty_register_driver(tty);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 7dabfd35874c..85394a6ebae8 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -41,7 +41,8 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
41 41
42 read = iwb->read; 42 read = iwb->read;
43 write = iwb->write; 43 write = iwb->write;
44 if ((freebytes = read - write) > 0) { 44 freebytes = read - write;
45 if (freebytes > 0) {
45 /* no wraparound: need padding space within regular area */ 46 /* no wraparound: need padding space within regular area */
46 return freebytes - BAS_OUTBUFPAD; 47 return freebytes - BAS_OUTBUFPAD;
47 } else if (read < BAS_OUTBUFPAD) { 48 } else if (read < BAS_OUTBUFPAD) {
@@ -53,29 +54,6 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
53 } 54 }
54} 55}
55 56
56/* compare two offsets within the buffer
57 * The buffer is seen as circular, with the read position as start
58 * returns -1/0/1 if position a </=/> position b without crossing 'read'
59 */
60static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
61{
62 int read;
63 if (a == b)
64 return 0;
65 read = iwb->read;
66 if (a < b) {
67 if (a < read && read <= b)
68 return +1;
69 else
70 return -1;
71 } else {
72 if (b < read && read <= a)
73 return -1;
74 else
75 return +1;
76 }
77}
78
79/* start writing 57/* start writing
80 * acquire the write semaphore 58 * acquire the write semaphore
81 * return true if acquired, false if busy 59 * return true if acquired, false if busy
@@ -271,7 +249,7 @@ static inline void dump_bytes(enum debuglevel level, const char *tag,
271 * bit 14..13 = number of bits added by stuffing 249 * bit 14..13 = number of bits added by stuffing
272 */ 250 */
273static const u16 stufftab[5 * 256] = { 251static const u16 stufftab[5 * 256] = {
274// previous 1s = 0: 252/* previous 1s = 0: */
275 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, 253 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
276 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f, 254 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
277 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, 255 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
@@ -289,7 +267,7 @@ static const u16 stufftab[5 * 256] = {
289 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef, 267 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
290 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf, 268 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
291 269
292// previous 1s = 1: 270/* previous 1s = 1: */
293 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f, 271 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
294 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f, 272 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
295 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f, 273 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
@@ -307,7 +285,7 @@ static const u16 stufftab[5 * 256] = {
307 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf, 285 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
308 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef, 286 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
309 287
310// previous 1s = 2: 288/* previous 1s = 2: */
311 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017, 289 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
312 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037, 290 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
313 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057, 291 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
@@ -325,7 +303,7 @@ static const u16 stufftab[5 * 256] = {
325 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7, 303 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
326 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7, 304 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
327 305
328// previous 1s = 3: 306/* previous 1s = 3: */
329 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b, 307 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
330 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b, 308 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
331 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b, 309 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
@@ -343,7 +321,7 @@ static const u16 stufftab[5 * 256] = {
343 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb, 321 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
344 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb, 322 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
345 323
346// previous 1s = 4: 324/* previous 1s = 4: */
347 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d, 325 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
348 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d, 326 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
349 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d, 327 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
@@ -367,7 +345,8 @@ static const u16 stufftab[5 * 256] = {
367 * parameters: 345 * parameters:
368 * cin input byte 346 * cin input byte
369 * ones number of trailing '1' bits in result before this step 347 * ones number of trailing '1' bits in result before this step
370 * iwb pointer to output buffer structure (write semaphore must be held) 348 * iwb pointer to output buffer structure
349 * (write semaphore must be held)
371 * return value: 350 * return value:
372 * number of trailing '1' bits in result after this step 351 * number of trailing '1' bits in result after this step
373 */ 352 */
@@ -408,7 +387,8 @@ static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
408 * parameters: 387 * parameters:
409 * in input buffer 388 * in input buffer
410 * count number of bytes in input buffer 389 * count number of bytes in input buffer
411 * iwb pointer to output buffer structure (write semaphore must be held) 390 * iwb pointer to output buffer structure
391 * (write semaphore must be held)
412 * return value: 392 * return value:
413 * position of end of packet in output buffer on success, 393 * position of end of packet in output buffer on success,
414 * -EAGAIN if write semaphore busy or buffer full 394 * -EAGAIN if write semaphore busy or buffer full
@@ -440,7 +420,8 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
440 fcs = crc_ccitt_byte(fcs, c); 420 fcs = crc_ccitt_byte(fcs, c);
441 } 421 }
442 422
443 /* bitstuff and append FCS (complemented, least significant byte first) */ 423 /* bitstuff and append FCS
424 * (complemented, least significant byte first) */
444 fcs ^= 0xffff; 425 fcs ^= 0xffff;
445 ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones); 426 ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
446 ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones); 427 ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
@@ -459,7 +440,8 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
459 * parameters: 440 * parameters:
460 * in input buffer 441 * in input buffer
461 * count number of bytes in input buffer 442 * count number of bytes in input buffer
462 * iwb pointer to output buffer structure (write semaphore must be held) 443 * iwb pointer to output buffer structure
444 * (write semaphore must be held)
463 * return value: 445 * return value:
464 * position of end of packet in output buffer on success, 446 * position of end of packet in output buffer on success,
465 * -EAGAIN if write semaphore busy or buffer full 447 * -EAGAIN if write semaphore busy or buffer full
@@ -567,8 +549,8 @@ static inline void hdlc_done(struct bc_state *bcs)
567 hdlc_flush(bcs); 549 hdlc_flush(bcs);
568 return; 550 return;
569 } 551 }
570 552 procskb = bcs->skb;
571 if ((procskb = bcs->skb) == NULL) { 553 if (procskb == NULL) {
572 /* previous error */ 554 /* previous error */
573 gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__); 555 gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
574 gigaset_isdn_rcv_err(bcs); 556 gigaset_isdn_rcv_err(bcs);
@@ -576,12 +558,12 @@ static inline void hdlc_done(struct bc_state *bcs)
576 dev_notice(cs->dev, "received short frame (%d octets)\n", 558 dev_notice(cs->dev, "received short frame (%d octets)\n",
577 procskb->len); 559 procskb->len);
578 bcs->hw.bas->runts++; 560 bcs->hw.bas->runts++;
579 dev_kfree_skb(procskb); 561 dev_kfree_skb_any(procskb);
580 gigaset_isdn_rcv_err(bcs); 562 gigaset_isdn_rcv_err(bcs);
581 } else if (bcs->fcs != PPP_GOODFCS) { 563 } else if (bcs->fcs != PPP_GOODFCS) {
582 dev_notice(cs->dev, "frame check error (0x%04x)\n", bcs->fcs); 564 dev_notice(cs->dev, "frame check error (0x%04x)\n", bcs->fcs);
583 bcs->hw.bas->fcserrs++; 565 bcs->hw.bas->fcserrs++;
584 dev_kfree_skb(procskb); 566 dev_kfree_skb_any(procskb);
585 gigaset_isdn_rcv_err(bcs); 567 gigaset_isdn_rcv_err(bcs);
586 } else { 568 } else {
587 len = procskb->len; 569 len = procskb->len;
@@ -646,8 +628,8 @@ static const unsigned char bitcounts[256] = {
646}; 628};
647 629
648/* hdlc_unpack 630/* hdlc_unpack
649 * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation) 631 * perform HDLC frame processing (bit unstuffing, flag detection, FCS
650 * on a sequence of received data bytes (8 bits each, LSB first) 632 * calculation) on a sequence of received data bytes (8 bits each, LSB first)
651 * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd 633 * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd
652 * notify of errors via gigaset_isdn_rcv_err 634 * notify of errors via gigaset_isdn_rcv_err
653 * tally frames, errors etc. in BC structure counters 635 * tally frames, errors etc. in BC structure counters
@@ -665,9 +647,12 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
665 647
666 /* load previous state: 648 /* load previous state:
667 * inputstate = set of flag bits: 649 * inputstate = set of flag bits:
668 * - INS_flag_hunt: no complete opening flag received since connection setup or last abort 650 * - INS_flag_hunt: no complete opening flag received since connection
669 * - INS_have_data: at least one complete data byte received since last flag 651 * setup or last abort
670 * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7) 652 * - INS_have_data: at least one complete data byte received since last
653 * flag
654 * seqlen = number of consecutive '1' bits in last 7 input stream bits
655 * (0..7)
671 * inbyte = accumulated partial data byte (if !INS_flag_hunt) 656 * inbyte = accumulated partial data byte (if !INS_flag_hunt)
672 * inbits = number of valid bits in inbyte, starting at LSB (0..6) 657 * inbits = number of valid bits in inbyte, starting at LSB (0..6)
673 */ 658 */
@@ -701,9 +686,11 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
701 inbyte = c >> (lead1 + 1); 686 inbyte = c >> (lead1 + 1);
702 inbits = 7 - lead1; 687 inbits = 7 - lead1;
703 if (trail1 >= 8) { 688 if (trail1 >= 8) {
704 /* interior stuffing: omitting the MSB handles most cases */ 689 /* interior stuffing:
690 * omitting the MSB handles most cases,
691 * correct the incorrectly handled
692 * cases individually */
705 inbits--; 693 inbits--;
706 /* correct the incorrectly handled cases individually */
707 switch (c) { 694 switch (c) {
708 case 0xbe: 695 case 0xbe:
709 inbyte = 0x3f; 696 inbyte = 0x3f;
@@ -729,13 +716,14 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
729 hdlc_flush(bcs); 716 hdlc_flush(bcs);
730 inputstate |= INS_flag_hunt; 717 inputstate |= INS_flag_hunt;
731 } else if (seqlen == 6) { 718 } else if (seqlen == 6) {
732 /* closing flag, including (6 - lead1) '1's and one '0' from inbits */ 719 /* closing flag, including (6 - lead1) '1's
720 * and one '0' from inbits */
733 if (inbits > 7 - lead1) { 721 if (inbits > 7 - lead1) {
734 hdlc_frag(bcs, inbits + lead1 - 7); 722 hdlc_frag(bcs, inbits + lead1 - 7);
735 inputstate &= ~INS_have_data; 723 inputstate &= ~INS_have_data;
736 } else { 724 } else {
737 if (inbits < 7 - lead1) 725 if (inbits < 7 - lead1)
738 ubc->stolen0s ++; 726 ubc->stolen0s++;
739 if (inputstate & INS_have_data) { 727 if (inputstate & INS_have_data) {
740 hdlc_done(bcs); 728 hdlc_done(bcs);
741 inputstate &= ~INS_have_data; 729 inputstate &= ~INS_have_data;
@@ -744,7 +732,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
744 732
745 if (c == PPP_FLAG) { 733 if (c == PPP_FLAG) {
746 /* complete flag, LSB overlaps preceding flag */ 734 /* complete flag, LSB overlaps preceding flag */
747 ubc->shared0s ++; 735 ubc->shared0s++;
748 inbits = 0; 736 inbits = 0;
749 inbyte = 0; 737 inbyte = 0;
750 } else if (trail1 != 7) { 738 } else if (trail1 != 7) {
@@ -752,9 +740,11 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
752 inbyte = c >> (lead1 + 1); 740 inbyte = c >> (lead1 + 1);
753 inbits = 7 - lead1; 741 inbits = 7 - lead1;
754 if (trail1 >= 8) { 742 if (trail1 >= 8) {
755 /* interior stuffing: omitting the MSB handles most cases */ 743 /* interior stuffing:
744 * omitting the MSB handles most cases,
745 * correct the incorrectly handled
746 * cases individually */
756 inbits--; 747 inbits--;
757 /* correct the incorrectly handled cases individually */
758 switch (c) { 748 switch (c) {
759 case 0xbe: 749 case 0xbe:
760 inbyte = 0x3f; 750 inbyte = 0x3f;
@@ -762,7 +752,8 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
762 } 752 }
763 } 753 }
764 } else { 754 } else {
765 /* abort sequence follows, skb already empty anyway */ 755 /* abort sequence follows,
756 * skb already empty anyway */
766 ubc->aborts++; 757 ubc->aborts++;
767 inputstate |= INS_flag_hunt; 758 inputstate |= INS_flag_hunt;
768 } 759 }
@@ -787,14 +778,17 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
787 } else { 778 } else {
788 /* stuffed data */ 779 /* stuffed data */
789 if (trail1 < 7) { /* => seqlen == 5 */ 780 if (trail1 < 7) { /* => seqlen == 5 */
790 /* stuff bit at position lead1, no interior stuffing */ 781 /* stuff bit at position lead1,
782 * no interior stuffing */
791 unsigned char mask = (1 << lead1) - 1; 783 unsigned char mask = (1 << lead1) - 1;
792 c = (c & mask) | ((c & ~mask) >> 1); 784 c = (c & mask) | ((c & ~mask) >> 1);
793 inbyte |= c << inbits; 785 inbyte |= c << inbits;
794 inbits += 7; 786 inbits += 7;
795 } else if (seqlen < 5) { /* trail1 >= 8 */ 787 } else if (seqlen < 5) { /* trail1 >= 8 */
796 /* interior stuffing: omitting the MSB handles most cases */ 788 /* interior stuffing:
797 /* correct the incorrectly handled cases individually */ 789 * omitting the MSB handles most cases,
790 * correct the incorrectly handled
791 * cases individually */
798 switch (c) { 792 switch (c) {
799 case 0xbe: 793 case 0xbe:
800 c = 0x7e; 794 c = 0x7e;
@@ -804,8 +798,9 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
804 inbits += 7; 798 inbits += 7;
805 } else { /* seqlen == 5 && trail1 >= 8 */ 799 } else { /* seqlen == 5 && trail1 >= 8 */
806 800
807 /* stuff bit at lead1 *and* interior stuffing */ 801 /* stuff bit at lead1 *and* interior
808 switch (c) { /* unstuff individually */ 802 * stuffing -- unstuff individually */
803 switch (c) {
809 case 0x7d: 804 case 0x7d:
810 c = 0x3f; 805 c = 0x3f;
811 break; 806 break;
@@ -862,7 +857,8 @@ static inline void trans_receive(unsigned char *src, unsigned count,
862 hdlc_flush(bcs); 857 hdlc_flush(bcs);
863 return; 858 return;
864 } 859 }
865 if (unlikely((skb = bcs->skb) == NULL)) { 860 skb = bcs->skb;
861 if (unlikely(skb == NULL)) {
866 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len); 862 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
867 if (!skb) { 863 if (!skb) {
868 dev_err(cs->dev, "could not allocate skb\n"); 864 dev_err(cs->dev, "could not allocate skb\n");
@@ -895,7 +891,8 @@ static inline void trans_receive(unsigned char *src, unsigned count,
895 } 891 }
896} 892}
897 893
898void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs) 894void gigaset_isoc_receive(unsigned char *src, unsigned count,
895 struct bc_state *bcs)
899{ 896{
900 switch (bcs->proto2) { 897 switch (bcs->proto2) {
901 case L2_HDLC: 898 case L2_HDLC:
@@ -985,7 +982,7 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
985 * Called by LL to queue an skb for sending, and start transmission if 982 * Called by LL to queue an skb for sending, and start transmission if
986 * necessary. 983 * necessary.
987 * Once the payload data has been transmitted completely, gigaset_skb_sent() 984 * Once the payload data has been transmitted completely, gigaset_skb_sent()
988 * will be called with the first cs->hw_hdr_len bytes of skb->head preserved. 985 * will be called with the skb's link layer header preserved.
989 * 986 *
990 * Return value: 987 * Return value:
991 * number of bytes accepted for sending (skb->len) if ok, 988 * number of bytes accepted for sending (skb->len) if ok,
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index 9715aad9c3f0..758a00c1d2e2 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -39,7 +39,7 @@ static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
39 return -EINVAL; 39 return -EINVAL;
40 40
41 if (mutex_lock_interruptible(&cs->mutex)) 41 if (mutex_lock_interruptible(&cs->mutex))
42 return -ERESTARTSYS; // FIXME -EINTR? 42 return -ERESTARTSYS;
43 43
44 cs->waiting = 1; 44 cs->waiting = 1;
45 if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE, 45 if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 3071a52467ed..ac3409ea5d99 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -164,9 +164,15 @@ static void gigaset_modem_fill(unsigned long data)
164{ 164{
165 struct cardstate *cs = (struct cardstate *) data; 165 struct cardstate *cs = (struct cardstate *) data;
166 struct bc_state *bcs; 166 struct bc_state *bcs;
167 struct sk_buff *nextskb;
167 int sent = 0; 168 int sent = 0;
168 169
169 if (!cs || !(bcs = cs->bcs)) { 170 if (!cs) {
171 gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
172 return;
173 }
174 bcs = cs->bcs;
175 if (!bcs) {
170 gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__); 176 gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
171 return; 177 return;
172 } 178 }
@@ -179,9 +185,11 @@ static void gigaset_modem_fill(unsigned long data)
179 return; 185 return;
180 186
181 /* no command to send; get skb */ 187 /* no command to send; get skb */
182 if (!(bcs->tx_skb = skb_dequeue(&bcs->squeue))) 188 nextskb = skb_dequeue(&bcs->squeue);
189 if (!nextskb)
183 /* no skb either, nothing to do */ 190 /* no skb either, nothing to do */
184 return; 191 return;
192 bcs->tx_skb = nextskb;
185 193
186 gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)", 194 gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)",
187 (unsigned long) bcs->tx_skb); 195 (unsigned long) bcs->tx_skb);
@@ -236,19 +244,20 @@ static void flush_send_queue(struct cardstate *cs)
236 * number of bytes queued, or error code < 0 244 * number of bytes queued, or error code < 0
237 */ 245 */
238static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, 246static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
239 int len, struct tasklet_struct *wake_tasklet) 247 int len, struct tasklet_struct *wake_tasklet)
240{ 248{
241 struct cmdbuf_t *cb; 249 struct cmdbuf_t *cb;
242 unsigned long flags; 250 unsigned long flags;
243 251
244 gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? 252 gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
245 DEBUG_TRANSCMD : DEBUG_LOCKCMD, 253 DEBUG_TRANSCMD : DEBUG_LOCKCMD,
246 "CMD Transmit", len, buf); 254 "CMD Transmit", len, buf);
247 255
248 if (len <= 0) 256 if (len <= 0)
249 return 0; 257 return 0;
250 258
251 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 259 cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
260 if (!cb) {
252 dev_err(cs->dev, "%s: out of memory!\n", __func__); 261 dev_err(cs->dev, "%s: out of memory!\n", __func__);
253 return -ENOMEM; 262 return -ENOMEM;
254 } 263 }
@@ -392,7 +401,6 @@ static void gigaset_device_release(struct device *dev)
392 struct platform_device *pdev = to_platform_device(dev); 401 struct platform_device *pdev = to_platform_device(dev);
393 402
394 /* adapted from platform_device_release() in drivers/base/platform.c */ 403 /* adapted from platform_device_release() in drivers/base/platform.c */
395 //FIXME is this actually necessary?
396 kfree(dev->platform_data); 404 kfree(dev->platform_data);
397 kfree(pdev->resource); 405 kfree(pdev->resource);
398} 406}
@@ -404,16 +412,20 @@ static void gigaset_device_release(struct device *dev)
404static int gigaset_initcshw(struct cardstate *cs) 412static int gigaset_initcshw(struct cardstate *cs)
405{ 413{
406 int rc; 414 int rc;
415 struct ser_cardstate *scs;
407 416
408 if (!(cs->hw.ser = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL))) { 417 scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
418 if (!scs) {
409 pr_err("out of memory\n"); 419 pr_err("out of memory\n");
410 return 0; 420 return 0;
411 } 421 }
422 cs->hw.ser = scs;
412 423
413 cs->hw.ser->dev.name = GIGASET_MODULENAME; 424 cs->hw.ser->dev.name = GIGASET_MODULENAME;
414 cs->hw.ser->dev.id = cs->minor_index; 425 cs->hw.ser->dev.id = cs->minor_index;
415 cs->hw.ser->dev.dev.release = gigaset_device_release; 426 cs->hw.ser->dev.dev.release = gigaset_device_release;
416 if ((rc = platform_device_register(&cs->hw.ser->dev)) != 0) { 427 rc = platform_device_register(&cs->hw.ser->dev);
428 if (rc != 0) {
417 pr_err("error %d registering platform device\n", rc); 429 pr_err("error %d registering platform device\n", rc);
418 kfree(cs->hw.ser); 430 kfree(cs->hw.ser);
419 cs->hw.ser = NULL; 431 cs->hw.ser = NULL;
@@ -422,7 +434,7 @@ static int gigaset_initcshw(struct cardstate *cs)
422 dev_set_drvdata(&cs->hw.ser->dev.dev, cs); 434 dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
423 435
424 tasklet_init(&cs->write_tasklet, 436 tasklet_init(&cs->write_tasklet,
425 &gigaset_modem_fill, (unsigned long) cs); 437 &gigaset_modem_fill, (unsigned long) cs);
426 return 1; 438 return 1;
427} 439}
428 440
@@ -434,7 +446,8 @@ static int gigaset_initcshw(struct cardstate *cs)
434 * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c 446 * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c
435 * and by "if_lock" and "if_termios" in interface.c 447 * and by "if_lock" and "if_termios" in interface.c
436 */ 448 */
437static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsigned new_state) 449static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
450 unsigned new_state)
438{ 451{
439 struct tty_struct *tty = cs->hw.ser->tty; 452 struct tty_struct *tty = cs->hw.ser->tty;
440 unsigned int set, clear; 453 unsigned int set, clear;
@@ -520,8 +533,8 @@ gigaset_tty_open(struct tty_struct *tty)
520 } 533 }
521 534
522 /* allocate memory for our device state and intialize it */ 535 /* allocate memory for our device state and intialize it */
523 if (!(cs = gigaset_initcs(driver, 1, 1, 0, cidmode, 536 cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
524 GIGASET_MODULENAME))) 537 if (!cs)
525 goto error; 538 goto error;
526 539
527 cs->dev = &cs->hw.ser->dev.dev; 540 cs->dev = &cs->hw.ser->dev.dev;
@@ -690,7 +703,8 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
690 703
691 if (!cs) 704 if (!cs)
692 return; 705 return;
693 if (!(inbuf = cs->inbuf)) { 706 inbuf = cs->inbuf;
707 if (!inbuf) {
694 dev_err(cs->dev, "%s: no inbuf\n", __func__); 708 dev_err(cs->dev, "%s: no inbuf\n", __func__);
695 cs_put(cs); 709 cs_put(cs);
696 return; 710 return;
@@ -770,18 +784,21 @@ static int __init ser_gigaset_init(void)
770 int rc; 784 int rc;
771 785
772 gig_dbg(DEBUG_INIT, "%s", __func__); 786 gig_dbg(DEBUG_INIT, "%s", __func__);
773 if ((rc = platform_driver_register(&device_driver)) != 0) { 787 rc = platform_driver_register(&device_driver);
788 if (rc != 0) {
774 pr_err("error %d registering platform driver\n", rc); 789 pr_err("error %d registering platform driver\n", rc);
775 return rc; 790 return rc;
776 } 791 }
777 792
778 /* allocate memory for our driver state and intialize it */ 793 /* allocate memory for our driver state and intialize it */
779 if (!(driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 794 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
780 GIGASET_MODULENAME, GIGASET_DEVNAME, 795 GIGASET_MODULENAME, GIGASET_DEVNAME,
781 &ops, THIS_MODULE))) 796 &ops, THIS_MODULE);
797 if (!driver)
782 goto error; 798 goto error;
783 799
784 if ((rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc)) != 0) { 800 rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
801 if (rc != 0) {
785 pr_err("error %d registering line discipline\n", rc); 802 pr_err("error %d registering line discipline\n", rc);
786 goto error; 803 goto error;
787 } 804 }
@@ -808,7 +825,8 @@ static void __exit ser_gigaset_exit(void)
808 driver = NULL; 825 driver = NULL;
809 } 826 }
810 827
811 if ((rc = tty_unregister_ldisc(N_GIGASET_M101)) != 0) 828 rc = tty_unregister_ldisc(N_GIGASET_M101);
829 if (rc != 0)
812 pr_err("error %d unregistering line discipline\n", rc); 830 pr_err("error %d unregistering line discipline\n", rc);
813 831
814 platform_driver_unregister(&device_driver); 832 platform_driver_unregister(&device_driver);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4deb1ab0dbf8..f56b2a83793e 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -43,14 +43,14 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
43#define GIGASET_MODULENAME "usb_gigaset" 43#define GIGASET_MODULENAME "usb_gigaset"
44#define GIGASET_DEVNAME "ttyGU" 44#define GIGASET_DEVNAME "ttyGU"
45 45
46#define IF_WRITEBUF 2000 //FIXME // WAKEUP_CHARS: 256 46#define IF_WRITEBUF 2000 /* arbitrary limit */
47 47
48/* Values for the Gigaset M105 Data */ 48/* Values for the Gigaset M105 Data */
49#define USB_M105_VENDOR_ID 0x0681 49#define USB_M105_VENDOR_ID 0x0681
50#define USB_M105_PRODUCT_ID 0x0009 50#define USB_M105_PRODUCT_ID 0x0009
51 51
52/* table of devices that work with this driver */ 52/* table of devices that work with this driver */
53static const struct usb_device_id gigaset_table [] = { 53static const struct usb_device_id gigaset_table[] = {
54 { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) }, 54 { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
55 { } /* Terminating entry */ 55 { } /* Terminating entry */
56}; 56};
@@ -97,8 +97,8 @@ MODULE_DEVICE_TABLE(usb, gigaset_table);
97 * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13 97 * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
98 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13). 98 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
99 * xx is usually 0x00 but was 0x7e before starting data transfer 99 * xx is usually 0x00 but was 0x7e before starting data transfer
100 * in unimodem mode. So, this might be an array of characters that need 100 * in unimodem mode. So, this might be an array of characters that
101 * special treatment ("commit all bufferd data"?), 11=^Q, 13=^S. 101 * need special treatment ("commit all bufferd data"?), 11=^Q, 13=^S.
102 * 102 *
103 * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two 103 * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
104 * flags per packet. 104 * flags per packet.
@@ -114,7 +114,7 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
114static int gigaset_resume(struct usb_interface *intf); 114static int gigaset_resume(struct usb_interface *intf);
115static int gigaset_pre_reset(struct usb_interface *intf); 115static int gigaset_pre_reset(struct usb_interface *intf);
116 116
117static struct gigaset_driver *driver = NULL; 117static struct gigaset_driver *driver;
118 118
119/* usb specific object needed to register this driver with the usb subsystem */ 119/* usb specific object needed to register this driver with the usb subsystem */
120static struct usb_driver gigaset_usb_driver = { 120static struct usb_driver gigaset_usb_driver = {
@@ -141,6 +141,7 @@ struct usb_cardstate {
141 struct urb *bulk_out_urb; 141 struct urb *bulk_out_urb;
142 142
143 /* Input buffer */ 143 /* Input buffer */
144 unsigned char *rcvbuf;
144 int rcvbuf_size; 145 int rcvbuf_size;
145 struct urb *read_urb; 146 struct urb *read_urb;
146 __u8 int_in_endpointAddr; 147 __u8 int_in_endpointAddr;
@@ -164,13 +165,11 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
164 val = tiocm_to_gigaset(new_state); 165 val = tiocm_to_gigaset(new_state);
165 166
166 gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask); 167 gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
167 // don't use this in an interrupt/BH
168 r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41, 168 r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41,
169 (val & 0xff) | ((mask & 0xff) << 8), 0, 169 (val & 0xff) | ((mask & 0xff) << 8), 0,
170 NULL, 0, 2000 /* timeout? */); 170 NULL, 0, 2000 /* timeout? */);
171 if (r < 0) 171 if (r < 0)
172 return r; 172 return r;
173 //..
174 return 0; 173 return 0;
175} 174}
176 175
@@ -220,7 +219,6 @@ static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
220 cflag &= CBAUD; 219 cflag &= CBAUD;
221 220
222 switch (cflag) { 221 switch (cflag) {
223 //FIXME more values?
224 case B300: rate = 300; break; 222 case B300: rate = 300; break;
225 case B600: rate = 600; break; 223 case B600: rate = 600; break;
226 case B1200: rate = 1200; break; 224 case B1200: rate = 1200; break;
@@ -273,7 +271,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
273 /* set the number of stop bits */ 271 /* set the number of stop bits */
274 if (cflag & CSTOPB) { 272 if (cflag & CSTOPB) {
275 if ((cflag & CSIZE) == CS5) 273 if ((cflag & CSIZE) == CS5)
276 val |= 1; /* 1.5 stop bits */ //FIXME is this okay? 274 val |= 1; /* 1.5 stop bits */
277 else 275 else
278 val |= 2; /* 2 stop bits */ 276 val |= 2; /* 2 stop bits */
279 } 277 }
@@ -282,7 +280,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
282} 280}
283 281
284 282
285 /*================================================================================================================*/ 283/*============================================================================*/
286static int gigaset_init_bchannel(struct bc_state *bcs) 284static int gigaset_init_bchannel(struct bc_state *bcs)
287{ 285{
288 /* nothing to do for M10x */ 286 /* nothing to do for M10x */
@@ -344,7 +342,6 @@ static void gigaset_modem_fill(unsigned long data)
344 if (write_modem(cs) < 0) { 342 if (write_modem(cs) < 0) {
345 gig_dbg(DEBUG_OUTPUT, 343 gig_dbg(DEBUG_OUTPUT,
346 "modem_fill: write_modem failed"); 344 "modem_fill: write_modem failed");
347 // FIXME should we tell the LL?
348 again = 1; /* no callback will be called! */ 345 again = 1; /* no callback will be called! */
349 } 346 }
350 } 347 }
@@ -356,8 +353,8 @@ static void gigaset_modem_fill(unsigned long data)
356 */ 353 */
357static void gigaset_read_int_callback(struct urb *urb) 354static void gigaset_read_int_callback(struct urb *urb)
358{ 355{
359 struct inbuf_t *inbuf = urb->context; 356 struct cardstate *cs = urb->context;
360 struct cardstate *cs = inbuf->cs; 357 struct inbuf_t *inbuf = cs->inbuf;
361 int status = urb->status; 358 int status = urb->status;
362 int r; 359 int r;
363 unsigned numbytes; 360 unsigned numbytes;
@@ -368,7 +365,7 @@ static void gigaset_read_int_callback(struct urb *urb)
368 numbytes = urb->actual_length; 365 numbytes = urb->actual_length;
369 366
370 if (numbytes) { 367 if (numbytes) {
371 src = inbuf->rcvbuf; 368 src = cs->hw.usb->rcvbuf;
372 if (unlikely(*src)) 369 if (unlikely(*src))
373 dev_warn(cs->dev, 370 dev_warn(cs->dev,
374 "%s: There was no leading 0, but 0x%02x!\n", 371 "%s: There was no leading 0, but 0x%02x!\n",
@@ -440,7 +437,7 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
440 struct cmdbuf_t *tcb; 437 struct cmdbuf_t *tcb;
441 unsigned long flags; 438 unsigned long flags;
442 int count; 439 int count;
443 int status = -ENOENT; // FIXME 440 int status = -ENOENT;
444 struct usb_cardstate *ucs = cs->hw.usb; 441 struct usb_cardstate *ucs = cs->hw.usb;
445 442
446 do { 443 do {
@@ -480,7 +477,9 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
480 ucs->busy = 1; 477 ucs->busy = 1;
481 478
482 spin_lock_irqsave(&cs->lock, flags); 479 spin_lock_irqsave(&cs->lock, flags);
483 status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV; 480 status = cs->connected ?
481 usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) :
482 -ENODEV;
484 spin_unlock_irqrestore(&cs->lock, flags); 483 spin_unlock_irqrestore(&cs->lock, flags);
485 484
486 if (status) { 485 if (status) {
@@ -510,8 +509,8 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
510 509
511 if (len <= 0) 510 if (len <= 0)
512 return 0; 511 return 0;
513 512 cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
514 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 513 if (!cb) {
515 dev_err(cs->dev, "%s: out of memory\n", __func__); 514 dev_err(cs->dev, "%s: out of memory\n", __func__);
516 return -ENOMEM; 515 return -ENOMEM;
517 } 516 }
@@ -637,9 +636,7 @@ static int write_modem(struct cardstate *cs)
637 return -EINVAL; 636 return -EINVAL;
638 } 637 }
639 638
640 /* Copy data to bulk out buffer and // FIXME copying not necessary 639 /* Copy data to bulk out buffer and transmit data */
641 * transmit data
642 */
643 count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); 640 count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
644 skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count); 641 skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
645 skb_pull(bcs->tx_skb, count); 642 skb_pull(bcs->tx_skb, count);
@@ -650,7 +647,8 @@ static int write_modem(struct cardstate *cs)
650 if (cs->connected) { 647 if (cs->connected) {
651 usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, 648 usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
652 usb_sndbulkpipe(ucs->udev, 649 usb_sndbulkpipe(ucs->udev,
653 ucs->bulk_out_endpointAddr & 0x0f), 650 ucs->bulk_out_endpointAddr &
651 0x0f),
654 ucs->bulk_out_buffer, count, 652 ucs->bulk_out_buffer, count,
655 gigaset_write_bulk_callback, cs); 653 gigaset_write_bulk_callback, cs);
656 ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC); 654 ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
@@ -666,7 +664,7 @@ static int write_modem(struct cardstate *cs)
666 664
667 if (!bcs->tx_skb->len) { 665 if (!bcs->tx_skb->len) {
668 /* skb sent completely */ 666 /* skb sent completely */
669 gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also, when ret<0? 667 gigaset_skb_sent(bcs, bcs->tx_skb);
670 668
671 gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!", 669 gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
672 (unsigned long) bcs->tx_skb); 670 (unsigned long) bcs->tx_skb);
@@ -763,8 +761,8 @@ static int gigaset_probe(struct usb_interface *interface,
763 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); 761 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
764 ucs->rcvbuf_size = buffer_size; 762 ucs->rcvbuf_size = buffer_size;
765 ucs->int_in_endpointAddr = endpoint->bEndpointAddress; 763 ucs->int_in_endpointAddr = endpoint->bEndpointAddress;
766 cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL); 764 ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
767 if (!cs->inbuf[0].rcvbuf) { 765 if (!ucs->rcvbuf) {
768 dev_err(cs->dev, "Couldn't allocate rcvbuf\n"); 766 dev_err(cs->dev, "Couldn't allocate rcvbuf\n");
769 retval = -ENOMEM; 767 retval = -ENOMEM;
770 goto error; 768 goto error;
@@ -773,9 +771,9 @@ static int gigaset_probe(struct usb_interface *interface,
773 usb_fill_int_urb(ucs->read_urb, udev, 771 usb_fill_int_urb(ucs->read_urb, udev,
774 usb_rcvintpipe(udev, 772 usb_rcvintpipe(udev,
775 endpoint->bEndpointAddress & 0x0f), 773 endpoint->bEndpointAddress & 0x0f),
776 cs->inbuf[0].rcvbuf, buffer_size, 774 ucs->rcvbuf, buffer_size,
777 gigaset_read_int_callback, 775 gigaset_read_int_callback,
778 cs->inbuf + 0, endpoint->bInterval); 776 cs, endpoint->bInterval);
779 777
780 retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL); 778 retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
781 if (retval) { 779 if (retval) {
@@ -789,7 +787,7 @@ static int gigaset_probe(struct usb_interface *interface,
789 787
790 if (!gigaset_start(cs)) { 788 if (!gigaset_start(cs)) {
791 tasklet_kill(&cs->write_tasklet); 789 tasklet_kill(&cs->write_tasklet);
792 retval = -ENODEV; //FIXME 790 retval = -ENODEV;
793 goto error; 791 goto error;
794 } 792 }
795 return 0; 793 return 0;
@@ -798,11 +796,11 @@ error:
798 usb_kill_urb(ucs->read_urb); 796 usb_kill_urb(ucs->read_urb);
799 kfree(ucs->bulk_out_buffer); 797 kfree(ucs->bulk_out_buffer);
800 usb_free_urb(ucs->bulk_out_urb); 798 usb_free_urb(ucs->bulk_out_urb);
801 kfree(cs->inbuf[0].rcvbuf); 799 kfree(ucs->rcvbuf);
802 usb_free_urb(ucs->read_urb); 800 usb_free_urb(ucs->read_urb);
803 usb_set_intfdata(interface, NULL); 801 usb_set_intfdata(interface, NULL);
804 ucs->read_urb = ucs->bulk_out_urb = NULL; 802 ucs->read_urb = ucs->bulk_out_urb = NULL;
805 cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; 803 ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
806 usb_put_dev(ucs->udev); 804 usb_put_dev(ucs->udev);
807 ucs->udev = NULL; 805 ucs->udev = NULL;
808 ucs->interface = NULL; 806 ucs->interface = NULL;
@@ -831,10 +829,10 @@ static void gigaset_disconnect(struct usb_interface *interface)
831 829
832 kfree(ucs->bulk_out_buffer); 830 kfree(ucs->bulk_out_buffer);
833 usb_free_urb(ucs->bulk_out_urb); 831 usb_free_urb(ucs->bulk_out_urb);
834 kfree(cs->inbuf[0].rcvbuf); 832 kfree(ucs->rcvbuf);
835 usb_free_urb(ucs->read_urb); 833 usb_free_urb(ucs->read_urb);
836 ucs->read_urb = ucs->bulk_out_urb = NULL; 834 ucs->read_urb = ucs->bulk_out_urb = NULL;
837 cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; 835 ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
838 836
839 usb_put_dev(ucs->udev); 837 usb_put_dev(ucs->udev);
840 ucs->interface = NULL; 838 ucs->interface = NULL;
@@ -916,9 +914,10 @@ static int __init usb_gigaset_init(void)
916 int result; 914 int result;
917 915
918 /* allocate memory for our driver state and intialize it */ 916 /* allocate memory for our driver state and intialize it */
919 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 917 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
920 GIGASET_MODULENAME, GIGASET_DEVNAME, 918 GIGASET_MODULENAME, GIGASET_DEVNAME,
921 &ops, THIS_MODULE)) == NULL) 919 &ops, THIS_MODULE);
920 if (driver == NULL)
922 goto error; 921 goto error;
923 922
924 /* register this driver with the USB subsystem */ 923 /* register this driver with the USB subsystem */
diff --git a/drivers/isdn/hardware/eicon/maintidi.c b/drivers/isdn/hardware/eicon/maintidi.c
index 23960cb6eaab..41c26e756452 100644
--- a/drivers/isdn/hardware/eicon/maintidi.c
+++ b/drivers/isdn/hardware/eicon/maintidi.c
@@ -959,8 +959,9 @@ static int process_idi_event (diva_strace_context_t* pLib,
959 } 959 }
960 if (!strncmp("State\\Layer2 No1", path, pVar->path_length)) { 960 if (!strncmp("State\\Layer2 No1", path, pVar->path_length)) {
961 char* tmp = &pLib->lines[0].pInterface->Layer2[0]; 961 char* tmp = &pLib->lines[0].pInterface->Layer2[0];
962 dword l2_state; 962 dword l2_state;
963 diva_strace_read_uint (pVar, &l2_state); 963 if (diva_strace_read_uint(pVar, &l2_state))
964 return -1;
964 965
965 switch (l2_state) { 966 switch (l2_state) {
966 case 0: 967 case 0:
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 27d5dd68f4fb..ae89fb89da64 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -2692,7 +2692,7 @@ static byte connect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
2692 if (!(fax_control_bits & T30_CONTROL_BIT_MORE_DOCUMENTS) 2692 if (!(fax_control_bits & T30_CONTROL_BIT_MORE_DOCUMENTS)
2693 || (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)) 2693 || (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS))
2694 { 2694 {
2695 len = (byte)(&(((T30_INFO *) 0)->universal_6)); 2695 len = offsetof(T30_INFO, universal_6);
2696 fax_info_change = false; 2696 fax_info_change = false;
2697 if (ncpi->length >= 4) 2697 if (ncpi->length >= 4)
2698 { 2698 {
@@ -2754,7 +2754,7 @@ static byte connect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
2754 for (i = 0; i < w; i++) 2754 for (i = 0; i < w; i++)
2755 ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id[i] = fax_parms[4].info[1+i]; 2755 ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id[i] = fax_parms[4].info[1+i];
2756 ((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0; 2756 ((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
2757 len = (byte)(((T30_INFO *) 0)->station_id + 20); 2757 len = offsetof(T30_INFO, station_id) + 20;
2758 w = fax_parms[5].length; 2758 w = fax_parms[5].length;
2759 if (w > 20) 2759 if (w > 20)
2760 w = 20; 2760 w = 20;
@@ -2788,7 +2788,7 @@ static byte connect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
2788 } 2788 }
2789 else 2789 else
2790 { 2790 {
2791 len = (byte)(&(((T30_INFO *) 0)->universal_6)); 2791 len = offsetof(T30_INFO, universal_6);
2792 } 2792 }
2793 fax_info_change = true; 2793 fax_info_change = true;
2794 2794
@@ -2892,7 +2892,7 @@ static byte connect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
2892 && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF) 2892 && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
2893 && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP)) 2893 && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
2894 { 2894 {
2895 len = ((byte)(((T30_INFO *) 0)->station_id + 20)); 2895 len = offsetof(T30_INFO, station_id) + 20;
2896 if (plci->fax_connect_info_length < len) 2896 if (plci->fax_connect_info_length < len)
2897 { 2897 {
2898 ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0; 2898 ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
@@ -3802,7 +3802,7 @@ static byte manufacturer_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
3802 break; 3802 break;
3803 } 3803 }
3804 ncpi = &m_parms[1]; 3804 ncpi = &m_parms[1];
3805 len = ((byte)(((T30_INFO *) 0)->station_id + 20)); 3805 len = offsetof(T30_INFO, station_id) + 20;
3806 if (plci->fax_connect_info_length < len) 3806 if (plci->fax_connect_info_length < len)
3807 { 3807 {
3808 ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0; 3808 ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
@@ -6844,7 +6844,7 @@ static void nl_ind(PLCI *plci)
6844 if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id-1]) 6844 if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id-1])
6845 & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD))) 6845 & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
6846 { 6846 {
6847 i = ((word)(((T30_INFO *) 0)->station_id + 20)) + ((T30_INFO *)plci->NL.RBuffer->P)->head_line_len; 6847 i = offsetof(T30_INFO, station_id) + 20 + ((T30_INFO *)plci->NL.RBuffer->P)->head_line_len;
6848 while (i < plci->NL.RBuffer->length) 6848 while (i < plci->NL.RBuffer->length)
6849 plci->ncpi_buffer[++len] = plci->NL.RBuffer->P[i++]; 6849 plci->ncpi_buffer[++len] = plci->NL.RBuffer->P[i++];
6850 } 6850 }
@@ -7236,7 +7236,7 @@ static void nl_ind(PLCI *plci)
7236 { 7236 {
7237 plci->RData[1].P = plci->RData[0].P; 7237 plci->RData[1].P = plci->RData[0].P;
7238 plci->RData[1].PLength = plci->RData[0].PLength; 7238 plci->RData[1].PLength = plci->RData[0].PLength;
7239 plci->RData[0].P = v120_header_buffer + (-((int) v120_header_buffer) & 3); 7239 plci->RData[0].P = v120_header_buffer + (-((unsigned long)v120_header_buffer) & 3);
7240 if ((plci->NL.RBuffer->P[0] & V120_HEADER_EXTEND_BIT) || (plci->NL.RLength == 1)) 7240 if ((plci->NL.RBuffer->P[0] & V120_HEADER_EXTEND_BIT) || (plci->NL.RLength == 1))
7241 plci->RData[0].PLength = 1; 7241 plci->RData[0].PLength = 1;
7242 else 7242 else
@@ -8473,7 +8473,7 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
8473 fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING; 8473 fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
8474 } 8474 }
8475 len = nlc[0]; 8475 len = nlc[0];
8476 pos = ((byte)(((T30_INFO *) 0)->station_id + 20)); 8476 pos = offsetof(T30_INFO, station_id) + 20;
8477 if (pos < plci->fax_connect_info_length) 8477 if (pos < plci->fax_connect_info_length)
8478 { 8478 {
8479 for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--) 8479 for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
@@ -8525,7 +8525,7 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
8525 } 8525 }
8526 8526
8527 PUT_WORD(&(((T30_INFO *)&nlc[1])->control_bits_low), fax_control_bits); 8527 PUT_WORD(&(((T30_INFO *)&nlc[1])->control_bits_low), fax_control_bits);
8528 len = ((byte)(((T30_INFO *) 0)->station_id + 20)); 8528 len = offsetof(T30_INFO, station_id) + 20;
8529 for (i = 0; i < len; i++) 8529 for (i = 0; i < len; i++)
8530 plci->fax_connect_info_buffer[i] = nlc[1+i]; 8530 plci->fax_connect_info_buffer[i] = nlc[1+i];
8531 ((T30_INFO *) plci->fax_connect_info_buffer)->head_line_len = 0; 8531 ((T30_INFO *) plci->fax_connect_info_buffer)->head_line_len = 0;
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index bf526a7a63af..d6fdf1f66754 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -594,6 +594,7 @@ Amd7930_l1hw(struct PStack *st, int pr, void *arg)
594 if (cs->debug & L1_DEB_WARN) 594 if (cs->debug & L1_DEB_WARN)
595 debugl1(cs, "Amd7930: l1hw: l2l1 tx_skb exist this shouldn't happen"); 595 debugl1(cs, "Amd7930: l1hw: l2l1 tx_skb exist this shouldn't happen");
596 skb_queue_tail(&cs->sq, skb); 596 skb_queue_tail(&cs->sq, skb);
597 spin_unlock_irqrestore(&cs->lock, flags);
597 break; 598 break;
598 } 599 }
599 if (cs->debug & DEB_DLOG_HEX) 600 if (cs->debug & DEB_DLOG_HEX)
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 018bd293e580..0b0c2e5d806b 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -382,7 +382,7 @@ MemwaitforXFW(struct IsdnCardState *cs, int hscx)
382{ 382{
383 int to = 50; 383 int to = 50;
384 384
385 while ((!(MemReadHSCX(cs, hscx, HSCX_STAR) & 0x44) == 0x40) && to) { 385 while (((MemReadHSCX(cs, hscx, HSCX_STAR) & 0x44) != 0x40) && to) {
386 udelay(1); 386 udelay(1);
387 to--; 387 to--;
388 } 388 }
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 9de54202c90c..a420b64472e3 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -817,8 +817,8 @@ collect_rx_frame(usb_fifo * fifo, __u8 * data, int len, int finish)
817 } 817 }
818 /* we have a complete hdlc packet */ 818 /* we have a complete hdlc packet */
819 if (finish) { 819 if (finish) {
820 if ((!fifo->skbuff->data[fifo->skbuff->len - 1]) 820 if (fifo->skbuff->len > 3 &&
821 && (fifo->skbuff->len > 3)) { 821 !fifo->skbuff->data[fifo->skbuff->len - 1]) {
822 822
823 if (fifon == HFCUSB_D_RX) { 823 if (fifon == HFCUSB_D_RX) {
824 DBG(HFCUSB_DBG_DCHANNEL, 824 DBG(HFCUSB_DBG_DCHANNEL,
diff --git a/drivers/isdn/hisax/hscx_irq.c b/drivers/isdn/hisax/hscx_irq.c
index 7b1ad5e4ecda..2387d76c721a 100644
--- a/drivers/isdn/hisax/hscx_irq.c
+++ b/drivers/isdn/hisax/hscx_irq.c
@@ -32,7 +32,7 @@ waitforXFW(struct IsdnCardState *cs, int hscx)
32{ 32{
33 int to = 50; 33 int to = 50;
34 34
35 while ((!(READHSCX(cs, hscx, HSCX_STAR) & 0x44) == 0x40) && to) { 35 while (((READHSCX(cs, hscx, HSCX_STAR) & 0x44) != 0x40) && to) {
36 udelay(1); 36 udelay(1);
37 to--; 37 to--;
38 } 38 }
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index 9aba646ba221..c80cbb8a2ef9 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -468,6 +468,7 @@ ICC_l1hw(struct PStack *st, int pr, void *arg)
468 if (cs->debug & L1_DEB_WARN) 468 if (cs->debug & L1_DEB_WARN)
469 debugl1(cs, " l2l1 tx_skb exist this shouldn't happen"); 469 debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
470 skb_queue_tail(&cs->sq, skb); 470 skb_queue_tail(&cs->sq, skb);
471 spin_unlock_irqrestore(&cs->lock, flags);
471 break; 472 break;
472 } 473 }
473 if (cs->debug & DEB_DLOG_HEX) 474 if (cs->debug & DEB_DLOG_HEX)
diff --git a/drivers/isdn/i4l/isdn_net.h b/drivers/isdn/i4l/isdn_net.h
index 74032d0881ef..7511f08effa5 100644
--- a/drivers/isdn/i4l/isdn_net.h
+++ b/drivers/isdn/i4l/isdn_net.h
@@ -83,19 +83,19 @@ static __inline__ isdn_net_local * isdn_net_get_locked_lp(isdn_net_dev *nd)
83 83
84 spin_lock_irqsave(&nd->queue_lock, flags); 84 spin_lock_irqsave(&nd->queue_lock, flags);
85 lp = nd->queue; /* get lp on top of queue */ 85 lp = nd->queue; /* get lp on top of queue */
86 spin_lock(&nd->queue->xmit_lock);
87 while (isdn_net_lp_busy(nd->queue)) { 86 while (isdn_net_lp_busy(nd->queue)) {
88 spin_unlock(&nd->queue->xmit_lock);
89 nd->queue = nd->queue->next; 87 nd->queue = nd->queue->next;
90 if (nd->queue == lp) { /* not found -- should never happen */ 88 if (nd->queue == lp) { /* not found -- should never happen */
91 lp = NULL; 89 lp = NULL;
92 goto errout; 90 goto errout;
93 } 91 }
94 spin_lock(&nd->queue->xmit_lock);
95 } 92 }
96 lp = nd->queue; 93 lp = nd->queue;
97 nd->queue = nd->queue->next; 94 nd->queue = nd->queue->next;
95 spin_unlock_irqrestore(&nd->queue_lock, flags);
96 spin_lock(&lp->xmit_lock);
98 local_bh_disable(); 97 local_bh_disable();
98 return lp;
99errout: 99errout:
100 spin_unlock_irqrestore(&nd->queue_lock, flags); 100 spin_unlock_irqrestore(&nd->queue_lock, flags);
101 return lp; 101 return lp;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 28182ed8dea1..fcfe17a19a61 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -779,7 +779,7 @@ base_sock_create(struct net *net, struct socket *sock, int protocol)
779} 779}
780 780
781static int 781static int
782mISDN_sock_create(struct net *net, struct socket *sock, int proto) 782mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
783{ 783{
784 int err = -EPROTONOSUPPORT; 784 int err = -EPROTONOSUPPORT;
785 785
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 3e1532a180ff..0d05ec43012c 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -364,7 +364,7 @@ add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
364static int 364static int
365st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) 365st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
366{ 366{
367 if (!ch->st || ch->st->layer1) 367 if (!ch->st || !ch->st->layer1)
368 return -EINVAL; 368 return -EINVAL;
369 return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg); 369 return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
370} 370}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index df1f86b5c83e..a2ea383105a6 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -249,5 +249,6 @@ config EP93XX_PWM
249source "drivers/misc/c2port/Kconfig" 249source "drivers/misc/c2port/Kconfig"
250source "drivers/misc/eeprom/Kconfig" 250source "drivers/misc/eeprom/Kconfig"
251source "drivers/misc/cb710/Kconfig" 251source "drivers/misc/cb710/Kconfig"
252source "drivers/misc/iwmc3200top/Kconfig"
252 253
253endif # MISC_DEVICES 254endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f982d2ecfde7..e311267a355f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_HP_ILO) += hpilo.o
21obj-$(CONFIG_ISL29003) += isl29003.o 21obj-$(CONFIG_ISL29003) += isl29003.o
22obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o 22obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
23obj-$(CONFIG_C2PORT) += c2port/ 23obj-$(CONFIG_C2PORT) += c2port/
24obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
24obj-y += eeprom/ 25obj-y += eeprom/
25obj-y += cb710/ 26obj-y += cb710/
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
new file mode 100644
index 000000000000..9e4b88fb57f1
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Kconfig
@@ -0,0 +1,20 @@
1config IWMC3200TOP
2 tristate "Intel Wireless MultiCom Top Driver"
3 depends on MMC && EXPERIMENTAL
4 select FW_LOADER
5 ---help---
6 Intel Wireless MultiCom 3200 Top driver is responsible for
7 for firmware load and enabled coms enumeration
8
9config IWMC3200TOP_DEBUG
10 bool "Enable full debug output of iwmc3200top Driver"
11 depends on IWMC3200TOP
12 ---help---
13 Enable full debug output of iwmc3200top Driver
14
15config IWMC3200TOP_DEBUGFS
16 bool "Enable Debugfs debugging interface for iwmc3200top"
17 depends on IWMC3200TOP
18 ---help---
19 Enable creation of debugfs files for iwmc3200top
20
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
new file mode 100644
index 000000000000..fbf53fb4634e
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Makefile
@@ -0,0 +1,29 @@
1# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
2# drivers/misc/iwmc3200top/Makefile
3#
4# Copyright (C) 2009 Intel Corporation. All rights reserved.
5#
6# This program is free software; you can redistribute it and/or
7# modify it under the terms of the GNU General Public License version
8# 2 as published by the Free Software Foundation.
9#
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License
16# along with this program; if not, write to the Free Software
17# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18# 02110-1301, USA.
19#
20#
21# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
22# -
23#
24#
25
26obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o
27iwmc3200top-objs := main.o fw-download.o
28iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
29iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
new file mode 100644
index 000000000000..0c8ea0a1c8a3
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.c
@@ -0,0 +1,133 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/debufs.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/string.h>
29#include <linux/ctype.h>
30#include <linux/mmc/sdio_func.h>
31#include <linux/mmc/sdio.h>
32#include <linux/debugfs.h>
33
34#include "iwmc3200top.h"
35#include "fw-msg.h"
36#include "log.h"
37#include "debugfs.h"
38
39
40
41/* Constants definition */
42#define HEXADECIMAL_RADIX 16
43
44/* Functions definition */
45
46
47#define DEBUGFS_ADD(name, parent) do { \
48 dbgfs->dbgfs_##parent##_files.file_##name = \
49 debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
50 &iwmct_dbgfs_##name##_ops); \
51} while (0)
52
53#define DEBUGFS_RM(name) do { \
54 debugfs_remove(name); \
55 name = NULL; \
56} while (0)
57
58#define DEBUGFS_READ_FUNC(name) \
59ssize_t iwmct_dbgfs_##name##_read(struct file *file, \
60 char __user *user_buf, \
61 size_t count, loff_t *ppos);
62
63#define DEBUGFS_WRITE_FUNC(name) \
64ssize_t iwmct_dbgfs_##name##_write(struct file *file, \
65 const char __user *user_buf, \
66 size_t count, loff_t *ppos);
67
68#define DEBUGFS_READ_FILE_OPS(name) \
69 DEBUGFS_READ_FUNC(name) \
70 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
71 .read = iwmct_dbgfs_##name##_read, \
72 .open = iwmct_dbgfs_open_file_generic, \
73 };
74
75#define DEBUGFS_WRITE_FILE_OPS(name) \
76 DEBUGFS_WRITE_FUNC(name) \
77 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
78 .write = iwmct_dbgfs_##name##_write, \
79 .open = iwmct_dbgfs_open_file_generic, \
80 };
81
82#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
83 DEBUGFS_READ_FUNC(name) \
84 DEBUGFS_WRITE_FUNC(name) \
85 static const struct file_operations iwmct_dbgfs_##name##_ops = {\
86 .write = iwmct_dbgfs_##name##_write, \
87 .read = iwmct_dbgfs_##name##_read, \
88 .open = iwmct_dbgfs_open_file_generic, \
89 };
90
91
92/* Debugfs file ops definitions */
93
94/*
95 * Create the debugfs files and directories
96 *
97 */
98void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
99{
100 struct iwmct_debugfs *dbgfs;
101
102 dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
103 if (!dbgfs) {
104 LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
105 sizeof(struct iwmct_debugfs));
106 return;
107 }
108
109 priv->dbgfs = dbgfs;
110 dbgfs->name = name;
111 dbgfs->dir_drv = debugfs_create_dir(name, NULL);
112 if (!dbgfs->dir_drv) {
113 LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
114 return;
115 }
116
117 return;
118}
119
120/**
121 * Remove the debugfs files and directories
122 *
123 */
124void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
125{
126 if (!dbgfs)
127 return;
128
129 DEBUGFS_RM(dbgfs->dir_drv);
130 kfree(dbgfs);
131 dbgfs = NULL;
132}
133
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
new file mode 100644
index 000000000000..71d45759b40f
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.h
@@ -0,0 +1,58 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/debufs.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __DEBUGFS_H__
28#define __DEBUGFS_H__
29
30
31#ifdef CONFIG_IWMC3200TOP_DEBUGFS
32
33struct iwmct_debugfs {
34 const char *name;
35 struct dentry *dir_drv;
36 struct dir_drv_files {
37 } dbgfs_drv_files;
38};
39
40void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
41void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
42
43#else /* CONFIG_IWMC3200TOP_DEBUGFS */
44
45struct iwmct_debugfs;
46
47static inline void
48iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
49{}
50
51static inline void
52iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
53{}
54
55#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
56
57#endif /* __DEBUGFS_H__ */
58
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
new file mode 100644
index 000000000000..33cb693dd37c
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -0,0 +1,359 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-download.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/firmware.h>
28#include <linux/mmc/sdio_func.h>
29#include <asm/unaligned.h>
30
31#include "iwmc3200top.h"
32#include "log.h"
33#include "fw-msg.h"
34
35#define CHECKSUM_BYTES_NUM sizeof(u32)
36
37/**
38 init parser struct with file
39 */
40static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
41 size_t file_size, size_t block_size)
42{
43 struct iwmct_parser *parser = &priv->parser;
44 struct iwmct_fw_hdr *fw_hdr = &parser->versions;
45
46 LOG_INFOEX(priv, INIT, "-->\n");
47
48 LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
49
50 parser->file = file;
51 parser->file_size = file_size;
52 parser->cur_pos = 0;
53 parser->buf = NULL;
54
55 parser->buf = kzalloc(block_size, GFP_KERNEL);
56 if (!parser->buf) {
57 LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
58 return -ENOMEM;
59 }
60 parser->buf_size = block_size;
61
62 /* extract fw versions */
63 memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
64 LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
65 "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
66 fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
67 fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
68 fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
69 fw_hdr->tic_name);
70
71 parser->cur_pos += sizeof(struct iwmct_fw_hdr);
72
73 LOG_INFOEX(priv, INIT, "<--\n");
74 return 0;
75}
76
77static bool iwmct_checksum(struct iwmct_priv *priv)
78{
79 struct iwmct_parser *parser = &priv->parser;
80 __le32 *file = (__le32 *)parser->file;
81 int i, pad, steps;
82 u32 accum = 0;
83 u32 checksum;
84 u32 mask = 0xffffffff;
85
86 pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
87 steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
88
89 LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
90
91 for (i = 0; i < steps; i++)
92 accum += le32_to_cpu(file[i]);
93
94 if (pad) {
95 mask <<= 8 * (4 - pad);
96 accum += le32_to_cpu(file[steps]) & mask;
97 }
98
99 checksum = get_unaligned_le32((__le32 *)(parser->file +
100 parser->file_size - CHECKSUM_BYTES_NUM));
101
102 LOG_INFO(priv, FW_DOWNLOAD,
103 "compare checksum accum=0x%x to checksum=0x%x\n",
104 accum, checksum);
105
106 return checksum == accum;
107}
108
109static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
110 size_t *sec_size, __le32 *sec_addr)
111{
112 struct iwmct_parser *parser = &priv->parser;
113 struct iwmct_dbg *dbg = &priv->dbg;
114 struct iwmct_fw_sec_hdr *sec_hdr;
115
116 LOG_INFOEX(priv, INIT, "-->\n");
117
118 while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
119 <= parser->file_size) {
120
121 sec_hdr = (struct iwmct_fw_sec_hdr *)
122 (parser->file + parser->cur_pos);
123 parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
124
125 LOG_INFO(priv, FW_DOWNLOAD,
126 "sec hdr: type=%s addr=0x%x size=%d\n",
127 sec_hdr->type, sec_hdr->target_addr,
128 sec_hdr->data_size);
129
130 if (strcmp(sec_hdr->type, "ENT") == 0)
131 parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
132 else if (strcmp(sec_hdr->type, "LBL") == 0)
133 strcpy(dbg->label_fw, parser->file + parser->cur_pos);
134 else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
135 (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
136 ((strcmp(sec_hdr->type, "GPS") == 0) &&
137 (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
138 ((strcmp(sec_hdr->type, "BTH") == 0) &&
139 (priv->barker & BARKER_DNLOAD_BT_MSK))) {
140 *sec_addr = sec_hdr->target_addr;
141 *sec_size = le32_to_cpu(sec_hdr->data_size);
142 *p_sec = parser->file + parser->cur_pos;
143 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
144 return 1;
145 } else if (strcmp(sec_hdr->type, "LOG") != 0)
146 LOG_WARNING(priv, FW_DOWNLOAD,
147 "skipping section type %s\n",
148 sec_hdr->type);
149
150 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
151 LOG_INFO(priv, FW_DOWNLOAD,
152 "finished with section cur_pos=%zd\n", parser->cur_pos);
153 }
154
155 LOG_INFOEX(priv, INIT, "<--\n");
156 return 0;
157}
158
159static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
160 size_t sec_size, __le32 addr)
161{
162 struct iwmct_parser *parser = &priv->parser;
163 struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
164 const u8 *cur_block = p_sec;
165 size_t sent = 0;
166 int cnt = 0;
167 int ret = 0;
168 u32 cmd = 0;
169
170 LOG_INFOEX(priv, INIT, "-->\n");
171 LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
172 addr, sec_size);
173
174 while (sent < sec_size) {
175 int i;
176 u32 chksm = 0;
177 u32 reset = atomic_read(&priv->reset);
178 /* actual FW data */
179 u32 data_size = min(parser->buf_size - sizeof(*hdr),
180 sec_size - sent);
181 /* Pad to block size */
182 u32 trans_size = (data_size + sizeof(*hdr) +
183 IWMC_SDIO_BLK_SIZE - 1) &
184 ~(IWMC_SDIO_BLK_SIZE - 1);
185 ++cnt;
186
187 /* in case of reset, interrupt FW DOWNLAOD */
188 if (reset) {
189 LOG_INFO(priv, FW_DOWNLOAD,
190 "Reset detected. Abort FW download!!!");
191 ret = -ECANCELED;
192 goto exit;
193 }
194
195 memset(parser->buf, 0, parser->buf_size);
196 cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
197 cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
198 cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
199 cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
200 hdr->data_size = cpu_to_le32(data_size);
201 hdr->target_addr = addr;
202
203 /* checksum is allowed for sizes divisible by 4 */
204 if (data_size & 0x3)
205 cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
206
207 memcpy(hdr->data, cur_block, data_size);
208
209
210 if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
211
212 chksm = data_size + le32_to_cpu(addr) + cmd;
213 for (i = 0; i < data_size >> 2; i++)
214 chksm += ((u32 *)cur_block)[i];
215
216 hdr->block_chksm = cpu_to_le32(chksm);
217 LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
218 hdr->block_chksm);
219 }
220
221 LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
222 "sec_size=%zd, startAddress 0x%X\n",
223 cnt, trans_size, sent, sec_size, addr);
224
225 if (priv->dbg.dump)
226 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
227
228
229 hdr->cmd = cpu_to_le32(cmd);
230 /* send it down */
231 /* TODO: add more proper sending and error checking */
232 ret = iwmct_tx(priv, 0, parser->buf, trans_size);
233 if (ret != 0) {
234 LOG_INFO(priv, FW_DOWNLOAD,
235 "iwmct_tx returned %d\n", ret);
236 goto exit;
237 }
238
239 addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
240 sent += data_size;
241 cur_block = p_sec + sent;
242
243 if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
244 LOG_INFO(priv, FW_DOWNLOAD,
245 "Block number limit is reached [%d]\n",
246 priv->dbg.blocks);
247 break;
248 }
249 }
250
251 if (sent < sec_size)
252 ret = -EINVAL;
253exit:
254 LOG_INFOEX(priv, INIT, "<--\n");
255 return ret;
256}
257
258static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
259{
260 struct iwmct_parser *parser = &priv->parser;
261 struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
262 int ret;
263 u32 cmd;
264
265 LOG_INFOEX(priv, INIT, "-->\n");
266
267 memset(parser->buf, 0, parser->buf_size);
268 cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
269 if (jump) {
270 cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
271 hdr->target_addr = cpu_to_le32(parser->entry_point);
272 LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
273 parser->entry_point);
274 } else {
275 cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
276 LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
277 }
278
279 hdr->cmd = cpu_to_le32(cmd);
280
281 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
282 /* send it down */
283 /* TODO: add more proper sending and error checking */
284 ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE);
285 if (ret)
286 LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
287
288 LOG_INFOEX(priv, INIT, "<--\n");
289 return 0;
290}
291
292int iwmct_fw_load(struct iwmct_priv *priv)
293{
294 const struct firmware *raw = NULL;
295 __le32 addr;
296 size_t len;
297 const u8 *pdata;
298 const u8 *name = "iwmc3200top.1.fw";
299 int ret = 0;
300
301 /* clear parser struct */
302 memset(&priv->parser, 0, sizeof(struct iwmct_parser));
303 if (!name) {
304 ret = -EINVAL;
305 goto exit;
306 }
307
308 /* get the firmware */
309 ret = request_firmware(&raw, name, &priv->func->dev);
310 if (ret < 0) {
311 LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
312 name, ret);
313 goto exit;
314 }
315
316 if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
317 LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller then (%zd) (%zd)\n",
318 name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
319 goto exit;
320 }
321
322 LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", name);
323
324 ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
325 if (ret < 0) {
326 LOG_ERROR(priv, FW_DOWNLOAD,
327 "iwmct_parser_init failed: Reason %d\n", ret);
328 goto exit;
329 }
330
331 /* checksum */
332 if (!iwmct_checksum(priv)) {
333 LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
334 ret = -EINVAL;
335 goto exit;
336 }
337
338 /* download firmware to device */
339 while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
340 if (iwmct_download_section(priv, pdata, len, addr)) {
341 LOG_ERROR(priv, FW_DOWNLOAD,
342 "%s download section failed\n", name);
343 ret = -EIO;
344 goto exit;
345 }
346 }
347
348 iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
349
350exit:
351 kfree(priv->parser.buf);
352
353 if (raw)
354 release_firmware(raw);
355
356 raw = NULL;
357
358 return ret;
359}
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
new file mode 100644
index 000000000000..9e26b75bd482
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-msg.h
@@ -0,0 +1,113 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-msg.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __FWMSG_H__
28#define __FWMSG_H__
29
30#define COMM_TYPE_D2H 0xFF
31#define COMM_TYPE_H2D 0xEE
32
33#define COMM_CATEGORY_OPERATIONAL 0x00
34#define COMM_CATEGORY_DEBUG 0x01
35#define COMM_CATEGORY_TESTABILITY 0x02
36#define COMM_CATEGORY_DIAGNOSTICS 0x03
37
38#define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A)
39
40#define FW_LOG_SRC_MAX 32
41#define FW_LOG_SRC_ALL 255
42
43#define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000)
44
45#define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001)
46#define CMD_TST_DEV_RESET cpu_to_le16(0x0060)
47#define CMD_TST_FUNC_RESET cpu_to_le16(0x0062)
48#define CMD_TST_IFACE_RESET cpu_to_le16(0x0064)
49#define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065)
50#define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080)
51#define CMD_TST_WAKEUP cpu_to_le16(0x0081)
52#define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082)
53#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083)
54#define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096)
55
56#define OP_OPR_ALIVE cpu_to_le16(0x0010)
57#define OP_OPR_CMD_ACK cpu_to_le16(0x001F)
58#define OP_OPR_CMD_NACK cpu_to_le16(0x0020)
59#define OP_TST_MEM_DUMP cpu_to_le16(0x0043)
60
61#define CMD_FLAG_PADDING_256 0x80
62
63#define FW_HCMD_BLOCK_SIZE 256
64
65struct msg_hdr {
66 u8 type;
67 u8 category;
68 __le16 opcode;
69 u8 seqnum;
70 u8 flags;
71 __le16 length;
72} __attribute__((__packed__));
73
74struct log_hdr {
75 __le32 timestamp;
76 u8 severity;
77 u8 logsource;
78 __le16 reserved;
79} __attribute__((__packed__));
80
81struct mdump_hdr {
82 u8 dmpid;
83 u8 frag;
84 __le16 size;
85 __le32 addr;
86} __attribute__((__packed__));
87
88struct top_msg {
89 struct msg_hdr hdr;
90 union {
91 /* D2H messages */
92 struct {
93 struct log_hdr log_hdr;
94 u8 data[1];
95 } __attribute__((__packed__)) log;
96
97 struct {
98 struct log_hdr log_hdr;
99 struct mdump_hdr md_hdr;
100 u8 data[1];
101 } __attribute__((__packed__)) mdump;
102
103 /* H2D messages */
104 struct {
105 u8 logsource;
106 u8 sevmask;
107 } __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
108 struct mdump_hdr mdump_req;
109 } u;
110} __attribute__((__packed__));
111
112
113#endif /* __FWMSG_H__ */
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
new file mode 100644
index 000000000000..f572fcf177a1
--- /dev/null
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -0,0 +1,206 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/iwmc3200top.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __IWMC3200TOP_H__
28#define __IWMC3200TOP_H__
29
30#include <linux/workqueue.h>
31
#define DRV_NAME "iwmc3200top"

/* SDIO geometry and function register addresses */
#define IWMC_SDIO_BLK_SIZE			256
#define IWMC_DEFAULT_TR_BLK			64
#define IWMC_SDIO_DATA_ADDR			0x0
#define IWMC_SDIO_INTR_ENABLE_ADDR		0x14
/* status and clear share one address: write 1 to clear the intr bit */
#define IWMC_SDIO_INTR_STATUS_ADDR		0x13
#define IWMC_SDIO_INTR_CLEAR_ADDR		0x13
#define IWMC_SDIO_INTR_GET_SIZE_ADDR		0x2C	/* 2-byte LE rx size */

#define COMM_HUB_HEADER_LENGTH 16
#define LOGGER_HEADER_LENGTH   10


/* Reboot-barker layout: the low byte carries per-component download
 * request flags, bits 8..31 carry the barker signature. */
#define BARKER_DNLOAD_BT_POS		0
#define BARKER_DNLOAD_BT_MSK		BIT(BARKER_DNLOAD_BT_POS)
#define BARKER_DNLOAD_GPS_POS		1
#define BARKER_DNLOAD_GPS_MSK		BIT(BARKER_DNLOAD_GPS_POS)
#define BARKER_DNLOAD_TOP_POS		2
#define BARKER_DNLOAD_TOP_MSK		BIT(BARKER_DNLOAD_TOP_POS)
#define BARKER_DNLOAD_RESERVED1_POS	3
#define BARKER_DNLOAD_RESERVED1_MSK	BIT(BARKER_DNLOAD_RESERVED1_POS)
#define BARKER_DNLOAD_JUMP_POS		4
#define BARKER_DNLOAD_JUMP_MSK		BIT(BARKER_DNLOAD_JUMP_POS)
#define BARKER_DNLOAD_SYNC_POS		5
#define BARKER_DNLOAD_SYNC_MSK		BIT(BARKER_DNLOAD_SYNC_POS)
#define BARKER_DNLOAD_RESERVED2_POS	6
#define BARKER_DNLOAD_RESERVED2_MSK	(0x3 << BARKER_DNLOAD_RESERVED2_POS)
#define BARKER_DNLOAD_BARKER_POS	8
#define BARKER_DNLOAD_BARKER_MSK	(0xffffff << BARKER_DNLOAD_BARKER_POS)

/* signature portion of a reboot barker (compare after masking) */
#define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS)
/* whole field barker */
#define IWMC_BARKER_ACK 0xfeedbabe

/* signature placed in the hcmd header (CMD_HDR_SIGNATURE field) */
#define IWMC_CMD_SIGNATURE 0xcbbc
/*
 * Bootloader host-command word bit layout (struct iwmct_fw_load_hdr cmd).
 *
 * Fixes vs. original:
 *  - CMD_HDR_OPCODE_MSK_MSK referenced the undefined
 *    CMD_HDR_OPCODE_MSK_POS (it could never expand) and had a doubled
 *    suffix; it is now CMD_HDR_OPCODE_MSK built from CMD_HDR_OPCODE_POS.
 *  - CMD_HDR_RESERVED_MSK / CMD_HDR_SIGNATURE_MSK wrapped an already
 *    shifted multi-bit mask in BIT(), yielding 1 << (mask) -- an
 *    oversized, undefined shift; the BIT() wrappers are removed.
 */
#define CMD_HDR_OPCODE_POS		0
#define CMD_HDR_OPCODE_MSK		(0xf << CMD_HDR_OPCODE_POS)
#define CMD_HDR_RESPONSE_CODE_POS	4
#define CMD_HDR_RESPONSE_CODE_MSK	(0xf << CMD_HDR_RESPONSE_CODE_POS)
#define CMD_HDR_USE_CHECKSUM_POS	8
#define CMD_HDR_USE_CHECKSUM_MSK	BIT(CMD_HDR_USE_CHECKSUM_POS)
#define CMD_HDR_RESPONSE_REQUIRED_POS	9
#define CMD_HDR_RESPONSE_REQUIRED_MSK	BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
#define CMD_HDR_DIRECT_ACCESS_POS	10
#define CMD_HDR_DIRECT_ACCESS_MSK	BIT(CMD_HDR_DIRECT_ACCESS_POS)
#define CMD_HDR_RESERVED_POS		11
#define CMD_HDR_RESERVED_MSK		(0x1f << CMD_HDR_RESERVED_POS)
#define CMD_HDR_SIGNATURE_POS		16
#define CMD_HDR_SIGNATURE_MSK		(0xffffU << CMD_HDR_SIGNATURE_POS)
83
/* Bootloader host-command opcodes (CMD_HDR_OPCODE field).  Values are
 * part of the device wire protocol -- do not renumber. */
enum {
	IWMC_OPCODE_PING = 0,
	IWMC_OPCODE_READ = 1,
	IWMC_OPCODE_WRITE = 2,
	IWMC_OPCODE_JUMP = 3,
	IWMC_OPCODE_REBOOT = 4,
	IWMC_OPCODE_PERSISTENT_WRITE = 5,
	IWMC_OPCODE_PERSISTENT_READ = 6,
	IWMC_OPCODE_READ_MODIFY_WRITE = 7,
	IWMC_OPCODE_LAST_COMMAND = 15
};
95
96struct iwmct_fw_load_hdr {
97 __le32 cmd;
98 __le32 target_addr;
99 __le32 data_size;
100 __le32 block_chksm;
101 u8 data[0];
102};
103
/**
 * struct iwmct_fw_hdr
 * holds all sw components versions: one major/minor/revision triplet
 * per TOP, GPS and BT component, plus a fixed-size label string.
 */
struct iwmct_fw_hdr {
	u8 top_major;
	u8 top_minor;
	u8 top_revision;
	u8 gps_major;
	u8 gps_minor;
	u8 gps_revision;
	u8 bt_major;
	u8 bt_minor;
	u8 bt_revision;
	u8 tic_name[31];	/* NUL-termination not guaranteed -- treat
				 * as a fixed-size byte array */
};

/**
 * struct iwmct_fw_sec_hdr
 * @type: function type (4-byte tag, not NUL-terminated)
 * @data_size: section's data size
 * @target_addr: download address
 */
struct iwmct_fw_sec_hdr {
	u8 type[4];
	__le32 data_size;
	__le32 target_addr;
};
132
/**
 * struct iwmct_parser
 * @file: fw image
 * @file_size: fw size
 * @cur_pos: position in file
 * @buf: temp buf for download
 * @buf_size: size of buf
 * @entry_point: address to jump in fw kick-off
 * @versions: component versions parsed from the image header
 */
struct iwmct_parser {
	const u8 *file;
	size_t file_size;
	size_t cur_pos;
	u8 *buf;
	size_t buf_size;
	u32 entry_point;
	struct iwmct_fw_hdr versions;
};


/* One queued rx request: iwmct_irq() records the pending transfer
 * size here and the rx worker consumes (and frees) the entry. */
struct iwmct_work_struct {
	struct list_head list;
	ssize_t iosize;		/* bytes to read from the device */
};

/* Debug knobs, populated from module parameters in
 * iwmct_dbg_init_params(). */
struct iwmct_dbg {
	int blocks;		/* max blocks to send */
	bool dump;
	bool jump;
	bool direct;
	bool checksum;
	bool fw_download;	/* allow FW download on reboot barker */
	int block_size;
	int download_trans_blks;

	char label_fw[256];
};

struct iwmct_debugfs;

/* Per-SDIO-function driver state. */
struct iwmct_priv {
	struct sdio_func *func;
	struct iwmct_debugfs *dbgfs;
	struct iwmct_parser parser;
	atomic_t reset;
	atomic_t dev_sync;	/* set while a barker sync is in flight */
	u32 trans_len;
	u32 barker;		/* last reboot barker received */
	struct iwmct_dbg dbg;

	/* drivers work queue */
	struct workqueue_struct *wq;
	struct workqueue_struct *bus_rescan_wq;
	struct work_struct bus_rescan_worker;
	struct work_struct isr_worker;

	/* drivers wait queue */
	wait_queue_head_t wait_q;

	/* rx request list; manipulated only with the sdio host claimed */
	struct list_head read_req_list;
};
195
196extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
197 void *src, int count);
198
199extern int iwmct_fw_load(struct iwmct_priv *priv);
200
201extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
202extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
203extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
204extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
205
206#endif /* __IWMC3200TOP_H__ */
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
new file mode 100644
index 000000000000..d569279698f6
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.c
@@ -0,0 +1,347 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/mmc/sdio_func.h>
29#include <linux/ctype.h>
30#include "fw-msg.h"
31#include "iwmc3200top.h"
32#include "log.h"
33
34/* Maximal hexadecimal string size of the FW memdump message */
35#define LOG_MSG_SIZE_MAX 12400
36
37/* iwmct_logdefs is a global used by log macros */
38u8 iwmct_logdefs[LOG_SRC_MAX];
39static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
40
41
42static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
43{
44 int i;
45
46 if (src < size)
47 logdefs[src] = logmask;
48 else if (src == LOG_SRC_ALL)
49 for (i = 0; i < size; i++)
50 logdefs[i] = logmask;
51 else
52 return -1;
53
54 return 0;
55}
56
57
58int iwmct_log_set_filter(u8 src, u8 logmask)
59{
60 return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
61}
62
63
64int iwmct_log_set_fw_filter(u8 src, u8 logmask)
65{
66 return _log_set_log_filter(iwmct_fw_logdefs,
67 FW_LOG_SRC_MAX, src, logmask);
68}
69
70
/*
 * Render @ilen bytes of @ibuf into @str as " XX" hex pairs, preceded
 * by the literal prefix @pref.  snprintf bounds each write and keeps
 * @str NUL-terminated.  Returns 0 when all input bytes fit, -1 when
 * output was truncated to the @slen budget.
 */
static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
				int ilen, char *pref)
{
	int pos = 0;
	int i;
	int len;

	/* copy the prefix, keeping 2 bytes of headroom at the end */
	for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
		str[pos] = pref[i];

	/* 3 chars per byte; len is snprintf's return value (the would-be
	 * length), so pos advances even on the final, truncated write */
	for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
		len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);

	/* ran out of room before consuming all of ibuf */
	if (i < ilen)
		return -1;

	return 0;
}
89
90/* NOTE: This function is not thread safe.
91 Currently it's called only from sdio rx worker - no race there
92*/
93void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
94{
95 struct top_msg *msg;
96 static char logbuf[LOG_MSG_SIZE_MAX];
97
98 msg = (struct top_msg *)buf;
99
100 if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
101 LOG_ERROR(priv, FW_MSG, "Log message from TOP "
102 "is too short %d (expected %zd)\n",
103 len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
104 return;
105 }
106
107 if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
108 BIT(msg->u.log.log_hdr.severity)) ||
109 !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
110 return;
111
112 switch (msg->hdr.category) {
113 case COMM_CATEGORY_TESTABILITY:
114 if (!(iwmct_logdefs[LOG_SRC_TST] &
115 BIT(msg->u.log.log_hdr.severity)))
116 return;
117 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
118 le16_to_cpu(msg->hdr.length) +
119 sizeof(msg->hdr), "<TST>"))
120 LOG_WARNING(priv, TST,
121 "TOP TST message is too long, truncating...");
122 LOG_WARNING(priv, TST, "%s\n", logbuf);
123 break;
124 case COMM_CATEGORY_DEBUG:
125 if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
126 LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
127 ((u8 *)msg) + sizeof(msg->hdr)
128 + sizeof(msg->u.log.log_hdr));
129 else {
130 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
131 le16_to_cpu(msg->hdr.length)
132 + sizeof(msg->hdr),
133 "<DBG>"))
134 LOG_WARNING(priv, FW_MSG,
135 "TOP DBG message is too long,"
136 "truncating...");
137 LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
138 }
139 break;
140 default:
141 break;
142 }
143}
144
145static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
146{
147 int i, pos, len;
148 for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
149 len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
150 i, logdefs[i]);
151 pos += len;
152 }
153 buf[pos-1] = '\n';
154 buf[pos] = '\0';
155
156 if (i < logdefsz)
157 return -1;
158 return 0;
159}
160
161int log_get_filter_str(char *buf, int size)
162{
163 return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
164}
165
166int log_get_fw_filter_str(char *buf, int size)
167{
168 return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
169}
170
171#define HEXADECIMAL_RADIX 16
172#define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */
173
174ssize_t show_iwmct_log_level(struct device *d,
175 struct device_attribute *attr, char *buf)
176{
177 struct iwmct_priv *priv = dev_get_drvdata(d);
178 char *str_buf;
179 int buf_size;
180 ssize_t ret;
181
182 buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
183 str_buf = kzalloc(buf_size, GFP_KERNEL);
184 if (!str_buf) {
185 LOG_ERROR(priv, DEBUGFS,
186 "failed to allocate %d bytes\n", buf_size);
187 ret = -ENOMEM;
188 goto exit;
189 }
190
191 if (log_get_filter_str(str_buf, buf_size) < 0) {
192 ret = -EINVAL;
193 goto exit;
194 }
195
196 ret = sprintf(buf, "%s", str_buf);
197
198exit:
199 kfree(str_buf);
200 return ret;
201}
202
203ssize_t store_iwmct_log_level(struct device *d,
204 struct device_attribute *attr,
205 const char *buf, size_t count)
206{
207 struct iwmct_priv *priv = dev_get_drvdata(d);
208 char *token, *str_buf = NULL;
209 long val;
210 ssize_t ret = count;
211 u8 src, mask;
212
213 if (!count)
214 goto exit;
215
216 str_buf = kzalloc(count, GFP_KERNEL);
217 if (!str_buf) {
218 LOG_ERROR(priv, DEBUGFS,
219 "failed to allocate %zd bytes\n", count);
220 ret = -ENOMEM;
221 goto exit;
222 }
223
224 memcpy(str_buf, buf, count);
225
226 while ((token = strsep(&str_buf, ",")) != NULL) {
227 while (isspace(*token))
228 ++token;
229 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
230 LOG_ERROR(priv, DEBUGFS,
231 "failed to convert string to long %s\n",
232 token);
233 ret = -EINVAL;
234 goto exit;
235 }
236
237 mask = val & 0xFF;
238 src = (val & 0XFF00) >> 8;
239 iwmct_log_set_filter(src, mask);
240 }
241
242exit:
243 kfree(str_buf);
244 return ret;
245}
246
247ssize_t show_iwmct_log_level_fw(struct device *d,
248 struct device_attribute *attr, char *buf)
249{
250 struct iwmct_priv *priv = dev_get_drvdata(d);
251 char *str_buf;
252 int buf_size;
253 ssize_t ret;
254
255 buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
256
257 str_buf = kzalloc(buf_size, GFP_KERNEL);
258 if (!str_buf) {
259 LOG_ERROR(priv, DEBUGFS,
260 "failed to allocate %d bytes\n", buf_size);
261 ret = -ENOMEM;
262 goto exit;
263 }
264
265 if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
266 ret = -EINVAL;
267 goto exit;
268 }
269
270 ret = sprintf(buf, "%s", str_buf);
271
272exit:
273 kfree(str_buf);
274 return ret;
275}
276
277ssize_t store_iwmct_log_level_fw(struct device *d,
278 struct device_attribute *attr,
279 const char *buf, size_t count)
280{
281 struct iwmct_priv *priv = dev_get_drvdata(d);
282 struct top_msg cmd;
283 char *token, *str_buf = NULL;
284 ssize_t ret = count;
285 u16 cmdlen = 0;
286 int i;
287 long val;
288 u8 src, mask;
289
290 if (!count)
291 goto exit;
292
293 str_buf = kzalloc(count, GFP_KERNEL);
294 if (!str_buf) {
295 LOG_ERROR(priv, DEBUGFS,
296 "failed to allocate %zd bytes\n", count);
297 ret = -ENOMEM;
298 goto exit;
299 }
300
301 memcpy(str_buf, buf, count);
302
303 cmd.hdr.type = COMM_TYPE_H2D;
304 cmd.hdr.category = COMM_CATEGORY_DEBUG;
305 cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
306
307 for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
308 (i < FW_LOG_SRC_MAX); i++) {
309
310 while (isspace(*token))
311 ++token;
312
313 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
314 LOG_ERROR(priv, DEBUGFS,
315 "failed to convert string to long %s\n",
316 token);
317 ret = -EINVAL;
318 goto exit;
319 }
320
321 mask = val & 0xFF; /* LSB */
322 src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
323 iwmct_log_set_fw_filter(src, mask);
324
325 cmd.u.logdefs[i].logsource = src;
326 cmd.u.logdefs[i].sevmask = mask;
327 }
328
329 cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
330 cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
331
332 ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
333 if (ret) {
334 LOG_ERROR(priv, DEBUGFS,
335 "Failed to send %d bytes of fwcmd, ret=%zd\n",
336 cmdlen, ret);
337 goto exit;
338 } else
339 LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
340
341 ret = count;
342
343exit:
344 kfree(str_buf);
345 return ret;
346}
347
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
new file mode 100644
index 000000000000..aba8121f978c
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.h
@@ -0,0 +1,158 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __LOG_H__
28#define __LOG_H__
29
30
31/* log severity:
32 * The log levels here match FW log levels
33 * so values need to stay as is */
34#define LOG_SEV_CRITICAL 0
35#define LOG_SEV_ERROR 1
36#define LOG_SEV_WARNING 2
37#define LOG_SEV_INFO 3
38#define LOG_SEV_INFOEX 4
39
40#define LOG_SEV_FILTER_ALL \
41 (BIT(LOG_SEV_CRITICAL) | \
42 BIT(LOG_SEV_ERROR) | \
43 BIT(LOG_SEV_WARNING) | \
44 BIT(LOG_SEV_INFO) | \
45 BIT(LOG_SEV_INFOEX))
46
47/* log source */
48#define LOG_SRC_INIT 0
49#define LOG_SRC_DEBUGFS 1
50#define LOG_SRC_FW_DOWNLOAD 2
51#define LOG_SRC_FW_MSG 3
52#define LOG_SRC_TST 4
53#define LOG_SRC_IRQ 5
54
55#define LOG_SRC_MAX 6
56#define LOG_SRC_ALL 0xFF
57
58/**
59 * Default intitialization runtime log level
60 */
61#ifndef LOG_SEV_FILTER_RUNTIME
62#define LOG_SEV_FILTER_RUNTIME \
63 (BIT(LOG_SEV_CRITICAL) | \
64 BIT(LOG_SEV_ERROR) | \
65 BIT(LOG_SEV_WARNING))
66#endif
67
68#ifndef FW_LOG_SEV_FILTER_RUNTIME
69#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL
70#endif
71
72#ifdef CONFIG_IWMC3200TOP_DEBUG
73/**
74 * Log macros
75 */
76
77#define priv2dev(priv) (&(priv->func)->dev)
78
79#define LOG_CRITICAL(priv, src, fmt, args...) \
80do { \
81 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \
82 dev_crit(priv2dev(priv), "%s %d: " fmt, \
83 __func__, __LINE__, ##args); \
84} while (0)
85
86#define LOG_ERROR(priv, src, fmt, args...) \
87do { \
88 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \
89 dev_err(priv2dev(priv), "%s %d: " fmt, \
90 __func__, __LINE__, ##args); \
91} while (0)
92
93#define LOG_WARNING(priv, src, fmt, args...) \
94do { \
95 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \
96 dev_warn(priv2dev(priv), "%s %d: " fmt, \
97 __func__, __LINE__, ##args); \
98} while (0)
99
100#define LOG_INFO(priv, src, fmt, args...) \
101do { \
102 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \
103 dev_info(priv2dev(priv), "%s %d: " fmt, \
104 __func__, __LINE__, ##args); \
105} while (0)
106
107#define LOG_INFOEX(priv, src, fmt, args...) \
108do { \
109 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
110 dev_dbg(priv2dev(priv), "%s %d: " fmt, \
111 __func__, __LINE__, ##args); \
112} while (0)
113
114#define LOG_HEXDUMP(src, ptr, len) \
115do { \
116 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
117 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
118 16, 1, ptr, len, false); \
119} while (0)
120
121void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
122
123extern u8 iwmct_logdefs[];
124
125int iwmct_log_set_filter(u8 src, u8 logmask);
126int iwmct_log_set_fw_filter(u8 src, u8 logmask);
127
128ssize_t show_iwmct_log_level(struct device *d,
129 struct device_attribute *attr, char *buf);
130ssize_t store_iwmct_log_level(struct device *d,
131 struct device_attribute *attr,
132 const char *buf, size_t count);
133ssize_t show_iwmct_log_level_fw(struct device *d,
134 struct device_attribute *attr, char *buf);
135ssize_t store_iwmct_log_level_fw(struct device *d,
136 struct device_attribute *attr,
137 const char *buf, size_t count);
138
139#else
140
141#define LOG_CRITICAL(priv, src, fmt, args...)
142#define LOG_ERROR(priv, src, fmt, args...)
143#define LOG_WARNING(priv, src, fmt, args...)
144#define LOG_INFO(priv, src, fmt, args...)
145#define LOG_INFOEX(priv, src, fmt, args...)
146#define LOG_HEXDUMP(src, ptr, len)
147
148static inline void iwmct_log_top_message(struct iwmct_priv *priv,
149 u8 *buf, int len) {}
150static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
151static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
152
153#endif /* CONFIG_IWMC3200TOP_DEBUG */
154
155int log_get_filter_str(char *buf, int size);
156int log_get_fw_filter_str(char *buf, int size);
157
158#endif /* __LOG_H__ */
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
new file mode 100644
index 000000000000..6e4e49113ab4
--- /dev/null
+++ b/drivers/misc/iwmc3200top/main.c
@@ -0,0 +1,699 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/main.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/kernel.h>
30#include <linux/debugfs.h>
31#include <linux/mmc/sdio_ids.h>
32#include <linux/mmc/sdio_func.h>
33#include <linux/mmc/sdio.h>
34
35#include "iwmc3200top.h"
36#include "log.h"
37#include "fw-msg.h"
38#include "debugfs.h"
39
40
41#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
42#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
43
44#define IWMCT_VERSION "0.1.62"
45
46#ifdef REPOSITORY_LABEL
47#define RL REPOSITORY_LABEL
48#else
49#define RL local
50#endif
51
52#ifdef CONFIG_IWMC3200TOP_DEBUG
53#define VD "-d"
54#else
55#define VD
56#endif
57
58#define DRIVER_VERSION IWMCT_VERSION "-" __stringify(RL) VD
59
60MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
61MODULE_VERSION(DRIVER_VERSION);
62MODULE_LICENSE("GPL");
63MODULE_AUTHOR(DRIVER_COPYRIGHT);
64
65
66/* FIXME: These can be found in sdio_ids.h in newer kernels */
67#ifndef SDIO_INTEL_VENDOR_ID
68#define SDIO_INTEL_VENDOR_ID 0x0089
69#endif
70#ifndef SDIO_DEVICE_ID_INTEL_IWMC3200TOP
71#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404
72#endif
73
74/*
75 * This workers main task is to wait for OP_OPR_ALIVE
76 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
77 * When OP_OPR_ALIVE received it will issue
78 * a call to "bus_rescan_devices".
79 */
80static void iwmct_rescan_worker(struct work_struct *ws)
81{
82 struct iwmct_priv *priv;
83 int ret;
84
85 priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
86
87 LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
88
89 ret = bus_rescan_devices(priv->func->dev.bus);
90 if (ret < 0)
91 LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n");
92}
93
94static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
95{
96 switch (msg->hdr.opcode) {
97 case OP_OPR_ALIVE:
98 LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
99 queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
100 break;
101 default:
102 LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
103 msg->hdr.opcode);
104 break;
105 }
106}
107
108
109static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
110{
111 struct top_msg *msg;
112
113 msg = (struct top_msg *)buf;
114
115 if (msg->hdr.type != COMM_TYPE_D2H) {
116 LOG_ERROR(priv, FW_MSG,
117 "Message from TOP with invalid message type 0x%X\n",
118 msg->hdr.type);
119 return;
120 }
121
122 if (len < sizeof(msg->hdr)) {
123 LOG_ERROR(priv, FW_MSG,
124 "Message from TOP is too short for message header "
125 "received %d bytes, expected at least %zd bytes\n",
126 len, sizeof(msg->hdr));
127 return;
128 }
129
130 if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
131 LOG_ERROR(priv, FW_MSG,
132 "Message length (%d bytes) is shorter than "
133 "in header (%d bytes)\n",
134 len, le16_to_cpu(msg->hdr.length));
135 return;
136 }
137
138 switch (msg->hdr.category) {
139 case COMM_CATEGORY_OPERATIONAL:
140 op_top_message(priv, (struct top_msg *)buf);
141 break;
142
143 case COMM_CATEGORY_DEBUG:
144 case COMM_CATEGORY_TESTABILITY:
145 case COMM_CATEGORY_DIAGNOSTICS:
146 iwmct_log_top_message(priv, buf, len);
147 break;
148
149 default:
150 LOG_ERROR(priv, FW_MSG,
151 "Message from TOP with unknown category 0x%X\n",
152 msg->hdr.category);
153 break;
154 }
155}
156
157int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
158{
159 int ret;
160 u8 *buf;
161
162 LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n");
163
164 /* add padding to 256 for IWMC */
165 ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
166
167 LOG_HEXDUMP(FW_MSG, cmd, len);
168
169 if (len > FW_HCMD_BLOCK_SIZE) {
170 LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
171 len, FW_HCMD_BLOCK_SIZE);
172 return -1;
173 }
174
175 buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
176 if (!buf) {
177 LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
178 FW_HCMD_BLOCK_SIZE);
179 return -1;
180 }
181
182 memcpy(buf, cmd, len);
183
184 sdio_claim_host(priv->func);
185 ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
186 FW_HCMD_BLOCK_SIZE);
187 sdio_release_host(priv->func);
188
189 kfree(buf);
190 return ret;
191}
192
193int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
194 void *src, int count)
195{
196 int ret;
197
198 sdio_claim_host(priv->func);
199 ret = sdio_memcpy_toio(priv->func, addr, src, count);
200 sdio_release_host(priv->func);
201
202 return ret;
203}
204
205static void iwmct_irq_read_worker(struct work_struct *ws)
206{
207 struct iwmct_priv *priv;
208 struct iwmct_work_struct *read_req;
209 __le32 *buf = NULL;
210 int ret;
211 int iosize;
212 u32 barker;
213 bool is_barker;
214
215 priv = container_of(ws, struct iwmct_priv, isr_worker);
216
217 LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
218
219 /* --------------------- Handshake with device -------------------- */
220 sdio_claim_host(priv->func);
221
222 /* all list manipulations have to be protected by
223 * sdio_claim_host/sdio_release_host */
224 if (list_empty(&priv->read_req_list)) {
225 LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
226 goto exit_release;
227 }
228
229 read_req = list_entry(priv->read_req_list.next,
230 struct iwmct_work_struct, list);
231
232 list_del(&read_req->list);
233 iosize = read_req->iosize;
234 kfree(read_req);
235
236 buf = kzalloc(iosize, GFP_KERNEL);
237 if (!buf) {
238 LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
239 goto exit_release;
240 }
241
242 LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
243 iosize, buf, priv->func->num);
244
245 /* read from device */
246 ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
247 if (ret) {
248 LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
249 goto exit_release;
250 }
251
252 LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);
253
254 barker = le32_to_cpu(buf[0]);
255
256 /* Verify whether it's a barker and if not - treat as regular Rx */
257 if (barker == IWMC_BARKER_ACK ||
258 (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {
259
260 /* Valid Barker is equal on first 4 dwords */
261 is_barker = (buf[1] == buf[0]) &&
262 (buf[2] == buf[0]) &&
263 (buf[3] == buf[0]);
264
265 if (!is_barker) {
266 LOG_WARNING(priv, IRQ,
267 "Potentially inconsistent barker "
268 "%08X_%08X_%08X_%08X\n",
269 le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
270 le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
271 }
272 } else {
273 is_barker = false;
274 }
275
276 /* Handle Top CommHub message */
277 if (!is_barker) {
278 sdio_release_host(priv->func);
279 handle_top_message(priv, (u8 *)buf, iosize);
280 goto exit;
281 } else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
282 if (atomic_read(&priv->dev_sync) == 0) {
283 LOG_ERROR(priv, IRQ,
284 "ACK barker arrived out-of-sync\n");
285 goto exit_release;
286 }
287
288 /* Continuing to FW download (after Sync is completed)*/
289 atomic_set(&priv->dev_sync, 0);
290 LOG_INFO(priv, IRQ, "ACK barker arrived "
291 "- starting FW download\n");
292 } else { /* REBOOT barker */
293 LOG_INFO(priv, IRQ, "Recieved reboot barker: %x\n", barker);
294 priv->barker = barker;
295
296 if (barker & BARKER_DNLOAD_SYNC_MSK) {
297 /* Send the same barker back */
298 ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR,
299 buf, iosize);
300 if (ret) {
301 LOG_ERROR(priv, IRQ,
302 "error %d echoing barker\n", ret);
303 goto exit_release;
304 }
305 LOG_INFO(priv, IRQ, "Echoing barker to device\n");
306 atomic_set(&priv->dev_sync, 1);
307 goto exit_release;
308 }
309
310 /* Continuing to FW download (without Sync) */
311 LOG_INFO(priv, IRQ, "No sync requested "
312 "- starting FW download\n");
313 }
314
315 sdio_release_host(priv->func);
316
317
318 LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
319 LOG_INFO(priv, IRQ, "******* Top FW %s requested ********\n",
320 (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
321 LOG_INFO(priv, IRQ, "******* GPS FW %s requested ********\n",
322 (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
323 LOG_INFO(priv, IRQ, "******* BT FW %s requested ********\n",
324 (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
325
326 if (priv->dbg.fw_download)
327 iwmct_fw_load(priv);
328 else
329 LOG_ERROR(priv, IRQ, "FW download not allowed\n");
330
331 goto exit;
332
333exit_release:
334 sdio_release_host(priv->func);
335exit:
336 kfree(buf);
337 LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n");
338}
339
340static void iwmct_irq(struct sdio_func *func)
341{
342 struct iwmct_priv *priv;
343 int val, ret;
344 int iosize;
345 int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
346 struct iwmct_work_struct *read_req;
347
348 priv = sdio_get_drvdata(func);
349
350 LOG_INFO(priv, IRQ, "enter iwmct_irq\n");
351
352 /* read the function's status register */
353 val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
354
355 LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
356
357 if (!val) {
358 LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
359 goto exit_clear_intr;
360 }
361
362
363 /*
364 * read 2 bytes of the transaction size
365 * IMPORTANT: sdio transaction size has to be read before clearing
366 * sdio interrupt!!!
367 */
368 val = sdio_readb(priv->func, addr++, &ret);
369 iosize = val;
370 val = sdio_readb(priv->func, addr++, &ret);
371 iosize += val << 8;
372
373 LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
374
375 if (iosize == 0) {
376 LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
377 goto exit_clear_intr;
378 }
379
380 /* allocate a work structure to pass iosize to the worker */
381 read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
382 if (!read_req) {
383 LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
384 goto exit_clear_intr;
385 }
386
387 INIT_LIST_HEAD(&read_req->list);
388 read_req->iosize = iosize;
389
390 list_add_tail(&priv->read_req_list, &read_req->list);
391
392 /* clear the function's interrupt request bit (write 1 to clear) */
393 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
394
395 queue_work(priv->wq, &priv->isr_worker);
396
397 LOG_INFO(priv, IRQ, "exit iwmct_irq\n");
398
399 return;
400
401exit_clear_intr:
402 /* clear the function's interrupt request bit (write 1 to clear) */
403 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
404}
405
406
407static int blocks;
408module_param(blocks, int, 0604);
409MODULE_PARM_DESC(blocks, "max_blocks_to_send");
410
411static int dump;
412module_param(dump, bool, 0604);
413MODULE_PARM_DESC(dump, "dump_hex_content");
414
415static int jump = 1;
416module_param(jump, bool, 0604);
417
418static int direct = 1;
419module_param(direct, bool, 0604);
420
421static int checksum = 1;
422module_param(checksum, bool, 0604);
423
424static int fw_download = 1;
425module_param(fw_download, bool, 0604);
426
427static int block_size = IWMC_SDIO_BLK_SIZE;
428module_param(block_size, int, 0404);
429
430static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
431module_param(download_trans_blks, int, 0604);
432
433static int rubbish_barker;
434module_param(rubbish_barker, bool, 0604);
435
436#ifdef CONFIG_IWMC3200TOP_DEBUG
437static int log_level[LOG_SRC_MAX];
438static unsigned int log_level_argc;
439module_param_array(log_level, int, &log_level_argc, 0604);
440MODULE_PARM_DESC(log_level, "log_level");
441
442static int log_level_fw[FW_LOG_SRC_MAX];
443static unsigned int log_level_fw_argc;
444module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
445MODULE_PARM_DESC(log_level_fw, "log_level_fw");
446#endif
447
448void iwmct_dbg_init_params(struct iwmct_priv *priv)
449{
450#ifdef CONFIG_IWMC3200TOP_DEBUG
451 int i;
452
453 for (i = 0; i < log_level_argc; i++) {
454 dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
455 i, log_level[i]);
456 iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
457 log_level[i] & 0xFF);
458 }
459 for (i = 0; i < log_level_fw_argc; i++) {
460 dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
461 i, log_level_fw[i]);
462 iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
463 log_level_fw[i] & 0xFF);
464 }
465#endif
466
467 priv->dbg.blocks = blocks;
468 LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
469 priv->dbg.dump = (bool)dump;
470 LOG_INFO(priv, INIT, "dump=%d\n", dump);
471 priv->dbg.jump = (bool)jump;
472 LOG_INFO(priv, INIT, "jump=%d\n", jump);
473 priv->dbg.direct = (bool)direct;
474 LOG_INFO(priv, INIT, "direct=%d\n", direct);
475 priv->dbg.checksum = (bool)checksum;
476 LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
477 priv->dbg.fw_download = (bool)fw_download;
478 LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
479 priv->dbg.block_size = block_size;
480 LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
481 priv->dbg.download_trans_blks = download_trans_blks;
482 LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
483}
484
485/*****************************************************************************
486 *
487 * sysfs attributes
488 *
489 *****************************************************************************/
490static ssize_t show_iwmct_fw_version(struct device *d,
491 struct device_attribute *attr, char *buf)
492{
493 struct iwmct_priv *priv = dev_get_drvdata(d);
494 return sprintf(buf, "%s\n", priv->dbg.label_fw);
495}
496static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
497
498#ifdef CONFIG_IWMC3200TOP_DEBUG
499static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
500 show_iwmct_log_level, store_iwmct_log_level);
501static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
502 show_iwmct_log_level_fw, store_iwmct_log_level_fw);
503#endif
504
505static struct attribute *iwmct_sysfs_entries[] = {
506 &dev_attr_cc_label_fw.attr,
507#ifdef CONFIG_IWMC3200TOP_DEBUG
508 &dev_attr_log_level.attr,
509 &dev_attr_log_level_fw.attr,
510#endif
511 NULL
512};
513
514static struct attribute_group iwmct_attribute_group = {
515 .name = NULL, /* put in device directory */
516 .attrs = iwmct_sysfs_entries,
517};
518
519
520static int iwmct_probe(struct sdio_func *func,
521 const struct sdio_device_id *id)
522{
523 struct iwmct_priv *priv;
524 int ret;
525 int val = 1;
526 int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
527
528 dev_dbg(&func->dev, "enter iwmct_probe\n");
529
530 dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n",
531 jiffies_to_msecs(2147483647), HZ);
532
533 priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
534 if (!priv) {
535 dev_err(&func->dev, "kzalloc error\n");
536 return -ENOMEM;
537 }
538 priv->func = func;
539 sdio_set_drvdata(func, priv);
540
541
542 /* create drivers work queue */
543 priv->wq = create_workqueue(DRV_NAME "_wq");
544 priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
545 INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
546 INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
547
548 init_waitqueue_head(&priv->wait_q);
549
550 sdio_claim_host(func);
551 /* FIXME: Remove after it is fixed in the Boot ROM upgrade */
552 func->enable_timeout = 10;
553
554 /* In our HW, setting the block size also wakes up the boot rom. */
555 ret = sdio_set_block_size(func, priv->dbg.block_size);
556 if (ret) {
557 LOG_ERROR(priv, INIT,
558 "sdio_set_block_size() failure: %d\n", ret);
559 goto error_sdio_enable;
560 }
561
562 ret = sdio_enable_func(func);
563 if (ret) {
564 LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
565 goto error_sdio_enable;
566 }
567
568 /* init reset and dev_sync states */
569 atomic_set(&priv->reset, 0);
570 atomic_set(&priv->dev_sync, 0);
571
572 /* init read req queue */
573 INIT_LIST_HEAD(&priv->read_req_list);
574
575 /* process configurable parameters */
576 iwmct_dbg_init_params(priv);
577 ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
578 if (ret) {
579 LOG_ERROR(priv, INIT, "Failed to register attributes and "
580 "initialize module_params\n");
581 goto error_dev_attrs;
582 }
583
584 iwmct_dbgfs_register(priv, DRV_NAME);
585
586 if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
587 LOG_INFO(priv, INIT,
588 "Reducing transaction to 8 blocks = 2K (from %d)\n",
589 priv->dbg.download_trans_blks);
590 priv->dbg.download_trans_blks = 8;
591 }
592 priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
593 LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
594
595 ret = sdio_claim_irq(func, iwmct_irq);
596 if (ret) {
597 LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
598 goto error_claim_irq;
599 }
600
601
602 /* Enable function's interrupt */
603 sdio_writeb(priv->func, val, addr, &ret);
604 if (ret) {
605 LOG_ERROR(priv, INIT, "Failure writing to "
606 "Interrupt Enable Register (%d): %d\n", addr, ret);
607 goto error_enable_int;
608 }
609
610 sdio_release_host(func);
611
612 LOG_INFO(priv, INIT, "exit iwmct_probe\n");
613
614 return ret;
615
616error_enable_int:
617 sdio_release_irq(func);
618error_claim_irq:
619 sdio_disable_func(func);
620error_dev_attrs:
621 iwmct_dbgfs_unregister(priv->dbgfs);
622 sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
623error_sdio_enable:
624 sdio_release_host(func);
625 return ret;
626}
627
628static void iwmct_remove(struct sdio_func *func)
629{
630 struct iwmct_work_struct *read_req;
631 struct iwmct_priv *priv = sdio_get_drvdata(func);
632
633 priv = sdio_get_drvdata(func);
634
635 LOG_INFO(priv, INIT, "enter\n");
636
637 sdio_claim_host(func);
638 sdio_release_irq(func);
639 sdio_release_host(func);
640
641 /* Safely destroy osc workqueue */
642 destroy_workqueue(priv->bus_rescan_wq);
643 destroy_workqueue(priv->wq);
644
645 sdio_claim_host(func);
646 sdio_disable_func(func);
647 sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
648 iwmct_dbgfs_unregister(priv->dbgfs);
649 sdio_release_host(func);
650
651 /* free read requests */
652 while (!list_empty(&priv->read_req_list)) {
653 read_req = list_entry(priv->read_req_list.next,
654 struct iwmct_work_struct, list);
655
656 list_del(&read_req->list);
657 kfree(read_req);
658 }
659
660 kfree(priv);
661}
662
663
/* SDIO ids this driver binds to: the Intel IWMC3200 "top" function */
static const struct sdio_device_id iwmct_ids[] = {
	{ SDIO_DEVICE(SDIO_INTEL_VENDOR_ID, SDIO_DEVICE_ID_INTEL_IWMC3200TOP)},
	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(sdio, iwmct_ids);

/* SDIO bus glue: probe/remove entry points for matching functions */
static struct sdio_driver iwmct_driver = {
	.probe = iwmct_probe,
	.remove = iwmct_remove,
	.name = DRV_NAME,
	.id_table = iwmct_ids,
};
677
678static int __init iwmct_init(void)
679{
680 int rc;
681
682 /* Default log filter settings */
683 iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
684 iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL);
685 iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
686
687 rc = sdio_register_driver(&iwmct_driver);
688
689 return rc;
690}
691
/* Module unload: unregister from the SDIO bus (invokes iwmct_remove
 * for any bound function). */
static void __exit iwmct_exit(void)
{
	sdio_unregister_driver(&iwmct_driver);
}

module_init(iwmct_init);
module_exit(iwmct_exit);
699
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 04fb8b0ca3e6..e012c2e0825a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1741,6 +1741,7 @@ config KS8851
1741config KS8851_MLL 1741config KS8851_MLL
1742 tristate "Micrel KS8851 MLL" 1742 tristate "Micrel KS8851 MLL"
1743 depends on HAS_IOMEM 1743 depends on HAS_IOMEM
1744 select MII
1744 help 1745 help
1745 This platform driver is for Micrel KS8851 Address/data bus 1746 This platform driver is for Micrel KS8851 Address/data bus
1746 multiplexed network chip. 1747 multiplexed network chip.
@@ -2482,6 +2483,8 @@ config S6GMAC
2482 To compile this driver as a module, choose M here. The module 2483 To compile this driver as a module, choose M here. The module
2483 will be called s6gmac. 2484 will be called s6gmac.
2484 2485
2486source "drivers/net/stmmac/Kconfig"
2487
2485endif # NETDEV_1000 2488endif # NETDEV_1000
2486 2489
2487# 2490#
@@ -3232,7 +3235,7 @@ config VIRTIO_NET
3232 3235
3233config VMXNET3 3236config VMXNET3
3234 tristate "VMware VMXNET3 ethernet driver" 3237 tristate "VMware VMXNET3 ethernet driver"
3235 depends on PCI && X86 3238 depends on PCI && X86 && INET
3236 help 3239 help
3237 This driver supports VMware's vmxnet3 virtual ethernet NIC. 3240 This driver supports VMware's vmxnet3 virtual ethernet NIC.
3238 To compile this driver as a module, choose M here: the 3241 To compile this driver as a module, choose M here: the
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fc6c8bb92c50..246323d7f161 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
100obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 100obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
101obj-$(CONFIG_RIONET) += rionet.o 101obj-$(CONFIG_RIONET) += rionet.o
102obj-$(CONFIG_SH_ETH) += sh_eth.o 102obj-$(CONFIG_SH_ETH) += sh_eth.o
103obj-$(CONFIG_STMMAC_ETH) += stmmac/
103 104
104# 105#
105# end link order section 106# end link order section
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 2a7b7745cc55..0073d198715b 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -35,11 +35,13 @@
35 35
36#include <mach/regs-switch.h> 36#include <mach/regs-switch.h>
37#include <mach/regs-misc.h> 37#include <mach/regs-misc.h>
38#include <asm/mach/irq.h>
39#include <mach/regs-irq.h>
38 40
39#include "ks8695net.h" 41#include "ks8695net.h"
40 42
41#define MODULENAME "ks8695_ether" 43#define MODULENAME "ks8695_ether"
42#define MODULEVERSION "1.01" 44#define MODULEVERSION "1.02"
43 45
44/* 46/*
45 * Transmit and device reset timeout, default 5 seconds. 47 * Transmit and device reset timeout, default 5 seconds.
@@ -95,6 +97,9 @@ struct ks8695_skbuff {
95#define MAX_RX_DESC 16 97#define MAX_RX_DESC 16
96#define MAX_RX_DESC_MASK 0xf 98#define MAX_RX_DESC_MASK 0xf
97 99
100/*napi_weight have better more than rx DMA buffers*/
101#define NAPI_WEIGHT 64
102
98#define MAX_RXBUF_SIZE 0x700 103#define MAX_RXBUF_SIZE 0x700
99 104
100#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC) 105#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
@@ -120,6 +125,7 @@ enum ks8695_dtype {
120 * @dev: The platform device object for this interface 125 * @dev: The platform device object for this interface
121 * @dtype: The type of this device 126 * @dtype: The type of this device
122 * @io_regs: The ioremapped registers for this interface 127 * @io_regs: The ioremapped registers for this interface
128 * @napi : Add support NAPI for Rx
123 * @rx_irq_name: The textual name of the RX IRQ from the platform data 129 * @rx_irq_name: The textual name of the RX IRQ from the platform data
124 * @tx_irq_name: The textual name of the TX IRQ from the platform data 130 * @tx_irq_name: The textual name of the TX IRQ from the platform data
125 * @link_irq_name: The textual name of the link IRQ from the 131 * @link_irq_name: The textual name of the link IRQ from the
@@ -143,6 +149,7 @@ enum ks8695_dtype {
143 * @rx_ring_dma: The DMA mapped equivalent of rx_ring 149 * @rx_ring_dma: The DMA mapped equivalent of rx_ring
144 * @rx_buffers: The sk_buff mappings for the RX ring 150 * @rx_buffers: The sk_buff mappings for the RX ring
145 * @next_rx_desc_read: The next RX descriptor to read from on IRQ 151 * @next_rx_desc_read: The next RX descriptor to read from on IRQ
152 * @rx_lock: A lock to protect Rx irq function
146 * @msg_enable: The flags for which messages to emit 153 * @msg_enable: The flags for which messages to emit
147 */ 154 */
148struct ks8695_priv { 155struct ks8695_priv {
@@ -152,6 +159,8 @@ struct ks8695_priv {
152 enum ks8695_dtype dtype; 159 enum ks8695_dtype dtype;
153 void __iomem *io_regs; 160 void __iomem *io_regs;
154 161
162 struct napi_struct napi;
163
155 const char *rx_irq_name, *tx_irq_name, *link_irq_name; 164 const char *rx_irq_name, *tx_irq_name, *link_irq_name;
156 int rx_irq, tx_irq, link_irq; 165 int rx_irq, tx_irq, link_irq;
157 166
@@ -172,6 +181,7 @@ struct ks8695_priv {
172 dma_addr_t rx_ring_dma; 181 dma_addr_t rx_ring_dma;
173 struct ks8695_skbuff rx_buffers[MAX_RX_DESC]; 182 struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
174 int next_rx_desc_read; 183 int next_rx_desc_read;
184 spinlock_t rx_lock;
175 185
176 int msg_enable; 186 int msg_enable;
177}; 187};
@@ -392,29 +402,82 @@ ks8695_tx_irq(int irq, void *dev_id)
392} 402}
393 403
394/** 404/**
405 * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
406 * @ksp: Private data for the KS8695 Ethernet
407 *
408 * For KS8695 document:
409 * Interrupt Enable Register (offset 0xE204)
410 * Bit29 : WAN MAC Receive Interrupt Enable
411 * Bit16 : LAN MAC Receive Interrupt Enable
412 * Interrupt Status Register (Offset 0xF208)
413 * Bit29: WAN MAC Receive Status
414 * Bit16: LAN MAC Receive Status
415 * So, this Rx interrrupt enable/status bit number is equal
416 * as Rx IRQ number.
417 */
418static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
419{
420 return ksp->rx_irq;
421}
422
423/**
395 * ks8695_rx_irq - Receive IRQ handler 424 * ks8695_rx_irq - Receive IRQ handler
396 * @irq: The IRQ which went off (ignored) 425 * @irq: The IRQ which went off (ignored)
397 * @dev_id: The net_device for the interrupt 426 * @dev_id: The net_device for the interrupt
398 * 427 *
399 * Process the RX ring, passing any received packets up to the 428 * Inform NAPI that packet reception needs to be scheduled
400 * host. If we received anything other than errors, we then
401 * refill the ring.
402 */ 429 */
430
403static irqreturn_t 431static irqreturn_t
404ks8695_rx_irq(int irq, void *dev_id) 432ks8695_rx_irq(int irq, void *dev_id)
405{ 433{
406 struct net_device *ndev = (struct net_device *)dev_id; 434 struct net_device *ndev = (struct net_device *)dev_id;
407 struct ks8695_priv *ksp = netdev_priv(ndev); 435 struct ks8695_priv *ksp = netdev_priv(ndev);
436 unsigned long status;
437
438 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
439
440 spin_lock(&ksp->rx_lock);
441
442 status = readl(KS8695_IRQ_VA + KS8695_INTST);
443
444 /*clean rx status bit*/
445 writel(status | mask_bit , KS8695_IRQ_VA + KS8695_INTST);
446
447 if (status & mask_bit) {
448 if (napi_schedule_prep(&ksp->napi)) {
449 /*disable rx interrupt*/
450 status &= ~mask_bit;
451 writel(status , KS8695_IRQ_VA + KS8695_INTEN);
452 __napi_schedule(&ksp->napi);
453 }
454 }
455
456 spin_unlock(&ksp->rx_lock);
457 return IRQ_HANDLED;
458}
459
460/**
461 * ks8695_rx - Receive packets called by NAPI poll method
462 * @ksp: Private data for the KS8695 Ethernet
463 * @budget: The max packets would be receive
464 */
465
466static int ks8695_rx(struct ks8695_priv *ksp, int budget)
467{
468 struct net_device *ndev = ksp->ndev;
408 struct sk_buff *skb; 469 struct sk_buff *skb;
409 int buff_n; 470 int buff_n;
410 u32 flags; 471 u32 flags;
411 int pktlen; 472 int pktlen;
412 int last_rx_processed = -1; 473 int last_rx_processed = -1;
474 int received = 0;
413 475
414 buff_n = ksp->next_rx_desc_read; 476 buff_n = ksp->next_rx_desc_read;
415 do { 477 while (received < budget
416 if (ksp->rx_buffers[buff_n].skb && 478 && ksp->rx_buffers[buff_n].skb
417 !(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN))) { 479 && (!(ksp->rx_ring[buff_n].status &
480 cpu_to_le32(RDES_OWN)))) {
418 rmb(); 481 rmb();
419 flags = le32_to_cpu(ksp->rx_ring[buff_n].status); 482 flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
420 /* Found an SKB which we own, this means we 483 /* Found an SKB which we own, this means we
@@ -464,7 +527,7 @@ ks8695_rx_irq(int irq, void *dev_id)
464 /* Relinquish the SKB to the network layer */ 527 /* Relinquish the SKB to the network layer */
465 skb_put(skb, pktlen); 528 skb_put(skb, pktlen);
466 skb->protocol = eth_type_trans(skb, ndev); 529 skb->protocol = eth_type_trans(skb, ndev);
467 netif_rx(skb); 530 netif_receive_skb(skb);
468 531
469 /* Record stats */ 532 /* Record stats */
470 ndev->stats.rx_packets++; 533 ndev->stats.rx_packets++;
@@ -478,29 +541,56 @@ rx_failure:
478 /* Give the ring entry back to the hardware */ 541 /* Give the ring entry back to the hardware */
479 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); 542 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
480rx_finished: 543rx_finished:
544 received++;
481 /* And note this as processed so we can start 545 /* And note this as processed so we can start
482 * from here next time 546 * from here next time
483 */ 547 */
484 last_rx_processed = buff_n; 548 last_rx_processed = buff_n;
485 } else { 549 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
486 /* Ran out of things to process, stop now */ 550 /*And note which RX descriptor we last did */
487 break; 551 if (likely(last_rx_processed != -1))
488 } 552 ksp->next_rx_desc_read =
489 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK; 553 (last_rx_processed + 1) &
490 } while (buff_n != ksp->next_rx_desc_read); 554 MAX_RX_DESC_MASK;
555
556 /* And refill the buffers */
557 ks8695_refill_rxbuffers(ksp);
558
559 /* Kick the RX DMA engine, in case it became
560 * suspended */
561 ks8695_writereg(ksp, KS8695_DRSC, 0);
562 }
563 return received;
564}
491 565
492 /* And note which RX descriptor we last did anything with */
493 if (likely(last_rx_processed != -1))
494 ksp->next_rx_desc_read =
495 (last_rx_processed + 1) & MAX_RX_DESC_MASK;
496 566
497 /* And refill the buffers */ 567/**
498 ks8695_refill_rxbuffers(ksp); 568 * ks8695_poll - Receive packet by NAPI poll method
569 * @ksp: Private data for the KS8695 Ethernet
570 * @budget: The remaining number packets for network subsystem
571 *
572 * Invoked by the network core when it requests for new
573 * packets from the driver
574 */
575static int ks8695_poll(struct napi_struct *napi, int budget)
576{
577 struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
578 unsigned long work_done;
499 579
500 /* Kick the RX DMA engine, in case it became suspended */ 580 unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
501 ks8695_writereg(ksp, KS8695_DRSC, 0); 581 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
502 582
503 return IRQ_HANDLED; 583 work_done = ks8695_rx(ksp, budget);
584
585 if (work_done < budget) {
586 unsigned long flags;
587 spin_lock_irqsave(&ksp->rx_lock, flags);
588 /*enable rx interrupt*/
589 writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
590 __napi_complete(napi);
591 spin_unlock_irqrestore(&ksp->rx_lock, flags);
592 }
593 return work_done;
504} 594}
505 595
506/** 596/**
@@ -1253,6 +1343,7 @@ ks8695_stop(struct net_device *ndev)
1253 struct ks8695_priv *ksp = netdev_priv(ndev); 1343 struct ks8695_priv *ksp = netdev_priv(ndev);
1254 1344
1255 netif_stop_queue(ndev); 1345 netif_stop_queue(ndev);
1346 napi_disable(&ksp->napi);
1256 netif_carrier_off(ndev); 1347 netif_carrier_off(ndev);
1257 1348
1258 ks8695_shutdown(ksp); 1349 ks8695_shutdown(ksp);
@@ -1287,6 +1378,7 @@ ks8695_open(struct net_device *ndev)
1287 return ret; 1378 return ret;
1288 } 1379 }
1289 1380
1381 napi_enable(&ksp->napi);
1290 netif_start_queue(ndev); 1382 netif_start_queue(ndev);
1291 1383
1292 return 0; 1384 return 0;
@@ -1472,6 +1564,8 @@ ks8695_probe(struct platform_device *pdev)
1472 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); 1564 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1473 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 1565 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1474 1566
1567 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
1568
1475 /* Retrieve the default MAC addr from the chip. */ 1569 /* Retrieve the default MAC addr from the chip. */
1476 /* The bootloader should have left it in there for us. */ 1570 /* The bootloader should have left it in there for us. */
1477 1571
@@ -1505,6 +1599,7 @@ ks8695_probe(struct platform_device *pdev)
1505 1599
1506 /* And initialise the queue's lock */ 1600 /* And initialise the queue's lock */
1507 spin_lock_init(&ksp->txq_lock); 1601 spin_lock_init(&ksp->txq_lock);
1602 spin_lock_init(&ksp->rx_lock);
1508 1603
1509 /* Specify the RX DMA ring buffer */ 1604 /* Specify the RX DMA ring buffer */
1510 ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE; 1605 ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
@@ -1626,6 +1721,7 @@ ks8695_drv_remove(struct platform_device *pdev)
1626 struct ks8695_priv *ksp = netdev_priv(ndev); 1721 struct ks8695_priv *ksp = netdev_priv(ndev);
1627 1722
1628 platform_set_drvdata(pdev, NULL); 1723 platform_set_drvdata(pdev, NULL);
1724 netif_napi_del(&ksp->napi);
1629 1725
1630 unregister_netdev(ndev); 1726 unregister_netdev(ndev);
1631 ks8695_release_device(ksp); 1727 ks8695_release_device(ksp);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1372e9a99f5b..3b8801a39726 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1981,8 +1981,6 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
1981 else { 1981 else {
1982 use_tpd = atl1c_get_tpd(adapter, type); 1982 use_tpd = atl1c_get_tpd(adapter, type);
1983 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); 1983 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
1984 use_tpd = atl1c_get_tpd(adapter, type);
1985 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
1986 } 1984 }
1987 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); 1985 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
1988 buffer_info->length = buf_len - mapped_len; 1986 buffer_info->length = buf_len - mapped_len;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 04f63c77071d..ce6f1ac25df8 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -34,6 +34,7 @@
34 * 34 *
35 * 35 *
36 */ 36 */
37#include <linux/capability.h>
37#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
38#include <linux/module.h> 39#include <linux/module.h>
39#include <linux/kernel.h> 40#include <linux/kernel.h>
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 25b6602e464c..cc75dd0df0d8 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
243 243
244int be_cmd_POST(struct be_adapter *adapter) 244int be_cmd_POST(struct be_adapter *adapter)
245{ 245{
246 u16 stage, error; 246 u16 stage;
247 int status, timeout = 0;
247 248
248 error = be_POST_stage_get(adapter, &stage); 249 do {
249 if (error || stage != POST_STAGE_ARMFW_RDY) { 250 status = be_POST_stage_get(adapter, &stage);
250 dev_err(&adapter->pdev->dev, "POST failed.\n"); 251 if (status) {
251 return -1; 252 dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
252 } 253 stage);
254 return -1;
255 } else if (stage != POST_STAGE_ARMFW_RDY) {
256 set_current_state(TASK_INTERRUPTIBLE);
257 schedule_timeout(2 * HZ);
258 timeout += 2;
259 } else {
260 return 0;
261 }
262 } while (timeout < 20);
253 263
254 return 0; 264 dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
265 return -1;
255} 266}
256 267
257static inline void *embedded_payload(struct be_mcc_wrb *wrb) 268static inline void *embedded_payload(struct be_mcc_wrb *wrb)
@@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
729/* Create an rx filtering policy configuration on an i/f 740/* Create an rx filtering policy configuration on an i/f
730 * Uses mbox 741 * Uses mbox
731 */ 742 */
732int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac, 743int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
733 bool pmac_invalid, u32 *if_handle, u32 *pmac_id) 744 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
734{ 745{
735 struct be_mcc_wrb *wrb; 746 struct be_mcc_wrb *wrb;
736 struct be_cmd_req_if_create *req; 747 struct be_cmd_req_if_create *req;
@@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
746 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 757 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
747 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); 758 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
748 759
749 req->capability_flags = cpu_to_le32(flags); 760 req->capability_flags = cpu_to_le32(cap_flags);
750 req->enable_flags = cpu_to_le32(flags); 761 req->enable_flags = cpu_to_le32(en_flags);
751 req->pmac_invalid = pmac_invalid; 762 req->pmac_invalid = pmac_invalid;
752 if (!pmac_invalid) 763 if (!pmac_invalid)
753 memcpy(req->mac_addr, mac, ETH_ALEN); 764 memcpy(req->mac_addr, mac, ETH_ALEN);
@@ -823,7 +834,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
823 834
824/* Uses synchronous mcc */ 835/* Uses synchronous mcc */
825int be_cmd_link_status_query(struct be_adapter *adapter, 836int be_cmd_link_status_query(struct be_adapter *adapter,
826 bool *link_up) 837 bool *link_up, u8 *mac_speed, u16 *link_speed)
827{ 838{
828 struct be_mcc_wrb *wrb; 839 struct be_mcc_wrb *wrb;
829 struct be_cmd_req_link_status *req; 840 struct be_cmd_req_link_status *req;
@@ -844,8 +855,11 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
844 status = be_mcc_notify_wait(adapter); 855 status = be_mcc_notify_wait(adapter);
845 if (!status) { 856 if (!status) {
846 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 857 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
847 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) 858 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
848 *link_up = true; 859 *link_up = true;
860 *link_speed = le16_to_cpu(resp->link_speed);
861 *mac_speed = resp->mac_speed;
862 }
849 } 863 }
850 864
851 spin_unlock_bh(&adapter->mcc_lock); 865 spin_unlock_bh(&adapter->mcc_lock);
@@ -1177,6 +1191,36 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1177 return status; 1191 return status;
1178} 1192}
1179 1193
1194/* Uses sync mcc */
1195int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
1196 u8 *connector)
1197{
1198 struct be_mcc_wrb *wrb;
1199 struct be_cmd_req_port_type *req;
1200 int status;
1201
1202 spin_lock_bh(&adapter->mcc_lock);
1203
1204 wrb = wrb_from_mccq(adapter);
1205 req = embedded_payload(wrb);
1206
1207 be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0);
1208
1209 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1210 OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
1211
1212 req->port = cpu_to_le32(port);
1213 req->page_num = cpu_to_le32(TR_PAGE_A0);
1214 status = be_mcc_notify_wait(adapter);
1215 if (!status) {
1216 struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
1217 *connector = resp->data.connector;
1218 }
1219
1220 spin_unlock_bh(&adapter->mcc_lock);
1221 return status;
1222}
1223
1180int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 1224int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1181 u32 flash_type, u32 flash_opcode, u32 buf_size) 1225 u32 flash_type, u32 flash_opcode, u32 buf_size)
1182{ 1226{
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index a1e78cc3e171..69dc017c814b 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -68,7 +68,7 @@ enum {
68#define CQE_STATUS_COMPL_MASK 0xFFFF 68#define CQE_STATUS_COMPL_MASK 0xFFFF
69#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 69#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
70#define CQE_STATUS_EXTD_MASK 0xFFFF 70#define CQE_STATUS_EXTD_MASK 0xFFFF
71#define CQE_STATUS_EXTD_SHIFT 0 /* bits 0 - 15 */ 71#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
72 72
73struct be_mcc_compl { 73struct be_mcc_compl {
74 u32 status; /* dword 0 */ 74 u32 status; /* dword 0 */
@@ -140,6 +140,7 @@ struct be_mcc_mailbox {
140#define OPCODE_COMMON_FUNCTION_RESET 61 140#define OPCODE_COMMON_FUNCTION_RESET 61
141#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69 141#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
142#define OPCODE_COMMON_GET_BEACON_STATE 70 142#define OPCODE_COMMON_GET_BEACON_STATE 70
143#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
143 144
144#define OPCODE_ETH_ACPI_CONFIG 2 145#define OPCODE_ETH_ACPI_CONFIG 2
145#define OPCODE_ETH_PROMISCUOUS 3 146#define OPCODE_ETH_PROMISCUOUS 3
@@ -635,9 +636,47 @@ struct be_cmd_resp_link_status {
635 u8 mac_fault; 636 u8 mac_fault;
636 u8 mgmt_mac_duplex; 637 u8 mgmt_mac_duplex;
637 u8 mgmt_mac_speed; 638 u8 mgmt_mac_speed;
638 u16 rsvd0; 639 u16 link_speed;
640 u32 rsvd0;
639} __packed; 641} __packed;
640 642
643/******************** Port Identification ***************************/
644/* Identifies the type of port attached to NIC */
645struct be_cmd_req_port_type {
646 struct be_cmd_req_hdr hdr;
647 u32 page_num;
648 u32 port;
649};
650
651enum {
652 TR_PAGE_A0 = 0xa0,
653 TR_PAGE_A2 = 0xa2
654};
655
656struct be_cmd_resp_port_type {
657 struct be_cmd_resp_hdr hdr;
658 u32 page_num;
659 u32 port;
660 struct data {
661 u8 identifier;
662 u8 identifier_ext;
663 u8 connector;
664 u8 transceiver[8];
665 u8 rsvd0[3];
666 u8 length_km;
667 u8 length_hm;
668 u8 length_om1;
669 u8 length_om2;
670 u8 length_cu;
671 u8 length_cu_m;
672 u8 vendor_name[16];
673 u8 rsvd;
674 u8 vendor_oui[3];
675 u8 vendor_pn[16];
676 u8 vendor_rev[4];
677 } data;
678};
679
641/******************** Get FW Version *******************/ 680/******************** Get FW Version *******************/
642struct be_cmd_req_get_fw_version { 681struct be_cmd_req_get_fw_version {
643 struct be_cmd_req_hdr hdr; 682 struct be_cmd_req_hdr hdr;
@@ -753,8 +792,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
753extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 792extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
754 u32 if_id, u32 *pmac_id); 793 u32 if_id, u32 *pmac_id);
755extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); 794extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
756extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac, 795extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
757 bool pmac_invalid, u32 *if_handle, u32 *pmac_id); 796 u32 en_flags, u8 *mac, bool pmac_invalid,
797 u32 *if_handle, u32 *pmac_id);
758extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); 798extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
759extern int be_cmd_eq_create(struct be_adapter *adapter, 799extern int be_cmd_eq_create(struct be_adapter *adapter,
760 struct be_queue_info *eq, int eq_delay); 800 struct be_queue_info *eq, int eq_delay);
@@ -775,7 +815,7 @@ extern int be_cmd_rxq_create(struct be_adapter *adapter,
775extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 815extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
776 int type); 816 int type);
777extern int be_cmd_link_status_query(struct be_adapter *adapter, 817extern int be_cmd_link_status_query(struct be_adapter *adapter,
778 bool *link_up); 818 bool *link_up, u8 *mac_speed, u16 *link_speed);
779extern int be_cmd_reset(struct be_adapter *adapter); 819extern int be_cmd_reset(struct be_adapter *adapter);
780extern int be_cmd_get_stats(struct be_adapter *adapter, 820extern int be_cmd_get_stats(struct be_adapter *adapter,
781 struct be_dma_mem *nonemb_cmd); 821 struct be_dma_mem *nonemb_cmd);
@@ -801,6 +841,8 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
801 u8 port_num, u8 beacon, u8 status, u8 state); 841 u8 port_num, u8 beacon, u8 status, u8 state);
802extern int be_cmd_get_beacon_state(struct be_adapter *adapter, 842extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
803 u8 port_num, u32 *state); 843 u8 port_num, u32 *state);
844extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
845 u8 *connector);
804extern int be_cmd_write_flashrom(struct be_adapter *adapter, 846extern int be_cmd_write_flashrom(struct be_adapter *adapter,
805 struct be_dma_mem *cmd, u32 flash_oper, 847 struct be_dma_mem *cmd, u32 flash_oper,
806 u32 flash_opcode, u32 buf_size); 848 u32 flash_opcode, u32 buf_size);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 280471e18695..edebce994906 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -293,9 +293,43 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
293 293
294static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 294static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
295{ 295{
296 ecmd->speed = SPEED_10000; 296 struct be_adapter *adapter = netdev_priv(netdev);
297 u8 mac_speed = 0, connector = 0;
298 u16 link_speed = 0;
299 bool link_up = false;
300
301 be_cmd_link_status_query(adapter, &link_up, &mac_speed, &link_speed);
302
303 /* link_speed is in units of 10 Mbps */
304 if (link_speed) {
305 ecmd->speed = link_speed*10;
306 } else {
307 switch (mac_speed) {
308 case PHY_LINK_SPEED_1GBPS:
309 ecmd->speed = SPEED_1000;
310 break;
311 case PHY_LINK_SPEED_10GBPS:
312 ecmd->speed = SPEED_10000;
313 break;
314 }
315 }
297 ecmd->duplex = DUPLEX_FULL; 316 ecmd->duplex = DUPLEX_FULL;
298 ecmd->autoneg = AUTONEG_DISABLE; 317 ecmd->autoneg = AUTONEG_DISABLE;
318 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
319
320 be_cmd_read_port_type(adapter, adapter->port_num, &connector);
321 switch (connector) {
322 case 7:
323 ecmd->port = PORT_FIBRE;
324 break;
325 default:
326 ecmd->port = PORT_TP;
327 break;
328 }
329
330 ecmd->phy_address = adapter->port_num;
331 ecmd->transceiver = XCVR_INTERNAL;
332
299 return 0; 333 return 0;
300} 334}
301 335
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index e0f9d6477184..43180dc210a2 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -31,8 +31,10 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
31 31
32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { 32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
34 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
34 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 36 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
37 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
36 { 0 } 38 { 0 }
37}; 39};
38MODULE_DEVICE_TABLE(pci, be_dev_ids); 40MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -1586,6 +1588,8 @@ static int be_open(struct net_device *netdev)
1586 struct be_eq_obj *tx_eq = &adapter->tx_eq; 1588 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1587 bool link_up; 1589 bool link_up;
1588 int status; 1590 int status;
1591 u8 mac_speed;
1592 u16 link_speed;
1589 1593
1590 /* First time posting */ 1594 /* First time posting */
1591 be_post_rx_frags(adapter); 1595 be_post_rx_frags(adapter);
@@ -1604,7 +1608,8 @@ static int be_open(struct net_device *netdev)
1604 /* Rx compl queue may be in unarmed state; rearm it */ 1608 /* Rx compl queue may be in unarmed state; rearm it */
1605 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0); 1609 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
1606 1610
1607 status = be_cmd_link_status_query(adapter, &link_up); 1611 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
1612 &link_speed);
1608 if (status) 1613 if (status)
1609 return status; 1614 return status;
1610 be_link_status_update(adapter, link_up); 1615 be_link_status_update(adapter, link_up);
@@ -1616,19 +1621,22 @@ static int be_open(struct net_device *netdev)
1616static int be_setup(struct be_adapter *adapter) 1621static int be_setup(struct be_adapter *adapter)
1617{ 1622{
1618 struct net_device *netdev = adapter->netdev; 1623 struct net_device *netdev = adapter->netdev;
1619 u32 if_flags; 1624 u32 cap_flags, en_flags;
1620 int status; 1625 int status;
1621 1626
1622 if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS | 1627 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
1623 BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED | 1628 BE_IF_FLAGS_MCAST_PROMISCUOUS |
1624 BE_IF_FLAGS_PASS_L3L4_ERRORS; 1629 BE_IF_FLAGS_PROMISCUOUS |
1625 status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr, 1630 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1626 false/* pmac_invalid */, &adapter->if_handle, 1631 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
1627 &adapter->pmac_id); 1632 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1633
1634 status = be_cmd_if_create(adapter, cap_flags, en_flags,
1635 netdev->dev_addr, false/* pmac_invalid */,
1636 &adapter->if_handle, &adapter->pmac_id);
1628 if (status != 0) 1637 if (status != 0)
1629 goto do_none; 1638 goto do_none;
1630 1639
1631
1632 status = be_tx_queues_create(adapter); 1640 status = be_tx_queues_create(adapter);
1633 if (status != 0) 1641 if (status != 0)
1634 goto if_destroy; 1642 goto if_destroy;
@@ -2051,6 +2059,10 @@ static int be_hw_up(struct be_adapter *adapter)
2051 if (status) 2059 if (status)
2052 return status; 2060 return status;
2053 2061
2062 status = be_cmd_reset_function(adapter);
2063 if (status)
2064 return status;
2065
2054 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver); 2066 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2055 if (status) 2067 if (status)
2056 return status; 2068 return status;
@@ -2104,10 +2116,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
2104 if (status) 2116 if (status)
2105 goto free_netdev; 2117 goto free_netdev;
2106 2118
2107 status = be_cmd_reset_function(adapter);
2108 if (status)
2109 goto ctrl_clean;
2110
2111 status = be_stats_init(adapter); 2119 status = be_stats_init(adapter);
2112 if (status) 2120 if (status)
2113 goto ctrl_clean; 2121 goto ctrl_clean;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 08cddb6ff740..539d23b594ce 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1466,6 +1466,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
1466 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1466 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1467 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1467 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1468 bmcr |= BCM5708S_BMCR_FORCE_2500; 1468 bmcr |= BCM5708S_BMCR_FORCE_2500;
1469 } else {
1470 return;
1469 } 1471 }
1470 1472
1471 if (bp->autoneg & AUTONEG_SPEED) { 1473 if (bp->autoneg & AUTONEG_SPEED) {
@@ -1500,6 +1502,8 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
1500 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1502 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1501 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1503 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1502 bmcr &= ~BCM5708S_BMCR_FORCE_2500; 1504 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1505 } else {
1506 return;
1503 } 1507 }
1504 1508
1505 if (bp->autoneg & AUTONEG_SPEED) 1509 if (bp->autoneg & AUTONEG_SPEED)
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 6c7f795d12de..a4d83409f205 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,9 +361,12 @@ struct l2_fhdr {
361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) 361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28)
362 362
363#define BNX2_L2CTX_HOST_BDIDX 0x00000004 363#define BNX2_L2CTX_HOST_BDIDX 0x00000004
364#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16 364#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT 16
365#define BNX2_L2CTX_STATUSB_NUM(sb_id) \ 365#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT 24
366 (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0) 366#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id) \
367 (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0)
368#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id) \
369 (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0)
367#define BNX2_L2CTX_HOST_BSEQ 0x00000008 370#define BNX2_L2CTX_HOST_BSEQ 0x00000008
368#define BNX2_L2CTX_NX_BSEQ 0x0000000c 371#define BNX2_L2CTX_NX_BSEQ 0x0000000c
369#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 372#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index dc2f8ed5fd07..52585338ada8 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -264,6 +264,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
264#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 264#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
265#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 265#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
266#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 266#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
267#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
267#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 268#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
268#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 269#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
269 270
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index e32d3370862e..41b9b7bd3d8e 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1107,18 +1107,21 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
1107 MDIO_REG_BANK_SERDES_DIGITAL, 1107 MDIO_REG_BANK_SERDES_DIGITAL,
1108 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1108 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1109 &control2); 1109 &control2);
1110 1110 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1111 1111 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1112 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1112 else
1113 1113 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1114 1114 DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1115 params->speed_cap_mask, control2);
1115 CL45_WR_OVER_CL22(bp, params->port, 1116 CL45_WR_OVER_CL22(bp, params->port,
1116 params->phy_addr, 1117 params->phy_addr,
1117 MDIO_REG_BANK_SERDES_DIGITAL, 1118 MDIO_REG_BANK_SERDES_DIGITAL,
1118 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1119 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1119 control2); 1120 control2);
1120 1121
1121 if (phy_flags & PHY_XGXS_FLAG) { 1122 if ((phy_flags & PHY_XGXS_FLAG) &&
1123 (params->speed_cap_mask &
1124 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1122 DP(NETIF_MSG_LINK, "XGXS\n"); 1125 DP(NETIF_MSG_LINK, "XGXS\n");
1123 1126
1124 CL45_WR_OVER_CL22(bp, params->port, 1127 CL45_WR_OVER_CL22(bp, params->port,
@@ -1225,7 +1228,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1225 params->phy_addr, 1228 params->phy_addr,
1226 MDIO_REG_BANK_CL73_USERB0, 1229 MDIO_REG_BANK_CL73_USERB0,
1227 MDIO_CL73_USERB0_CL73_UCTRL, 1230 MDIO_CL73_USERB0_CL73_UCTRL,
1228 MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL); 1231 0xe);
1229 1232
1230 /* Enable BAM Station Manager*/ 1233 /* Enable BAM Station Manager*/
1231 CL45_WR_OVER_CL22(bp, params->port, 1234 CL45_WR_OVER_CL22(bp, params->port,
@@ -1236,29 +1239,25 @@ static void bnx2x_set_autoneg(struct link_params *params,
1236 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN | 1239 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
1237 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1240 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
1238 1241
1239 /* Merge CL73 and CL37 aneg resolution */ 1242 /* Advertise CL73 link speeds */
1240 CL45_RD_OVER_CL22(bp, params->port,
1241 params->phy_addr,
1242 MDIO_REG_BANK_CL73_USERB0,
1243 MDIO_CL73_USERB0_CL73_BAM_CTRL3,
1244 &reg_val);
1245
1246 if (params->speed_cap_mask &
1247 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
1248 /* Set the CL73 AN speed */
1249 CL45_RD_OVER_CL22(bp, params->port, 1243 CL45_RD_OVER_CL22(bp, params->port,
1250 params->phy_addr, 1244 params->phy_addr,
1251 MDIO_REG_BANK_CL73_IEEEB1, 1245 MDIO_REG_BANK_CL73_IEEEB1,
1252 MDIO_CL73_IEEEB1_AN_ADV2, 1246 MDIO_CL73_IEEEB1_AN_ADV2,
1253 &reg_val); 1247 &reg_val);
1248 if (params->speed_cap_mask &
1249 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1250 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
1251 if (params->speed_cap_mask &
1252 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1253 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
1254 1254
1255 CL45_WR_OVER_CL22(bp, params->port, 1255 CL45_WR_OVER_CL22(bp, params->port,
1256 params->phy_addr, 1256 params->phy_addr,
1257 MDIO_REG_BANK_CL73_IEEEB1, 1257 MDIO_REG_BANK_CL73_IEEEB1,
1258 MDIO_CL73_IEEEB1_AN_ADV2, 1258 MDIO_CL73_IEEEB1_AN_ADV2,
1259 reg_val | MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4); 1259 reg_val);
1260 1260
1261 }
1262 /* CL73 Autoneg Enabled */ 1261 /* CL73 Autoneg Enabled */
1263 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; 1262 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
1264 1263
@@ -1351,6 +1350,7 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1351 1350
1352static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc) 1351static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1353{ 1352{
1353 struct bnx2x *bp = params->bp;
1354 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 1354 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1355 /* resolve pause mode and advertisement 1355 /* resolve pause mode and advertisement
1356 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 1356 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
@@ -1380,18 +1380,30 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1380 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 1380 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
1381 break; 1381 break;
1382 } 1382 }
1383 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
1383} 1384}
1384 1385
1385static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, 1386static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
1386 u16 ieee_fc) 1387 u16 ieee_fc)
1387{ 1388{
1388 struct bnx2x *bp = params->bp; 1389 struct bnx2x *bp = params->bp;
1390 u16 val;
1389 /* for AN, we are always publishing full duplex */ 1391 /* for AN, we are always publishing full duplex */
1390 1392
1391 CL45_WR_OVER_CL22(bp, params->port, 1393 CL45_WR_OVER_CL22(bp, params->port,
1392 params->phy_addr, 1394 params->phy_addr,
1393 MDIO_REG_BANK_COMBO_IEEE0, 1395 MDIO_REG_BANK_COMBO_IEEE0,
1394 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 1396 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
1397 CL45_RD_OVER_CL22(bp, params->port,
1398 params->phy_addr,
1399 MDIO_REG_BANK_CL73_IEEEB1,
1400 MDIO_CL73_IEEEB1_AN_ADV1, &val);
1401 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
1402 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
1403 CL45_WR_OVER_CL22(bp, params->port,
1404 params->phy_addr,
1405 MDIO_REG_BANK_CL73_IEEEB1,
1406 MDIO_CL73_IEEEB1_AN_ADV1, val);
1395} 1407}
1396 1408
1397static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73) 1409static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
@@ -1609,6 +1621,39 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
1609 return ret; 1621 return ret;
1610} 1622}
1611 1623
1624static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1625{
1626 struct bnx2x *bp = params->bp;
1627 u16 pd_10g, status2_1000x;
1628 CL45_RD_OVER_CL22(bp, params->port,
1629 params->phy_addr,
1630 MDIO_REG_BANK_SERDES_DIGITAL,
1631 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1632 &status2_1000x);
1633 CL45_RD_OVER_CL22(bp, params->port,
1634 params->phy_addr,
1635 MDIO_REG_BANK_SERDES_DIGITAL,
1636 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1637 &status2_1000x);
1638 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
1639 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
1640 params->port);
1641 return 1;
1642 }
1643
1644 CL45_RD_OVER_CL22(bp, params->port,
1645 params->phy_addr,
1646 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1647 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
1648 &pd_10g);
1649
1650 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
1651 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
1652 params->port);
1653 return 1;
1654 }
1655 return 0;
1656}
1612 1657
1613static void bnx2x_flow_ctrl_resolve(struct link_params *params, 1658static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1614 struct link_vars *vars, 1659 struct link_vars *vars,
@@ -1627,21 +1672,53 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1627 (!(vars->phy_flags & PHY_SGMII_FLAG)) && 1672 (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
1628 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1673 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1629 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) { 1674 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1630 CL45_RD_OVER_CL22(bp, params->port, 1675 if (bnx2x_direct_parallel_detect_used(params)) {
1631 params->phy_addr, 1676 vars->flow_ctrl = params->req_fc_auto_adv;
1632 MDIO_REG_BANK_COMBO_IEEE0, 1677 return;
1633 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 1678 }
1634 &ld_pause); 1679 if ((gp_status &
1635 CL45_RD_OVER_CL22(bp, params->port, 1680 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1636 params->phy_addr, 1681 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
1637 MDIO_REG_BANK_COMBO_IEEE0, 1682 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1638 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 1683 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
1639 &lp_pause); 1684
1640 pause_result = (ld_pause & 1685 CL45_RD_OVER_CL22(bp, params->port,
1686 params->phy_addr,
1687 MDIO_REG_BANK_CL73_IEEEB1,
1688 MDIO_CL73_IEEEB1_AN_ADV1,
1689 &ld_pause);
1690 CL45_RD_OVER_CL22(bp, params->port,
1691 params->phy_addr,
1692 MDIO_REG_BANK_CL73_IEEEB1,
1693 MDIO_CL73_IEEEB1_AN_LP_ADV1,
1694 &lp_pause);
1695 pause_result = (ld_pause &
1696 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
1697 >> 8;
1698 pause_result |= (lp_pause &
1699 MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
1700 >> 10;
1701 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
1702 pause_result);
1703 } else {
1704
1705 CL45_RD_OVER_CL22(bp, params->port,
1706 params->phy_addr,
1707 MDIO_REG_BANK_COMBO_IEEE0,
1708 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1709 &ld_pause);
1710 CL45_RD_OVER_CL22(bp, params->port,
1711 params->phy_addr,
1712 MDIO_REG_BANK_COMBO_IEEE0,
1713 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1714 &lp_pause);
1715 pause_result = (ld_pause &
1641 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; 1716 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1642 pause_result |= (lp_pause & 1717 pause_result |= (lp_pause &
1643 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 1718 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1644 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result); 1719 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
1720 pause_result);
1721 }
1645 bnx2x_pause_resolve(vars, pause_result); 1722 bnx2x_pause_resolve(vars, pause_result);
1646 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && 1723 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1647 (bnx2x_ext_phy_resolve_fc(params, vars))) { 1724 (bnx2x_ext_phy_resolve_fc(params, vars))) {
@@ -1853,6 +1930,8 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1853 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1930 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1854 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || 1931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
1855 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1932 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1933 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
1934 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1856 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) { 1935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
1857 vars->autoneg = AUTO_NEG_ENABLED; 1936 vars->autoneg = AUTO_NEG_ENABLED;
1858 1937
@@ -1987,8 +2066,7 @@ static u8 bnx2x_emac_program(struct link_params *params,
1987 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 2066 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
1988 mode); 2067 mode);
1989 2068
1990 bnx2x_set_led(bp, params->port, LED_MODE_OPER, 2069 bnx2x_set_led(params, LED_MODE_OPER, line_speed);
1991 line_speed, params->hw_led_mode, params->chip_id);
1992 return 0; 2070 return 0;
1993} 2071}
1994 2072
@@ -2122,6 +2200,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
2122 MDIO_PMA_REG_CTRL, 2200 MDIO_PMA_REG_CTRL,
2123 1<<15); 2201 1<<15);
2124 break; 2202 break;
2203 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
2204 break;
2125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 2205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2126 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n"); 2206 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
2127 break; 2207 break;
@@ -2512,16 +2592,11 @@ static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
2512 /* Need to wait 100ms after reset */ 2592 /* Need to wait 100ms after reset */
2513 msleep(100); 2593 msleep(100);
2514 2594
2515 /* Set serial boot control for external load */
2516 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2517 MDIO_PMA_DEVAD,
2518 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2519
2520 /* Micro controller re-boot */ 2595 /* Micro controller re-boot */
2521 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2596 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2522 MDIO_PMA_DEVAD, 2597 MDIO_PMA_DEVAD,
2523 MDIO_PMA_REG_GEN_CTRL, 2598 MDIO_PMA_REG_GEN_CTRL,
2524 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 2599 0x018B);
2525 2600
2526 /* Set soft reset */ 2601 /* Set soft reset */
2527 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2602 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
@@ -2529,14 +2604,10 @@ static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
2529 MDIO_PMA_REG_GEN_CTRL, 2604 MDIO_PMA_REG_GEN_CTRL,
2530 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 2605 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2531 2606
2532 /* Set PLL register value to be same like in P13 ver */
2533 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2607 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2534 MDIO_PMA_DEVAD, 2608 MDIO_PMA_DEVAD,
2535 MDIO_PMA_REG_PLL_CTRL, 2609 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2536 0x73A0);
2537 2610
2538 /* Clear soft reset.
2539 Will automatically reset micro-controller re-boot */
2540 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2611 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2541 MDIO_PMA_DEVAD, 2612 MDIO_PMA_DEVAD,
2542 MDIO_PMA_REG_GEN_CTRL, 2613 MDIO_PMA_REG_GEN_CTRL,
@@ -3462,8 +3533,8 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
3462 MDIO_PMA_REG_8481_LINK_SIGNAL, 3533 MDIO_PMA_REG_8481_LINK_SIGNAL,
3463 &val1); 3534 &val1);
3464 /* Set bit 2 to 0, and bits [1:0] to 10 */ 3535 /* Set bit 2 to 0, and bits [1:0] to 10 */
3465 val1 &= ~((1<<0) | (1<<2)); /* Clear bits 0,2*/ 3536 val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
3466 val1 |= (1<<1); /* Set bit 1 */ 3537 val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
3467 3538
3468 bnx2x_cl45_write(bp, params->port, 3539 bnx2x_cl45_write(bp, params->port,
3469 ext_phy_type, 3540 ext_phy_type,
@@ -3497,36 +3568,19 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
3497 MDIO_PMA_REG_8481_LED2_MASK, 3568 MDIO_PMA_REG_8481_LED2_MASK,
3498 0); 3569 0);
3499 3570
3500 /* LED3 (10G/1G/100/10G Activity) */ 3571 /* Unmask LED3 for 10G link */
3501 bnx2x_cl45_read(bp, params->port,
3502 ext_phy_type,
3503 ext_phy_addr,
3504 MDIO_PMA_DEVAD,
3505 MDIO_PMA_REG_8481_LINK_SIGNAL,
3506 &val1);
3507 /* Enable blink based on source 4(Activity) */
3508 val1 &= ~((1<<7) | (1<<8)); /* Clear bits 7,8 */
3509 val1 |= (1<<6); /* Set only bit 6 */
3510 bnx2x_cl45_write(bp, params->port, 3572 bnx2x_cl45_write(bp, params->port,
3511 ext_phy_type, 3573 ext_phy_type,
3512 ext_phy_addr, 3574 ext_phy_addr,
3513 MDIO_PMA_DEVAD, 3575 MDIO_PMA_DEVAD,
3514 MDIO_PMA_REG_8481_LINK_SIGNAL,
3515 val1);
3516
3517 bnx2x_cl45_read(bp, params->port,
3518 ext_phy_type,
3519 ext_phy_addr,
3520 MDIO_PMA_DEVAD,
3521 MDIO_PMA_REG_8481_LED3_MASK, 3576 MDIO_PMA_REG_8481_LED3_MASK,
3522 &val1); 3577 0x6);
3523 val1 |= (1<<4); /* Unmask LED3 for 10G link */
3524 bnx2x_cl45_write(bp, params->port, 3578 bnx2x_cl45_write(bp, params->port,
3525 ext_phy_type, 3579 ext_phy_type,
3526 ext_phy_addr, 3580 ext_phy_addr,
3527 MDIO_PMA_DEVAD, 3581 MDIO_PMA_DEVAD,
3528 MDIO_PMA_REG_8481_LED3_MASK, 3582 MDIO_PMA_REG_8481_LED3_BLINK,
3529 val1); 3583 0);
3530} 3584}
3531 3585
3532 3586
@@ -3544,7 +3598,10 @@ static void bnx2x_init_internal_phy(struct link_params *params,
3544 bnx2x_set_preemphasis(params); 3598 bnx2x_set_preemphasis(params);
3545 3599
3546 /* forced speed requested? */ 3600 /* forced speed requested? */
3547 if (vars->line_speed != SPEED_AUTO_NEG) { 3601 if (vars->line_speed != SPEED_AUTO_NEG ||
3602 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
3603 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3604 params->loopback_mode == LOOPBACK_EXT)) {
3548 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 3605 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3549 3606
3550 /* disable autoneg */ 3607 /* disable autoneg */
@@ -3693,19 +3750,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3693 } 3750 }
3694 } 3751 }
3695 /* Force speed */ 3752 /* Force speed */
3696 /* First enable LASI */
3697 bnx2x_cl45_write(bp, params->port,
3698 ext_phy_type,
3699 ext_phy_addr,
3700 MDIO_PMA_DEVAD,
3701 MDIO_PMA_REG_RX_ALARM_CTRL,
3702 0x0400);
3703 bnx2x_cl45_write(bp, params->port,
3704 ext_phy_type,
3705 ext_phy_addr,
3706 MDIO_PMA_DEVAD,
3707 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3708
3709 if (params->req_line_speed == SPEED_10000) { 3753 if (params->req_line_speed == SPEED_10000) {
3710 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n"); 3754 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
3711 3755
@@ -3715,6 +3759,9 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3715 MDIO_PMA_DEVAD, 3759 MDIO_PMA_DEVAD,
3716 MDIO_PMA_REG_DIGITAL_CTRL, 3760 MDIO_PMA_REG_DIGITAL_CTRL,
3717 0x400); 3761 0x400);
3762 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3763 ext_phy_addr, MDIO_PMA_DEVAD,
3764 MDIO_PMA_REG_LASI_CTRL, 1);
3718 } else { 3765 } else {
3719 /* Force 1Gbps using autoneg with 1G 3766 /* Force 1Gbps using autoneg with 1G
3720 advertisment */ 3767 advertisment */
@@ -3756,6 +3803,17 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3756 MDIO_AN_DEVAD, 3803 MDIO_AN_DEVAD,
3757 MDIO_AN_REG_CTRL, 3804 MDIO_AN_REG_CTRL,
3758 0x1200); 3805 0x1200);
3806 bnx2x_cl45_write(bp, params->port,
3807 ext_phy_type,
3808 ext_phy_addr,
3809 MDIO_PMA_DEVAD,
3810 MDIO_PMA_REG_RX_ALARM_CTRL,
3811 0x0400);
3812 bnx2x_cl45_write(bp, params->port,
3813 ext_phy_type,
3814 ext_phy_addr,
3815 MDIO_PMA_DEVAD,
3816 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3759 3817
3760 } 3818 }
3761 bnx2x_save_bcm_spirom_ver(bp, params->port, 3819 bnx2x_save_bcm_spirom_ver(bp, params->port,
@@ -4291,6 +4349,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4291 break; 4349 break;
4292 } 4350 }
4293 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 4351 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
4352 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
4294 /* This phy uses the NIG latch mechanism since link 4353 /* This phy uses the NIG latch mechanism since link
4295 indication arrives through its LED4 and not via 4354 indication arrives through its LED4 and not via
4296 its LASI signal, so we get steady signal 4355 its LASI signal, so we get steady signal
@@ -4298,6 +4357,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4298 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 4357 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
4299 1 << NIG_LATCH_BC_ENABLE_MI_INT); 4358 1 << NIG_LATCH_BC_ENABLE_MI_INT);
4300 4359
4360 bnx2x_cl45_write(bp, params->port,
4361 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
4362 ext_phy_addr,
4363 MDIO_PMA_DEVAD,
4364 MDIO_PMA_REG_CTRL, 0x0000);
4365
4301 bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr); 4366 bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
4302 if (params->req_line_speed == SPEED_AUTO_NEG) { 4367 if (params->req_line_speed == SPEED_AUTO_NEG) {
4303 4368
@@ -4394,17 +4459,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4394 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 4459 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
4395 DP(NETIF_MSG_LINK, "Advertising 10G\n"); 4460 DP(NETIF_MSG_LINK, "Advertising 10G\n");
4396 /* Restart autoneg for 10G*/ 4461 /* Restart autoneg for 10G*/
4397 bnx2x_cl45_read(bp, params->port, 4462
4398 ext_phy_type,
4399 ext_phy_addr,
4400 MDIO_AN_DEVAD,
4401 MDIO_AN_REG_CTRL, &val);
4402 val |= 0x200;
4403 bnx2x_cl45_write(bp, params->port, 4463 bnx2x_cl45_write(bp, params->port,
4404 ext_phy_type, 4464 ext_phy_type,
4405 ext_phy_addr, 4465 ext_phy_addr,
4406 MDIO_AN_DEVAD, 4466 MDIO_AN_DEVAD,
4407 MDIO_AN_REG_CTRL, val); 4467 MDIO_AN_REG_CTRL, 0x3200);
4408 } 4468 }
4409 } else { 4469 } else {
4410 /* Force speed */ 4470 /* Force speed */
@@ -5148,6 +5208,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
5148 } 5208 }
5149 break; 5209 break;
5150 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 5210 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5211 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5151 /* Check 10G-BaseT link status */ 5212 /* Check 10G-BaseT link status */
5152 /* Check PMD signal ok */ 5213 /* Check PMD signal ok */
5153 bnx2x_cl45_read(bp, params->port, ext_phy_type, 5214 bnx2x_cl45_read(bp, params->port, ext_phy_type,
@@ -5363,8 +5424,10 @@ static void bnx2x_link_int_ack(struct link_params *params,
5363 (NIG_STATUS_XGXS0_LINK10G | 5424 (NIG_STATUS_XGXS0_LINK10G |
5364 NIG_STATUS_XGXS0_LINK_STATUS | 5425 NIG_STATUS_XGXS0_LINK_STATUS |
5365 NIG_STATUS_SERDES0_LINK_STATUS)); 5426 NIG_STATUS_SERDES0_LINK_STATUS));
5366 if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) 5427 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5367 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) { 5428 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
5429 (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5430 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
5368 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int); 5431 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
5369 } 5432 }
5370 if (vars->phy_link_up) { 5433 if (vars->phy_link_up) {
@@ -5477,6 +5540,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
5477 status = bnx2x_format_ver(spirom_ver, version, len); 5540 status = bnx2x_format_ver(spirom_ver, version, len);
5478 break; 5541 break;
5479 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 5542 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5543 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5480 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 | 5544 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
5481 (spirom_ver & 0x7F); 5545 (spirom_ver & 0x7F);
5482 status = bnx2x_format_ver(spirom_ver, version, len); 5546 status = bnx2x_format_ver(spirom_ver, version, len);
@@ -5728,13 +5792,15 @@ u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
5728} 5792}
5729 5793
5730 5794
5731u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, 5795u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
5732 u16 hw_led_mode, u32 chip_id)
5733{ 5796{
5797 u8 port = params->port;
5798 u16 hw_led_mode = params->hw_led_mode;
5734 u8 rc = 0; 5799 u8 rc = 0;
5735 u32 tmp; 5800 u32 tmp;
5736 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 5801 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5737 5802 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5803 struct bnx2x *bp = params->bp;
5738 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); 5804 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
5739 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", 5805 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
5740 speed, hw_led_mode); 5806 speed, hw_led_mode);
@@ -5749,7 +5815,14 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
5749 break; 5815 break;
5750 5816
5751 case LED_MODE_OPER: 5817 case LED_MODE_OPER:
5752 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); 5818 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5819 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5820 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5821 } else {
5822 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5823 hw_led_mode);
5824 }
5825
5753 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + 5826 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
5754 port*4, 0); 5827 port*4, 0);
5755 /* Set blinking rate to ~15.9Hz */ 5828 /* Set blinking rate to ~15.9Hz */
@@ -5761,7 +5834,7 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
5761 EMAC_WR(bp, EMAC_REG_EMAC_LED, 5834 EMAC_WR(bp, EMAC_REG_EMAC_LED,
5762 (tmp & (~EMAC_LED_OVERRIDE))); 5835 (tmp & (~EMAC_LED_OVERRIDE)));
5763 5836
5764 if (!CHIP_IS_E1H(bp) && 5837 if (CHIP_IS_E1(bp) &&
5765 ((speed == SPEED_2500) || 5838 ((speed == SPEED_2500) ||
5766 (speed == SPEED_1000) || 5839 (speed == SPEED_1000) ||
5767 (speed == SPEED_100) || 5840 (speed == SPEED_100) ||
@@ -5864,6 +5937,7 @@ static u8 bnx2x_link_initialize(struct link_params *params,
5864 5937
5865 if (non_ext_phy || 5938 if (non_ext_phy ||
5866 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || 5939 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
5940 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
5867 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) || 5941 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
5868 (params->loopback_mode == LOOPBACK_EXT_PHY)) { 5942 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
5869 if (params->req_line_speed == SPEED_AUTO_NEG) 5943 if (params->req_line_speed == SPEED_AUTO_NEG)
@@ -6030,10 +6104,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6030 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 6104 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6031 params->port*4, 0); 6105 params->port*4, 0);
6032 6106
6033 bnx2x_set_led(bp, params->port, LED_MODE_OPER, 6107 bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
6034 vars->line_speed, params->hw_led_mode,
6035 params->chip_id);
6036
6037 } else 6108 } else
6038 /* No loopback */ 6109 /* No loopback */
6039 { 6110 {
@@ -6091,15 +6162,13 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6091{ 6162{
6092 struct bnx2x *bp = params->bp; 6163 struct bnx2x *bp = params->bp;
6093 u32 ext_phy_config = params->ext_phy_config; 6164 u32 ext_phy_config = params->ext_phy_config;
6094 u16 hw_led_mode = params->hw_led_mode;
6095 u32 chip_id = params->chip_id;
6096 u8 port = params->port; 6165 u8 port = params->port;
6097 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 6166 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6098 u32 val = REG_RD(bp, params->shmem_base + 6167 u32 val = REG_RD(bp, params->shmem_base +
6099 offsetof(struct shmem_region, dev_info. 6168 offsetof(struct shmem_region, dev_info.
6100 port_feature_config[params->port]. 6169 port_feature_config[params->port].
6101 config)); 6170 config));
6102 6171 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
6103 /* disable attentions */ 6172 /* disable attentions */
6104 vars->link_status = 0; 6173 vars->link_status = 0;
6105 bnx2x_update_mng(params, vars->link_status); 6174 bnx2x_update_mng(params, vars->link_status);
@@ -6127,7 +6196,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6127 * Hold it as vars low 6196 * Hold it as vars low
6128 */ 6197 */
6129 /* clear link led */ 6198 /* clear link led */
6130 bnx2x_set_led(bp, port, LED_MODE_OFF, 0, hw_led_mode, chip_id); 6199 bnx2x_set_led(params, LED_MODE_OFF, 0);
6131 if (reset_ext_phy) { 6200 if (reset_ext_phy) {
6132 switch (ext_phy_type) { 6201 switch (ext_phy_type) {
6133 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 6202 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
@@ -6163,6 +6232,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6163 bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr); 6232 bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
6164 break; 6233 break;
6165 } 6234 }
6235 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6236 {
6237 u8 ext_phy_addr =
6238 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6239 bnx2x_cl45_write(bp, port,
6240 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6241 ext_phy_addr,
6242 MDIO_AN_DEVAD,
6243 MDIO_AN_REG_CTRL, 0x0000);
6244 bnx2x_cl45_write(bp, port,
6245 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6246 ext_phy_addr,
6247 MDIO_PMA_DEVAD,
6248 MDIO_PMA_REG_CTRL, 1);
6249 break;
6250 }
6166 default: 6251 default:
6167 /* HW reset */ 6252 /* HW reset */
6168 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6253 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
@@ -6198,9 +6283,7 @@ static u8 bnx2x_update_link_down(struct link_params *params,
6198 u8 port = params->port; 6283 u8 port = params->port;
6199 6284
6200 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); 6285 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
6201 bnx2x_set_led(bp, port, LED_MODE_OFF, 6286 bnx2x_set_led(params, LED_MODE_OFF, 0);
6202 0, params->hw_led_mode,
6203 params->chip_id);
6204 6287
6205 /* indicate no mac active */ 6288 /* indicate no mac active */
6206 vars->mac_type = MAC_TYPE_NONE; 6289 vars->mac_type = MAC_TYPE_NONE;
@@ -6237,15 +6320,13 @@ static u8 bnx2x_update_link_up(struct link_params *params,
6237 vars->link_status |= LINK_STATUS_LINK_UP; 6320 vars->link_status |= LINK_STATUS_LINK_UP;
6238 if (link_10g) { 6321 if (link_10g) {
6239 bnx2x_bmac_enable(params, vars, 0); 6322 bnx2x_bmac_enable(params, vars, 0);
6240 bnx2x_set_led(bp, port, LED_MODE_OPER, 6323 bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
6241 SPEED_10000, params->hw_led_mode,
6242 params->chip_id);
6243
6244 } else { 6324 } else {
6245 bnx2x_emac_enable(params, vars, 0);
6246 rc = bnx2x_emac_program(params, vars->line_speed, 6325 rc = bnx2x_emac_program(params, vars->line_speed,
6247 vars->duplex); 6326 vars->duplex);
6248 6327
6328 bnx2x_emac_enable(params, vars, 0);
6329
6249 /* AN complete? */ 6330 /* AN complete? */
6250 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { 6331 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
6251 if (!(vars->phy_flags & 6332 if (!(vars->phy_flags &
@@ -6343,6 +6424,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6343 6424
6344 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) && 6425 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
6345 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) && 6426 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
6427 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
6346 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) && 6428 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
6347 (ext_phy_link_up && !vars->phy_link_up)) 6429 (ext_phy_link_up && !vars->phy_link_up))
6348 bnx2x_init_internal_phy(params, vars, 0); 6430 bnx2x_init_internal_phy(params, vars, 0);
@@ -6578,6 +6660,13 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6578 return 0; 6660 return 0;
6579} 6661}
6580 6662
6663
6664static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6665{
6666 /* HW reset */
6667 bnx2x_ext_phy_hw_reset(bp, 1);
6668 return 0;
6669}
6581u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base) 6670u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6582{ 6671{
6583 u8 rc = 0; 6672 u8 rc = 0;
@@ -6607,7 +6696,9 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6607 /* GPIO1 affects both ports, so there's need to pull 6696 /* GPIO1 affects both ports, so there's need to pull
6608 it for single port alone */ 6697 it for single port alone */
6609 rc = bnx2x_8726_common_init_phy(bp, shmem_base); 6698 rc = bnx2x_8726_common_init_phy(bp, shmem_base);
6610 6699 break;
6700 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6701 rc = bnx2x_84823_common_init_phy(bp, shmem_base);
6611 break; 6702 break;
6612 default: 6703 default:
6613 DP(NETIF_MSG_LINK, 6704 DP(NETIF_MSG_LINK,
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index f3e252264e1b..40c2981de8ed 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -178,8 +178,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
178 Basically, the CLC takes care of the led for the link, but in case one needs 178 Basically, the CLC takes care of the led for the link, but in case one needs
179 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to 179 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
180 blink the led, and LED_MODE_OFF to set the led off.*/ 180 blink the led, and LED_MODE_OFF to set the led off.*/
181u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, 181u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
182 u16 hw_led_mode, u32 chip_id);
183#define LED_MODE_OFF 0 182#define LED_MODE_OFF 0
184#define LED_MODE_OPER 2 183#define LED_MODE_OPER 2
185 184
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 59b58d8f0fa8..61974b74909a 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -56,8 +56,8 @@
56#include "bnx2x_init_ops.h" 56#include "bnx2x_init_ops.h"
57#include "bnx2x_dump.h" 57#include "bnx2x_dump.h"
58 58
59#define DRV_MODULE_VERSION "1.52.1-1" 59#define DRV_MODULE_VERSION "1.52.1-3"
60#define DRV_MODULE_RELDATE "2009/10/13" 60#define DRV_MODULE_RELDATE "2009/11/05"
61#define BNX2X_BC_VER 0x040200 61#define BNX2X_BC_VER 0x040200
62 62
63#include <linux/firmware.h> 63#include <linux/firmware.h>
@@ -10855,7 +10855,6 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
10855static int bnx2x_phys_id(struct net_device *dev, u32 data) 10855static int bnx2x_phys_id(struct net_device *dev, u32 data)
10856{ 10856{
10857 struct bnx2x *bp = netdev_priv(dev); 10857 struct bnx2x *bp = netdev_priv(dev);
10858 int port = BP_PORT(bp);
10859 int i; 10858 int i;
10860 10859
10861 if (!netif_running(dev)) 10860 if (!netif_running(dev))
@@ -10869,13 +10868,10 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
10869 10868
10870 for (i = 0; i < (data * 2); i++) { 10869 for (i = 0; i < (data * 2); i++) {
10871 if ((i % 2) == 0) 10870 if ((i % 2) == 0)
10872 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000, 10871 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10873 bp->link_params.hw_led_mode, 10872 SPEED_1000);
10874 bp->link_params.chip_id);
10875 else 10873 else
10876 bnx2x_set_led(bp, port, LED_MODE_OFF, 0, 10874 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10877 bp->link_params.hw_led_mode,
10878 bp->link_params.chip_id);
10879 10875
10880 msleep_interruptible(500); 10876 msleep_interruptible(500);
10881 if (signal_pending(current)) 10877 if (signal_pending(current))
@@ -10883,10 +10879,8 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
10883 } 10879 }
10884 10880
10885 if (bp->link_vars.link_up) 10881 if (bp->link_vars.link_up)
10886 bnx2x_set_led(bp, port, LED_MODE_OPER, 10882 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10887 bp->link_vars.line_speed, 10883 bp->link_vars.line_speed);
10888 bp->link_params.hw_led_mode,
10889 bp->link_params.chip_id);
10890 10884
10891 return 0; 10885 return 0;
10892} 10886}
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index aa76cbada5e2..b668173ffcb4 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -4772,18 +4772,28 @@
4772#define PCI_ID_VAL2 0x438 4772#define PCI_ID_VAL2 0x438
4773 4773
4774 4774
4775#define MDIO_REG_BANK_CL73_IEEEB0 0x0 4775#define MDIO_REG_BANK_CL73_IEEEB0 0x0
4776#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 4776#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
4777#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200 4777#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
4778#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000 4778#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
4779#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000 4779#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
4780 4780
4781#define MDIO_REG_BANK_CL73_IEEEB1 0x10 4781#define MDIO_REG_BANK_CL73_IEEEB1 0x10
4782#define MDIO_CL73_IEEEB1_AN_ADV2 0x01 4782#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
4783#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
4784#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
4785#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
4786#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
4787#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
4783#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000 4788#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
4784#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020 4789#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
4785#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040 4790#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
4786#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080 4791#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
4792#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
4793#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
4794#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
4795#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
4796#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
4787 4797
4788#define MDIO_REG_BANK_RX0 0x80b0 4798#define MDIO_REG_BANK_RX0 0x80b0
4789#define MDIO_RX0_RX_STATUS 0x10 4799#define MDIO_RX0_RX_STATUS 0x10
@@ -4910,6 +4920,8 @@
4910 4920
4911 4921
4912#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 4922#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
4923#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
4924#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
4913#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11 4925#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
4914#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1 4926#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
4915#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13 4927#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
@@ -4934,6 +4946,8 @@
4934#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010 4946#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
4935#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008 4947#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
4936#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000 4948#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
4949#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
4950#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
4937#define MDIO_SERDES_DIGITAL_MISC1 0x18 4951#define MDIO_SERDES_DIGITAL_MISC1 0x18
4938#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000 4952#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
4939#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000 4953#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
@@ -5115,6 +5129,7 @@ Theotherbitsarereservedandshouldbezero*/
5115#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c 5129#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
5116#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f 5130#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
5117#define MDIO_PMA_REG_8481_LED3_MASK 0xa832 5131#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
5132#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
5118#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835 5133#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
5119#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b 5134#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
5120 5135
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c3fa31c9f2a7..1d0581923287 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1956,7 +1956,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
1956 struct port *port, *prev_port, *temp_port; 1956 struct port *port, *prev_port, *temp_port;
1957 struct aggregator *aggregator, *new_aggregator, *temp_aggregator; 1957 struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
1958 int select_new_active_agg = 0; 1958 int select_new_active_agg = 0;
1959 1959
1960 // find the aggregator related to this slave 1960 // find the aggregator related to this slave
1961 aggregator = &(SLAVE_AD_INFO(slave).aggregator); 1961 aggregator = &(SLAVE_AD_INFO(slave).aggregator);
1962 1962
@@ -2024,7 +2024,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2024 2024
2025 // clear the aggregator 2025 // clear the aggregator
2026 ad_clear_agg(aggregator); 2026 ad_clear_agg(aggregator);
2027 2027
2028 if (select_new_active_agg) { 2028 if (select_new_active_agg) {
2029 ad_agg_selection_logic(__get_first_agg(port)); 2029 ad_agg_selection_logic(__get_first_agg(port));
2030 } 2030 }
@@ -2075,7 +2075,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2075 } 2075 }
2076 } 2076 }
2077 } 2077 }
2078 port->slave=NULL; 2078 port->slave=NULL;
2079} 2079}
2080 2080
2081/** 2081/**
@@ -2301,7 +2301,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2301} 2301}
2302 2302
2303/* 2303/*
2304 * set link state for bonding master: if we have an active 2304 * set link state for bonding master: if we have an active
2305 * aggregator, we're up, if not, we're down. Presumes that we cannot 2305 * aggregator, we're up, if not, we're down. Presumes that we cannot
2306 * have an active aggregator if there are no slaves with link up. 2306 * have an active aggregator if there are no slaves with link up.
2307 * 2307 *
@@ -2395,7 +2395,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2395 goto out; 2395 goto out;
2396 } 2396 }
2397 2397
2398 slave_agg_no = bond->xmit_hash_policy(skb, dev, slaves_in_agg); 2398 slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
2399 2399
2400 bond_for_each_slave(bond, slave, i) { 2400 bond_for_each_slave(bond, slave, i) {
2401 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator; 2401 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
@@ -2445,9 +2445,6 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2445 struct slave *slave = NULL; 2445 struct slave *slave = NULL;
2446 int ret = NET_RX_DROP; 2446 int ret = NET_RX_DROP;
2447 2447
2448 if (dev_net(dev) != &init_net)
2449 goto out;
2450
2451 if (!(dev->flags & IFF_MASTER)) 2448 if (!(dev->flags & IFF_MASTER))
2452 goto out; 2449 goto out;
2453 2450
@@ -2468,4 +2465,3 @@ out:
2468 2465
2469 return ret; 2466 return ret;
2470} 2467}
2471
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9b5936f072dc..0d30d1e5e53f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -355,9 +355,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
355 struct arp_pkt *arp = (struct arp_pkt *)skb->data; 355 struct arp_pkt *arp = (struct arp_pkt *)skb->data;
356 int res = NET_RX_DROP; 356 int res = NET_RX_DROP;
357 357
358 if (dev_net(bond_dev) != &init_net)
359 goto out;
360
361 while (bond_dev->priv_flags & IFF_802_1Q_VLAN) 358 while (bond_dev->priv_flags & IFF_802_1Q_VLAN)
362 bond_dev = vlan_dev_real_dev(bond_dev); 359 bond_dev = vlan_dev_real_dev(bond_dev);
363 360
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 83921abae12d..b72e1dc8cf8f 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -25,6 +25,7 @@
25#include <net/ipv6.h> 25#include <net/ipv6.h>
26#include <net/ndisc.h> 26#include <net/ndisc.h>
27#include <net/addrconf.h> 27#include <net/addrconf.h>
28#include <net/netns/generic.h>
28#include "bonding.h" 29#include "bonding.h"
29 30
30/* 31/*
@@ -152,11 +153,9 @@ static int bond_inet6addr_event(struct notifier_block *this,
152 struct net_device *vlan_dev, *event_dev = ifa->idev->dev; 153 struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
153 struct bonding *bond; 154 struct bonding *bond;
154 struct vlan_entry *vlan; 155 struct vlan_entry *vlan;
156 struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
155 157
156 if (dev_net(event_dev) != &init_net) 158 list_for_each_entry(bond, &bn->dev_list, bond_list) {
157 return NOTIFY_DONE;
158
159 list_for_each_entry(bond, &bond_dev_list, bond_list) {
160 if (bond->dev == event_dev) { 159 if (bond->dev == event_dev) {
161 switch (event) { 160 switch (event) {
162 case NETDEV_UP: 161 case NETDEV_UP:
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index feb03ad0d803..ecea6c294132 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -75,6 +75,7 @@
75#include <linux/jiffies.h> 75#include <linux/jiffies.h>
76#include <net/route.h> 76#include <net/route.h>
77#include <net/net_namespace.h> 77#include <net/net_namespace.h>
78#include <net/netns/generic.h>
78#include "bonding.h" 79#include "bonding.h"
79#include "bond_3ad.h" 80#include "bond_3ad.h"
80#include "bond_alb.h" 81#include "bond_alb.h"
@@ -157,11 +158,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the
157static const char * const version = 158static const char * const version =
158 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; 159 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
159 160
160LIST_HEAD(bond_dev_list); 161int bond_net_id;
161
162#ifdef CONFIG_PROC_FS
163static struct proc_dir_entry *bond_proc_dir;
164#endif
165 162
166static __be32 arp_target[BOND_MAX_ARP_TARGETS]; 163static __be32 arp_target[BOND_MAX_ARP_TARGETS];
167static int arp_ip_count; 164static int arp_ip_count;
@@ -227,7 +224,7 @@ struct bond_parm_tbl ad_select_tbl[] = {
227 224
228static void bond_send_gratuitous_arp(struct bonding *bond); 225static void bond_send_gratuitous_arp(struct bonding *bond);
229static int bond_init(struct net_device *bond_dev); 226static int bond_init(struct net_device *bond_dev);
230static void bond_deinit(struct net_device *bond_dev); 227static void bond_uninit(struct net_device *bond_dev);
231 228
232/*---------------------------- General routines -----------------------------*/ 229/*---------------------------- General routines -----------------------------*/
233 230
@@ -707,7 +704,7 @@ static int bond_check_dev_link(struct bonding *bond,
707 struct net_device *slave_dev, int reporting) 704 struct net_device *slave_dev, int reporting)
708{ 705{
709 const struct net_device_ops *slave_ops = slave_dev->netdev_ops; 706 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
710 static int (*ioctl)(struct net_device *, struct ifreq *, int); 707 int (*ioctl)(struct net_device *, struct ifreq *, int);
711 struct ifreq ifr; 708 struct ifreq ifr;
712 struct mii_ioctl_data *mii; 709 struct mii_ioctl_data *mii;
713 710
@@ -2003,25 +2000,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2003} 2000}
2004 2001
2005/* 2002/*
2006* Destroy a bonding device.
2007* Must be under rtnl_lock when this function is called.
2008*/
2009static void bond_uninit(struct net_device *bond_dev)
2010{
2011 struct bonding *bond = netdev_priv(bond_dev);
2012
2013 bond_deinit(bond_dev);
2014 bond_destroy_sysfs_entry(bond);
2015
2016 if (bond->wq)
2017 destroy_workqueue(bond->wq);
2018
2019 netif_addr_lock_bh(bond_dev);
2020 bond_mc_list_destroy(bond);
2021 netif_addr_unlock_bh(bond_dev);
2022}
2023
2024/*
2025* First release a slave and than destroy the bond if no more slaves are left. 2003* First release a slave and than destroy the bond if no more slaves are left.
2026* Must be under rtnl_lock when this function is called. 2004* Must be under rtnl_lock when this function is called.
2027*/ 2005*/
@@ -2605,7 +2583,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2605 fl.fl4_dst = targets[i]; 2583 fl.fl4_dst = targets[i];
2606 fl.fl4_tos = RTO_ONLINK; 2584 fl.fl4_tos = RTO_ONLINK;
2607 2585
2608 rv = ip_route_output_key(&init_net, &rt, &fl); 2586 rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
2609 if (rv) { 2587 if (rv) {
2610 if (net_ratelimit()) { 2588 if (net_ratelimit()) {
2611 pr_warning(DRV_NAME 2589 pr_warning(DRV_NAME
@@ -2713,9 +2691,6 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2713 unsigned char *arp_ptr; 2691 unsigned char *arp_ptr;
2714 __be32 sip, tip; 2692 __be32 sip, tip;
2715 2693
2716 if (dev_net(dev) != &init_net)
2717 goto out;
2718
2719 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) 2694 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
2720 goto out; 2695 goto out;
2721 2696
@@ -3378,10 +3353,11 @@ static const struct file_operations bond_info_fops = {
3378static void bond_create_proc_entry(struct bonding *bond) 3353static void bond_create_proc_entry(struct bonding *bond)
3379{ 3354{
3380 struct net_device *bond_dev = bond->dev; 3355 struct net_device *bond_dev = bond->dev;
3356 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3381 3357
3382 if (bond_proc_dir) { 3358 if (bn->proc_dir) {
3383 bond->proc_entry = proc_create_data(bond_dev->name, 3359 bond->proc_entry = proc_create_data(bond_dev->name,
3384 S_IRUGO, bond_proc_dir, 3360 S_IRUGO, bn->proc_dir,
3385 &bond_info_fops, bond); 3361 &bond_info_fops, bond);
3386 if (bond->proc_entry == NULL) 3362 if (bond->proc_entry == NULL)
3387 pr_warning(DRV_NAME 3363 pr_warning(DRV_NAME
@@ -3394,8 +3370,11 @@ static void bond_create_proc_entry(struct bonding *bond)
3394 3370
3395static void bond_remove_proc_entry(struct bonding *bond) 3371static void bond_remove_proc_entry(struct bonding *bond)
3396{ 3372{
3397 if (bond_proc_dir && bond->proc_entry) { 3373 struct net_device *bond_dev = bond->dev;
3398 remove_proc_entry(bond->proc_file_name, bond_proc_dir); 3374 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3375
3376 if (bn->proc_dir && bond->proc_entry) {
3377 remove_proc_entry(bond->proc_file_name, bn->proc_dir);
3399 memset(bond->proc_file_name, 0, IFNAMSIZ); 3378 memset(bond->proc_file_name, 0, IFNAMSIZ);
3400 bond->proc_entry = NULL; 3379 bond->proc_entry = NULL;
3401 } 3380 }
@@ -3404,11 +3383,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
3404/* Create the bonding directory under /proc/net, if doesn't exist yet. 3383/* Create the bonding directory under /proc/net, if doesn't exist yet.
3405 * Caller must hold rtnl_lock. 3384 * Caller must hold rtnl_lock.
3406 */ 3385 */
3407static void bond_create_proc_dir(void) 3386static void bond_create_proc_dir(struct bond_net *bn)
3408{ 3387{
3409 if (!bond_proc_dir) { 3388 if (!bn->proc_dir) {
3410 bond_proc_dir = proc_mkdir(DRV_NAME, init_net.proc_net); 3389 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
3411 if (!bond_proc_dir) 3390 if (!bn->proc_dir)
3412 pr_warning(DRV_NAME 3391 pr_warning(DRV_NAME
3413 ": Warning: cannot create /proc/net/%s\n", 3392 ": Warning: cannot create /proc/net/%s\n",
3414 DRV_NAME); 3393 DRV_NAME);
@@ -3418,11 +3397,11 @@ static void bond_create_proc_dir(void)
3418/* Destroy the bonding directory under /proc/net, if empty. 3397/* Destroy the bonding directory under /proc/net, if empty.
3419 * Caller must hold rtnl_lock. 3398 * Caller must hold rtnl_lock.
3420 */ 3399 */
3421static void bond_destroy_proc_dir(void) 3400static void bond_destroy_proc_dir(struct bond_net *bn)
3422{ 3401{
3423 if (bond_proc_dir) { 3402 if (bn->proc_dir) {
3424 remove_proc_entry(DRV_NAME, init_net.proc_net); 3403 remove_proc_entry(DRV_NAME, bn->net->proc_net);
3425 bond_proc_dir = NULL; 3404 bn->proc_dir = NULL;
3426 } 3405 }
3427} 3406}
3428 3407
@@ -3436,11 +3415,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
3436{ 3415{
3437} 3416}
3438 3417
3439static void bond_create_proc_dir(void) 3418static void bond_create_proc_dir(struct bond_net *bn)
3440{ 3419{
3441} 3420}
3442 3421
3443static void bond_destroy_proc_dir(void) 3422static void bond_destroy_proc_dir(struct bond_net *bn)
3444{ 3423{
3445} 3424}
3446 3425
@@ -3457,9 +3436,6 @@ static int bond_event_changename(struct bonding *bond)
3457 bond_remove_proc_entry(bond); 3436 bond_remove_proc_entry(bond);
3458 bond_create_proc_entry(bond); 3437 bond_create_proc_entry(bond);
3459 3438
3460 bond_destroy_sysfs_entry(bond);
3461 bond_create_sysfs_entry(bond);
3462
3463 return NOTIFY_DONE; 3439 return NOTIFY_DONE;
3464} 3440}
3465 3441
@@ -3471,9 +3447,6 @@ static int bond_master_netdev_event(unsigned long event,
3471 switch (event) { 3447 switch (event) {
3472 case NETDEV_CHANGENAME: 3448 case NETDEV_CHANGENAME:
3473 return bond_event_changename(event_bond); 3449 return bond_event_changename(event_bond);
3474 case NETDEV_UNREGISTER:
3475 bond_release_all(event_bond->dev);
3476 break;
3477 default: 3450 default:
3478 break; 3451 break;
3479 } 3452 }
@@ -3565,9 +3538,6 @@ static int bond_netdev_event(struct notifier_block *this,
3565{ 3538{
3566 struct net_device *event_dev = (struct net_device *)ptr; 3539 struct net_device *event_dev = (struct net_device *)ptr;
3567 3540
3568 if (dev_net(event_dev) != &init_net)
3569 return NOTIFY_DONE;
3570
3571 pr_debug("event_dev: %s, event: %lx\n", 3541 pr_debug("event_dev: %s, event: %lx\n",
3572 (event_dev ? event_dev->name : "None"), 3542 (event_dev ? event_dev->name : "None"),
3573 event); 3543 event);
@@ -3600,13 +3570,11 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3600{ 3570{
3601 struct in_ifaddr *ifa = ptr; 3571 struct in_ifaddr *ifa = ptr;
3602 struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev; 3572 struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev;
3573 struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
3603 struct bonding *bond; 3574 struct bonding *bond;
3604 struct vlan_entry *vlan; 3575 struct vlan_entry *vlan;
3605 3576
3606 if (dev_net(ifa->ifa_dev->dev) != &init_net) 3577 list_for_each_entry(bond, &bn->dev_list, bond_list) {
3607 return NOTIFY_DONE;
3608
3609 list_for_each_entry(bond, &bond_dev_list, bond_list) {
3610 if (bond->dev == event_dev) { 3578 if (bond->dev == event_dev) {
3611 switch (event) { 3579 switch (event) {
3612 case NETDEV_UP: 3580 case NETDEV_UP:
@@ -3696,18 +3664,17 @@ void bond_unregister_arp(struct bonding *bond)
3696 * Hash for the output device based upon layer 2 and layer 3 data. If 3664 * Hash for the output device based upon layer 2 and layer 3 data. If
3697 * the packet is not IP mimic bond_xmit_hash_policy_l2() 3665 * the packet is not IP mimic bond_xmit_hash_policy_l2()
3698 */ 3666 */
3699static int bond_xmit_hash_policy_l23(struct sk_buff *skb, 3667static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
3700 struct net_device *bond_dev, int count)
3701{ 3668{
3702 struct ethhdr *data = (struct ethhdr *)skb->data; 3669 struct ethhdr *data = (struct ethhdr *)skb->data;
3703 struct iphdr *iph = ip_hdr(skb); 3670 struct iphdr *iph = ip_hdr(skb);
3704 3671
3705 if (skb->protocol == htons(ETH_P_IP)) { 3672 if (skb->protocol == htons(ETH_P_IP)) {
3706 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ 3673 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
3707 (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count; 3674 (data->h_dest[5] ^ data->h_source[5])) % count;
3708 } 3675 }
3709 3676
3710 return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; 3677 return (data->h_dest[5] ^ data->h_source[5]) % count;
3711} 3678}
3712 3679
3713/* 3680/*
@@ -3715,8 +3682,7 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
3715 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is 3682 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
3716 * altogether not IP, mimic bond_xmit_hash_policy_l2() 3683 * altogether not IP, mimic bond_xmit_hash_policy_l2()
3717 */ 3684 */
3718static int bond_xmit_hash_policy_l34(struct sk_buff *skb, 3685static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
3719 struct net_device *bond_dev, int count)
3720{ 3686{
3721 struct ethhdr *data = (struct ethhdr *)skb->data; 3687 struct ethhdr *data = (struct ethhdr *)skb->data;
3722 struct iphdr *iph = ip_hdr(skb); 3688 struct iphdr *iph = ip_hdr(skb);
@@ -3734,18 +3700,17 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
3734 3700
3735 } 3701 }
3736 3702
3737 return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; 3703 return (data->h_dest[5] ^ data->h_source[5]) % count;
3738} 3704}
3739 3705
3740/* 3706/*
3741 * Hash for the output device based upon layer 2 data 3707 * Hash for the output device based upon layer 2 data
3742 */ 3708 */
3743static int bond_xmit_hash_policy_l2(struct sk_buff *skb, 3709static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3744 struct net_device *bond_dev, int count)
3745{ 3710{
3746 struct ethhdr *data = (struct ethhdr *)skb->data; 3711 struct ethhdr *data = (struct ethhdr *)skb->data;
3747 3712
3748 return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; 3713 return (data->h_dest[5] ^ data->h_source[5]) % count;
3749} 3714}
3750 3715
3751/*-------------------------- Device entry points ----------------------------*/ 3716/*-------------------------- Device entry points ----------------------------*/
@@ -3978,7 +3943,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3978 if (!capable(CAP_NET_ADMIN)) 3943 if (!capable(CAP_NET_ADMIN))
3979 return -EPERM; 3944 return -EPERM;
3980 3945
3981 slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave); 3946 slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
3982 3947
3983 pr_debug("slave_dev=%p: \n", slave_dev); 3948 pr_debug("slave_dev=%p: \n", slave_dev);
3984 3949
@@ -4334,7 +4299,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4334 if (!BOND_IS_OK(bond)) 4299 if (!BOND_IS_OK(bond))
4335 goto out; 4300 goto out;
4336 4301
4337 slave_no = bond->xmit_hash_policy(skb, bond_dev, bond->slave_cnt); 4302 slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
4338 4303
4339 bond_for_each_slave(bond, slave, i) { 4304 bond_for_each_slave(bond, slave, i) {
4340 slave_no--; 4305 slave_no--;
@@ -4615,37 +4580,29 @@ static void bond_work_cancel_all(struct bonding *bond)
4615 cancel_delayed_work(&bond->ad_work); 4580 cancel_delayed_work(&bond->ad_work);
4616} 4581}
4617 4582
4618/* De-initialize device specific data. 4583/*
4619 * Caller must hold rtnl_lock. 4584* Destroy a bonding device.
4620 */ 4585* Must be under rtnl_lock when this function is called.
4621static void bond_deinit(struct net_device *bond_dev) 4586*/
4587static void bond_uninit(struct net_device *bond_dev)
4622{ 4588{
4623 struct bonding *bond = netdev_priv(bond_dev); 4589 struct bonding *bond = netdev_priv(bond_dev);
4624 4590
4591 /* Release the bonded slaves */
4592 bond_release_all(bond_dev);
4593
4625 list_del(&bond->bond_list); 4594 list_del(&bond->bond_list);
4626 4595
4627 bond_work_cancel_all(bond); 4596 bond_work_cancel_all(bond);
4628 4597
4629 bond_remove_proc_entry(bond); 4598 bond_remove_proc_entry(bond);
4630}
4631 4599
4632/* Unregister and free all bond devices. 4600 if (bond->wq)
4633 * Caller must hold rtnl_lock. 4601 destroy_workqueue(bond->wq);
4634 */
4635static void bond_free_all(void)
4636{
4637 struct bonding *bond, *nxt;
4638
4639 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) {
4640 struct net_device *bond_dev = bond->dev;
4641
4642 bond_work_cancel_all(bond);
4643 /* Release the bonded slaves */
4644 bond_release_all(bond_dev);
4645 unregister_netdevice(bond_dev);
4646 }
4647 4602
4648 bond_destroy_proc_dir(); 4603 netif_addr_lock_bh(bond_dev);
4604 bond_mc_list_destroy(bond);
4605 netif_addr_unlock_bh(bond_dev);
4649} 4606}
4650 4607
4651/*------------------------- Module initialization ---------------------------*/ 4608/*------------------------- Module initialization ---------------------------*/
@@ -5067,6 +5024,7 @@ static void bond_set_lockdep_class(struct net_device *dev)
5067static int bond_init(struct net_device *bond_dev) 5024static int bond_init(struct net_device *bond_dev)
5068{ 5025{
5069 struct bonding *bond = netdev_priv(bond_dev); 5026 struct bonding *bond = netdev_priv(bond_dev);
5027 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
5070 5028
5071 pr_debug("Begin bond_init for %s\n", bond_dev->name); 5029 pr_debug("Begin bond_init for %s\n", bond_dev->name);
5072 5030
@@ -5079,30 +5037,41 @@ static int bond_init(struct net_device *bond_dev)
5079 netif_carrier_off(bond_dev); 5037 netif_carrier_off(bond_dev);
5080 5038
5081 bond_create_proc_entry(bond); 5039 bond_create_proc_entry(bond);
5082 list_add_tail(&bond->bond_list, &bond_dev_list); 5040 list_add_tail(&bond->bond_list, &bn->dev_list);
5041
5042 bond_prepare_sysfs_group(bond);
5043 return 0;
5044}
5083 5045
5046static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
5047{
5048 if (tb[IFLA_ADDRESS]) {
5049 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
5050 return -EINVAL;
5051 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
5052 return -EADDRNOTAVAIL;
5053 }
5084 return 0; 5054 return 0;
5085} 5055}
5086 5056
5057static struct rtnl_link_ops bond_link_ops __read_mostly = {
5058 .kind = "bond",
5059 .priv_size = sizeof(struct bonding),
5060 .setup = bond_setup,
5061 .validate = bond_validate,
5062};
5063
5087/* Create a new bond based on the specified name and bonding parameters. 5064/* Create a new bond based on the specified name and bonding parameters.
5088 * If name is NULL, obtain a suitable "bond%d" name for us. 5065 * If name is NULL, obtain a suitable "bond%d" name for us.
5089 * Caller must NOT hold rtnl_lock; we need to release it here before we 5066 * Caller must NOT hold rtnl_lock; we need to release it here before we
5090 * set up our sysfs entries. 5067 * set up our sysfs entries.
5091 */ 5068 */
5092int bond_create(const char *name) 5069int bond_create(struct net *net, const char *name)
5093{ 5070{
5094 struct net_device *bond_dev; 5071 struct net_device *bond_dev;
5095 int res; 5072 int res;
5096 5073
5097 rtnl_lock(); 5074 rtnl_lock();
5098 /* Check to see if the bond already exists. */
5099 /* FIXME: pass netns from caller */
5100 if (name && __dev_get_by_name(&init_net, name)) {
5101 pr_err(DRV_NAME ": cannot add bond %s; already exists\n",
5102 name);
5103 res = -EEXIST;
5104 goto out_rtnl;
5105 }
5106 5075
5107 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", 5076 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
5108 bond_setup); 5077 bond_setup);
@@ -5110,9 +5079,12 @@ int bond_create(const char *name)
5110 pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n", 5079 pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n",
5111 name); 5080 name);
5112 res = -ENOMEM; 5081 res = -ENOMEM;
5113 goto out_rtnl; 5082 goto out;
5114 } 5083 }
5115 5084
5085 dev_net_set(bond_dev, net);
5086 bond_dev->rtnl_link_ops = &bond_link_ops;
5087
5116 if (!name) { 5088 if (!name) {
5117 res = dev_alloc_name(bond_dev, "bond%d"); 5089 res = dev_alloc_name(bond_dev, "bond%d");
5118 if (res < 0) 5090 if (res < 0)
@@ -5120,27 +5092,55 @@ int bond_create(const char *name)
5120 } 5092 }
5121 5093
5122 res = register_netdevice(bond_dev); 5094 res = register_netdevice(bond_dev);
5123 if (res < 0)
5124 goto out_bond;
5125
5126 res = bond_create_sysfs_entry(netdev_priv(bond_dev));
5127 if (res < 0)
5128 goto out_unreg;
5129 5095
5096out:
5130 rtnl_unlock(); 5097 rtnl_unlock();
5131 return 0; 5098 return res;
5132
5133out_unreg:
5134 unregister_netdevice(bond_dev);
5135out_bond:
5136 bond_deinit(bond_dev);
5137out_netdev: 5099out_netdev:
5138 free_netdev(bond_dev); 5100 free_netdev(bond_dev);
5139out_rtnl: 5101 goto out;
5140 rtnl_unlock(); 5102}
5141 return res; 5103
5104static int bond_net_init(struct net *net)
5105{
5106 struct bond_net *bn;
5107 int err;
5108
5109 err = -ENOMEM;
5110 bn = kzalloc(sizeof(struct bond_net), GFP_KERNEL);
5111 if (bn == NULL)
5112 goto out;
5113
5114 bn->net = net;
5115 INIT_LIST_HEAD(&bn->dev_list);
5116
5117 err = net_assign_generic(net, bond_net_id, bn);
5118 if (err)
5119 goto out_free;
5120
5121 bond_create_proc_dir(bn);
5122out:
5123 return err;
5124out_free:
5125 kfree(bn);
5126 goto out;
5142} 5127}
5143 5128
5129static void bond_net_exit(struct net *net)
5130{
5131 struct bond_net *bn;
5132
5133 bn = net_generic(net, bond_net_id);
5134
5135 bond_destroy_proc_dir(bn);
5136 kfree(bn);
5137}
5138
5139static struct pernet_operations bond_net_ops = {
5140 .init = bond_net_init,
5141 .exit = bond_net_exit,
5142};
5143
5144static int __init bonding_init(void) 5144static int __init bonding_init(void)
5145{ 5145{
5146 int i; 5146 int i;
@@ -5152,10 +5152,16 @@ static int __init bonding_init(void)
5152 if (res) 5152 if (res)
5153 goto out; 5153 goto out;
5154 5154
5155 bond_create_proc_dir(); 5155 res = register_pernet_gen_subsys(&bond_net_id, &bond_net_ops);
5156 if (res)
5157 goto out;
5158
5159 res = rtnl_link_register(&bond_link_ops);
5160 if (res)
5161 goto err_link;
5156 5162
5157 for (i = 0; i < max_bonds; i++) { 5163 for (i = 0; i < max_bonds; i++) {
5158 res = bond_create(NULL); 5164 res = bond_create(&init_net, NULL);
5159 if (res) 5165 if (res)
5160 goto err; 5166 goto err;
5161 } 5167 }
@@ -5167,14 +5173,13 @@ static int __init bonding_init(void)
5167 register_netdevice_notifier(&bond_netdev_notifier); 5173 register_netdevice_notifier(&bond_netdev_notifier);
5168 register_inetaddr_notifier(&bond_inetaddr_notifier); 5174 register_inetaddr_notifier(&bond_inetaddr_notifier);
5169 bond_register_ipv6_notifier(); 5175 bond_register_ipv6_notifier();
5170
5171 goto out;
5172err:
5173 rtnl_lock();
5174 bond_free_all();
5175 rtnl_unlock();
5176out: 5176out:
5177 return res; 5177 return res;
5178err:
5179 rtnl_link_unregister(&bond_link_ops);
5180err_link:
5181 unregister_pernet_gen_subsys(bond_net_id, &bond_net_ops);
5182 goto out;
5178 5183
5179} 5184}
5180 5185
@@ -5186,9 +5191,8 @@ static void __exit bonding_exit(void)
5186 5191
5187 bond_destroy_sysfs(); 5192 bond_destroy_sysfs();
5188 5193
5189 rtnl_lock(); 5194 rtnl_link_unregister(&bond_link_ops);
5190 bond_free_all(); 5195 unregister_pernet_gen_subsys(bond_net_id, &bond_net_ops);
5191 rtnl_unlock();
5192} 5196}
5193 5197
5194module_init(bonding_init); 5198module_init(bonding_init);
@@ -5197,3 +5201,4 @@ MODULE_LICENSE("GPL");
5197MODULE_VERSION(DRV_VERSION); 5201MODULE_VERSION(DRV_VERSION);
5198MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION); 5202MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
5199MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others"); 5203MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
5204MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index dca7d82f7b97..a59094f8bb6b 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -35,6 +35,8 @@
35#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
36#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
37#include <net/net_namespace.h> 37#include <net/net_namespace.h>
38#include <net/netns/generic.h>
39#include <linux/nsproxy.h>
38 40
39#include "bonding.h" 41#include "bonding.h"
40 42
@@ -47,12 +49,14 @@
47 */ 49 */
48static ssize_t bonding_show_bonds(struct class *cls, char *buf) 50static ssize_t bonding_show_bonds(struct class *cls, char *buf)
49{ 51{
52 struct net *net = current->nsproxy->net_ns;
53 struct bond_net *bn = net_generic(net, bond_net_id);
50 int res = 0; 54 int res = 0;
51 struct bonding *bond; 55 struct bonding *bond;
52 56
53 rtnl_lock(); 57 rtnl_lock();
54 58
55 list_for_each_entry(bond, &bond_dev_list, bond_list) { 59 list_for_each_entry(bond, &bn->dev_list, bond_list) {
56 if (res > (PAGE_SIZE - IFNAMSIZ)) { 60 if (res > (PAGE_SIZE - IFNAMSIZ)) {
57 /* not enough space for another interface name */ 61 /* not enough space for another interface name */
58 if ((PAGE_SIZE - res) > 10) 62 if ((PAGE_SIZE - res) > 10)
@@ -69,11 +73,12 @@ static ssize_t bonding_show_bonds(struct class *cls, char *buf)
69 return res; 73 return res;
70} 74}
71 75
72static struct net_device *bond_get_by_name(const char *ifname) 76static struct net_device *bond_get_by_name(struct net *net, const char *ifname)
73{ 77{
78 struct bond_net *bn = net_generic(net, bond_net_id);
74 struct bonding *bond; 79 struct bonding *bond;
75 80
76 list_for_each_entry(bond, &bond_dev_list, bond_list) { 81 list_for_each_entry(bond, &bn->dev_list, bond_list) {
77 if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0) 82 if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0)
78 return bond->dev; 83 return bond->dev;
79 } 84 }
@@ -91,6 +96,7 @@ static struct net_device *bond_get_by_name(const char *ifname)
91static ssize_t bonding_store_bonds(struct class *cls, 96static ssize_t bonding_store_bonds(struct class *cls,
92 const char *buffer, size_t count) 97 const char *buffer, size_t count)
93{ 98{
99 struct net *net = current->nsproxy->net_ns;
94 char command[IFNAMSIZ + 1] = {0, }; 100 char command[IFNAMSIZ + 1] = {0, };
95 char *ifname; 101 char *ifname;
96 int rv, res = count; 102 int rv, res = count;
@@ -104,7 +110,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
104 if (command[0] == '+') { 110 if (command[0] == '+') {
105 pr_info(DRV_NAME 111 pr_info(DRV_NAME
106 ": %s is being created...\n", ifname); 112 ": %s is being created...\n", ifname);
107 rv = bond_create(ifname); 113 rv = bond_create(net, ifname);
108 if (rv) { 114 if (rv) {
109 pr_info(DRV_NAME ": Bond creation failed.\n"); 115 pr_info(DRV_NAME ": Bond creation failed.\n");
110 res = rv; 116 res = rv;
@@ -113,7 +119,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
113 struct net_device *bond_dev; 119 struct net_device *bond_dev;
114 120
115 rtnl_lock(); 121 rtnl_lock();
116 bond_dev = bond_get_by_name(ifname); 122 bond_dev = bond_get_by_name(net, ifname);
117 if (bond_dev) { 123 if (bond_dev) {
118 pr_info(DRV_NAME ": %s is being deleted...\n", 124 pr_info(DRV_NAME ": %s is being deleted...\n",
119 ifname); 125 ifname);
@@ -238,8 +244,7 @@ static ssize_t bonding_store_slaves(struct device *d,
238 /* Got a slave name in ifname. Is it already in the list? */ 244 /* Got a slave name in ifname. Is it already in the list? */
239 found = 0; 245 found = 0;
240 246
241 /* FIXME: get netns from sysfs object */ 247 dev = __dev_get_by_name(dev_net(bond->dev), ifname);
242 dev = __dev_get_by_name(&init_net, ifname);
243 if (!dev) { 248 if (!dev) {
244 pr_info(DRV_NAME 249 pr_info(DRV_NAME
245 ": %s: Interface %s does not exist!\n", 250 ": %s: Interface %s does not exist!\n",
@@ -1616,24 +1621,8 @@ void bond_destroy_sysfs(void)
1616 * Initialize sysfs for each bond. This sets up and registers 1621 * Initialize sysfs for each bond. This sets up and registers
1617 * the 'bondctl' directory for each individual bond under /sys/class/net. 1622 * the 'bondctl' directory for each individual bond under /sys/class/net.
1618 */ 1623 */
1619int bond_create_sysfs_entry(struct bonding *bond) 1624void bond_prepare_sysfs_group(struct bonding *bond)
1620{ 1625{
1621 struct net_device *dev = bond->dev; 1626 bond->dev->sysfs_groups[0] = &bonding_group;
1622 int err;
1623
1624 err = sysfs_create_group(&(dev->dev.kobj), &bonding_group);
1625 if (err)
1626 pr_emerg("eek! didn't create group!\n");
1627
1628 return err;
1629}
1630/*
1631 * Remove sysfs entries for each bond.
1632 */
1633void bond_destroy_sysfs_entry(struct bonding *bond)
1634{
1635 struct net_device *dev = bond->dev;
1636
1637 sysfs_remove_group(&(dev->dev.kobj), &bonding_group);
1638} 1627}
1639 1628
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 9c03c2ee074d..a51ae7dc8d51 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -30,8 +30,6 @@
30 30
31#define BOND_MAX_ARP_TARGETS 16 31#define BOND_MAX_ARP_TARGETS 16
32 32
33extern struct list_head bond_dev_list;
34
35#define IS_UP(dev) \ 33#define IS_UP(dev) \
36 ((((dev)->flags & IFF_UP) == IFF_UP) && \ 34 ((((dev)->flags & IFF_UP) == IFF_UP) && \
37 netif_running(dev) && \ 35 netif_running(dev) && \
@@ -206,7 +204,7 @@ struct bonding {
206#endif /* CONFIG_PROC_FS */ 204#endif /* CONFIG_PROC_FS */
207 struct list_head bond_list; 205 struct list_head bond_list;
208 struct dev_mc_list *mc_list; 206 struct dev_mc_list *mc_list;
209 int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int); 207 int (*xmit_hash_policy)(struct sk_buff *, int);
210 __be32 master_ip; 208 __be32 master_ip;
211 u16 flags; 209 u16 flags;
212 u16 rr_tx_counter; 210 u16 rr_tx_counter;
@@ -327,12 +325,11 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond)
327 325
328struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 326struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
329int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 327int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
330int bond_create(const char *name); 328int bond_create(struct net *net, const char *name);
331int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); 329int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
332int bond_create_sysfs(void); 330int bond_create_sysfs(void);
333void bond_destroy_sysfs(void); 331void bond_destroy_sysfs(void);
334void bond_destroy_sysfs_entry(struct bonding *bond); 332void bond_prepare_sysfs_group(struct bonding *bond);
335int bond_create_sysfs_entry(struct bonding *bond);
336int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave); 333int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
337void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave); 334void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
338int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); 335int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
@@ -347,8 +344,16 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
347void bond_register_arp(struct bonding *); 344void bond_register_arp(struct bonding *);
348void bond_unregister_arp(struct bonding *); 345void bond_unregister_arp(struct bonding *);
349 346
347struct bond_net {
348 struct net * net; /* Associated network namespace */
349 struct list_head dev_list;
350#ifdef CONFIG_PROC_FS
351 struct proc_dir_entry * proc_dir;
352#endif
353};
354
350/* exported from bond_main.c */ 355/* exported from bond_main.c */
351extern struct list_head bond_dev_list; 356extern int bond_net_id;
352extern const struct bond_parm_tbl bond_lacp_tbl[]; 357extern const struct bond_parm_tbl bond_lacp_tbl[];
353extern const struct bond_parm_tbl bond_mode_tbl[]; 358extern const struct bond_parm_tbl bond_mode_tbl[];
354extern const struct bond_parm_tbl xmit_hashtype_tbl[]; 359extern const struct bond_parm_tbl xmit_hashtype_tbl[];
@@ -377,4 +382,3 @@ static inline void bond_unregister_ipv6_notifier(void)
377#endif 382#endif
378 383
379#endif /* _LINUX_BONDING_H */ 384#endif /* _LINUX_BONDING_H */
380
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index b13fd9114130..cbe3fce53e3b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -221,38 +221,6 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
221 set_mb_mode_prio(priv, mb, mode, 0); 221 set_mb_mode_prio(priv, mb, mode, 0);
222} 222}
223 223
224static struct sk_buff *alloc_can_skb(struct net_device *dev,
225 struct can_frame **cf)
226{
227 struct sk_buff *skb;
228
229 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
230 if (unlikely(!skb))
231 return NULL;
232
233 skb->protocol = htons(ETH_P_CAN);
234 skb->ip_summed = CHECKSUM_UNNECESSARY;
235 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
236
237 return skb;
238}
239
240static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
241 struct can_frame **cf)
242{
243 struct sk_buff *skb;
244
245 skb = alloc_can_skb(dev, cf);
246 if (unlikely(!skb))
247 return NULL;
248
249 memset(*cf, 0, sizeof(struct can_frame));
250 (*cf)->can_id = CAN_ERR_FLAG;
251 (*cf)->can_dlc = CAN_ERR_DLC;
252
253 return skb;
254}
255
256/* 224/*
257 * Swtich transceiver on or off 225 * Swtich transceiver on or off
258 */ 226 */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 39b99f57c265..c3db111d2ff5 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -366,17 +366,12 @@ void can_restart(unsigned long data)
366 can_flush_echo_skb(dev); 366 can_flush_echo_skb(dev);
367 367
368 /* send restart message upstream */ 368 /* send restart message upstream */
369 skb = dev_alloc_skb(sizeof(struct can_frame)); 369 skb = alloc_can_err_skb(dev, &cf);
370 if (skb == NULL) { 370 if (skb == NULL) {
371 err = -ENOMEM; 371 err = -ENOMEM;
372 goto restart; 372 goto restart;
373 } 373 }
374 skb->dev = dev; 374 cf->can_id |= CAN_ERR_RESTARTED;
375 skb->protocol = htons(ETH_P_CAN);
376 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
377 memset(cf, 0, sizeof(struct can_frame));
378 cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
379 cf->can_dlc = CAN_ERR_DLC;
380 375
381 netif_rx(skb); 376 netif_rx(skb);
382 377
@@ -449,6 +444,39 @@ static void can_setup(struct net_device *dev)
449 dev->features = NETIF_F_NO_CSUM; 444 dev->features = NETIF_F_NO_CSUM;
450} 445}
451 446
447struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
448{
449 struct sk_buff *skb;
450
451 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
452 if (unlikely(!skb))
453 return NULL;
454
455 skb->protocol = htons(ETH_P_CAN);
456 skb->pkt_type = PACKET_BROADCAST;
457 skb->ip_summed = CHECKSUM_UNNECESSARY;
458 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
459 memset(*cf, 0, sizeof(struct can_frame));
460
461 return skb;
462}
463EXPORT_SYMBOL_GPL(alloc_can_skb);
464
465struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
466{
467 struct sk_buff *skb;
468
469 skb = alloc_can_skb(dev, cf);
470 if (unlikely(!skb))
471 return NULL;
472
473 (*cf)->can_id = CAN_ERR_FLAG;
474 (*cf)->can_dlc = CAN_ERR_DLC;
475
476 return skb;
477}
478EXPORT_SYMBOL_GPL(alloc_can_err_skb);
479
452/* 480/*
453 * Allocate and setup space for the CAN network device 481 * Allocate and setup space for the CAN network device
454 */ 482 */
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 96d8be4253f8..782a47fabf2c 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -296,11 +296,9 @@ static void sja1000_rx(struct net_device *dev)
296 uint8_t dlc; 296 uint8_t dlc;
297 int i; 297 int i;
298 298
299 skb = dev_alloc_skb(sizeof(struct can_frame)); 299 skb = alloc_can_skb(dev, &cf);
300 if (skb == NULL) 300 if (skb == NULL)
301 return; 301 return;
302 skb->dev = dev;
303 skb->protocol = htons(ETH_P_CAN);
304 302
305 fi = priv->read_reg(priv, REG_FI); 303 fi = priv->read_reg(priv, REG_FI);
306 dlc = fi & 0x0F; 304 dlc = fi & 0x0F;
@@ -323,8 +321,6 @@ static void sja1000_rx(struct net_device *dev)
323 if (fi & FI_RTR) 321 if (fi & FI_RTR)
324 id |= CAN_RTR_FLAG; 322 id |= CAN_RTR_FLAG;
325 323
326 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
327 memset(cf, 0, sizeof(struct can_frame));
328 cf->can_id = id; 324 cf->can_id = id;
329 cf->can_dlc = dlc; 325 cf->can_dlc = dlc;
330 for (i = 0; i < dlc; i++) 326 for (i = 0; i < dlc; i++)
@@ -351,15 +347,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
351 enum can_state state = priv->can.state; 347 enum can_state state = priv->can.state;
352 uint8_t ecc, alc; 348 uint8_t ecc, alc;
353 349
354 skb = dev_alloc_skb(sizeof(struct can_frame)); 350 skb = alloc_can_err_skb(dev, &cf);
355 if (skb == NULL) 351 if (skb == NULL)
356 return -ENOMEM; 352 return -ENOMEM;
357 skb->dev = dev;
358 skb->protocol = htons(ETH_P_CAN);
359 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
360 memset(cf, 0, sizeof(struct can_frame));
361 cf->can_id = CAN_ERR_FLAG;
362 cf->can_dlc = CAN_ERR_DLC;
363 353
364 if (isrc & IRQ_DOI) { 354 if (isrc & IRQ_DOI) {
365 /* data overrun interrupt */ 355 /* data overrun interrupt */
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 3373560405ba..9dd076a626a5 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
213 {.compatible = "nxp,sja1000"}, 213 {.compatible = "nxp,sja1000"},
214 {}, 214 {},
215}; 215};
216MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
216 217
217static struct of_platform_driver sja1000_ofp_driver = { 218static struct of_platform_driver sja1000_ofp_driver = {
218 .owner = THIS_MODULE, 219 .owner = THIS_MODULE,
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 23a7128e4eb7..07e8016b17ec 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -535,18 +535,15 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
535 u32 data, mbx_mask; 535 u32 data, mbx_mask;
536 unsigned long flags; 536 unsigned long flags;
537 537
538 skb = netdev_alloc_skb(priv->ndev, sizeof(struct can_frame)); 538 skb = alloc_can_skb(priv->ndev, &cf);
539 if (!skb) { 539 if (!skb) {
540 if (printk_ratelimit()) 540 if (printk_ratelimit())
541 dev_err(priv->ndev->dev.parent, 541 dev_err(priv->ndev->dev.parent,
542 "ti_hecc_rx_pkt: netdev_alloc_skb() failed\n"); 542 "ti_hecc_rx_pkt: alloc_can_skb() failed\n");
543 return -ENOMEM; 543 return -ENOMEM;
544 } 544 }
545 skb->protocol = __constant_htons(ETH_P_CAN);
546 skb->ip_summed = CHECKSUM_UNNECESSARY;
547 545
548 mbx_mask = BIT(mbxno); 546 mbx_mask = BIT(mbxno);
549 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
550 data = hecc_read_mbx(priv, mbxno, HECC_CANMID); 547 data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
551 if (data & HECC_CANMID_IDE) 548 if (data & HECC_CANMID_IDE)
552 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; 549 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -656,19 +653,13 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
656 struct sk_buff *skb; 653 struct sk_buff *skb;
657 654
658 /* propogate the error condition to the can stack */ 655 /* propogate the error condition to the can stack */
659 skb = netdev_alloc_skb(ndev, sizeof(struct can_frame)); 656 skb = alloc_can_err_skb(ndev, &cf);
660 if (!skb) { 657 if (!skb) {
661 if (printk_ratelimit()) 658 if (printk_ratelimit())
662 dev_err(priv->ndev->dev.parent, 659 dev_err(priv->ndev->dev.parent,
663 "ti_hecc_error: netdev_alloc_skb() failed\n"); 660 "ti_hecc_error: alloc_can_err_skb() failed\n");
664 return -ENOMEM; 661 return -ENOMEM;
665 } 662 }
666 skb->protocol = __constant_htons(ETH_P_CAN);
667 skb->ip_summed = CHECKSUM_UNNECESSARY;
668 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
669 memset(cf, 0, sizeof(struct can_frame));
670 cf->can_id = CAN_ERR_FLAG;
671 cf->can_dlc = CAN_ERR_DLC;
672 663
673 if (int_status & HECC_CANGIF_WLIF) { /* warning level int */ 664 if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
674 if ((int_status & HECC_CANGIF_BOIF) == 0) { 665 if ((int_status & HECC_CANGIF_BOIF) == 0) {
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index a65f56a9cd3d..3685f3e42d12 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -311,14 +311,10 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
311 int i; 311 int i;
312 struct net_device_stats *stats = &dev->netdev->stats; 312 struct net_device_stats *stats = &dev->netdev->stats;
313 313
314 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 314 skb = alloc_can_skb(dev->netdev, &cf);
315 if (skb == NULL) 315 if (skb == NULL)
316 return; 316 return;
317 317
318 skb->protocol = htons(ETH_P_CAN);
319
320 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
321
322 cf->can_id = msg->msg.can_msg.id; 318 cf->can_id = msg->msg.can_msg.id;
323 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8); 319 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
324 320
@@ -346,18 +342,10 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
346 struct sk_buff *skb; 342 struct sk_buff *skb;
347 struct net_device_stats *stats = &dev->netdev->stats; 343 struct net_device_stats *stats = &dev->netdev->stats;
348 344
349 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 345 skb = alloc_can_err_skb(dev->netdev, &cf);
350 if (skb == NULL) 346 if (skb == NULL)
351 return; 347 return;
352 348
353 skb->protocol = htons(ETH_P_CAN);
354
355 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
356 memset(cf, 0, sizeof(struct can_frame));
357
358 cf->can_id = CAN_ERR_FLAG;
359 cf->can_dlc = CAN_ERR_DLC;
360
361 if (msg->type == CPC_MSG_TYPE_CAN_STATE) { 349 if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
362 u8 state = msg->msg.can_state; 350 u8 state = msg->msg.can_state;
363 351
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 333b1d1e7435..e503384e2a54 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -408,7 +408,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
408{ 408{
409 struct cnic_dev *dev; 409 struct cnic_dev *dev;
410 410
411 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 411 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
412 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", 412 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
413 ulp_type); 413 ulp_type);
414 return -EINVAL; 414 return -EINVAL;
@@ -454,7 +454,7 @@ int cnic_unregister_driver(int ulp_type)
454 struct cnic_ulp_ops *ulp_ops; 454 struct cnic_ulp_ops *ulp_ops;
455 int i = 0; 455 int i = 0;
456 456
457 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 457 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
458 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", 458 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
459 ulp_type); 459 ulp_type);
460 return -EINVAL; 460 return -EINVAL;
@@ -510,7 +510,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
510 struct cnic_local *cp = dev->cnic_priv; 510 struct cnic_local *cp = dev->cnic_priv;
511 struct cnic_ulp_ops *ulp_ops; 511 struct cnic_ulp_ops *ulp_ops;
512 512
513 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 513 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
514 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", 514 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
515 ulp_type); 515 ulp_type);
516 return -EINVAL; 516 return -EINVAL;
@@ -551,7 +551,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
551 struct cnic_local *cp = dev->cnic_priv; 551 struct cnic_local *cp = dev->cnic_priv;
552 int i = 0; 552 int i = 0;
553 553
554 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 554 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
555 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", 555 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
556 ulp_type); 556 ulp_type);
557 return -EINVAL; 557 return -EINVAL;
@@ -3560,9 +3560,9 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
3560 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 3560 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
3561 3561
3562 if (sb_id == 0) 3562 if (sb_id == 0)
3563 val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT; 3563 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
3564 else 3564 else
3565 val = BNX2_L2CTX_STATUSB_NUM(sb_id); 3565 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
3566 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 3566 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
3567 3567
3568 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE); 3568 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
@@ -3719,7 +3719,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3719 cp->int_num = 0; 3719 cp->int_num = 0;
3720 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3720 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3721 u32 sb_id = cp->status_blk_num; 3721 u32 sb_id = cp->status_blk_num;
3722 u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id); 3722 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
3723 3723
3724 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 3724 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
3725 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3725 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 47b352d982ce..cf2e1d3c0d8d 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2135,6 +2135,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2135 if (!complete) 2135 if (!complete)
2136 return; 2136 return;
2137 2137
2138 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2138 skb->ip_summed = CHECKSUM_UNNECESSARY; 2139 skb->ip_summed = CHECKSUM_UNNECESSARY;
2139 cpl = qs->lro_va; 2140 cpl = qs->lro_va;
2140 2141
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index a876dce13b9e..79ce8e857eab 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2217,7 +2217,7 @@ void emac_poll_controller(struct net_device *ndev)
2217 struct emac_priv *priv = netdev_priv(ndev); 2217 struct emac_priv *priv = netdev_priv(ndev);
2218 2218
2219 emac_int_disable(priv); 2219 emac_int_disable(priv);
2220 emac_irq(ndev->irq, priv); 2220 emac_irq(ndev->irq, ndev);
2221 emac_int_enable(priv); 2221 emac_int_enable(priv);
2222} 2222}
2223#endif 2223#endif
@@ -2806,11 +2806,33 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
2806 return 0; 2806 return 0;
2807} 2807}
2808 2808
2809static
2810int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state)
2811{
2812 struct net_device *dev = platform_get_drvdata(pdev);
2813
2814 if (netif_running(dev))
2815 emac_dev_stop(dev);
2816
2817 clk_disable(emac_clk);
2818
2819 return 0;
2820}
2821
2822static int davinci_emac_resume(struct platform_device *pdev)
2823{
2824 struct net_device *dev = platform_get_drvdata(pdev);
2825
2826 clk_enable(emac_clk);
2827
2828 if (netif_running(dev))
2829 emac_dev_open(dev);
2830
2831 return 0;
2832}
2833
2809/** 2834/**
2810 * davinci_emac_driver: EMAC platform driver structure 2835 * davinci_emac_driver: EMAC platform driver structure
2811 *
2812 * We implement only probe and remove functions - suspend/resume and
2813 * others not supported by this module
2814 */ 2836 */
2815static struct platform_driver davinci_emac_driver = { 2837static struct platform_driver davinci_emac_driver = {
2816 .driver = { 2838 .driver = {
@@ -2819,6 +2841,8 @@ static struct platform_driver davinci_emac_driver = {
2819 }, 2841 },
2820 .probe = davinci_emac_probe, 2842 .probe = davinci_emac_probe,
2821 .remove = __devexit_p(davinci_emac_remove), 2843 .remove = __devexit_p(davinci_emac_remove),
2844 .suspend = davinci_emac_suspend,
2845 .resume = davinci_emac_resume,
2822}; 2846};
2823 2847
2824/** 2848/**
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index 80817c2edfb3..fb1c924d79b4 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -50,7 +50,7 @@
50#define DM9000_RCSR 0x32 50#define DM9000_RCSR 0x32
51 51
52#define CHIPR_DM9000A 0x19 52#define CHIPR_DM9000A 0x19
53#define CHIPR_DM9000B 0x1B 53#define CHIPR_DM9000B 0x1A
54 54
55#define DM9000_MRCMDX 0xF0 55#define DM9000_MRCMDX 0xF0
56#define DM9000_MRCMD 0xF2 56#define DM9000_MRCMD 0xF2
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index ff83efd47b0d..7462fdfd7f92 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -621,6 +621,7 @@ struct nic {
621 u16 eeprom_wc; 621 u16 eeprom_wc;
622 __le16 eeprom[256]; 622 __le16 eeprom[256];
623 spinlock_t mdio_lock; 623 spinlock_t mdio_lock;
624 const struct firmware *fw;
624}; 625};
625 626
626static inline void e100_write_flush(struct nic *nic) 627static inline void e100_write_flush(struct nic *nic)
@@ -1222,9 +1223,9 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1222static const struct firmware *e100_request_firmware(struct nic *nic) 1223static const struct firmware *e100_request_firmware(struct nic *nic)
1223{ 1224{
1224 const char *fw_name; 1225 const char *fw_name;
1225 const struct firmware *fw; 1226 const struct firmware *fw = nic->fw;
1226 u8 timer, bundle, min_size; 1227 u8 timer, bundle, min_size;
1227 int err; 1228 int err = 0;
1228 1229
1229 /* do not load u-code for ICH devices */ 1230 /* do not load u-code for ICH devices */
1230 if (nic->flags & ich) 1231 if (nic->flags & ich)
@@ -1240,12 +1241,20 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1240 else /* No ucode on other devices */ 1241 else /* No ucode on other devices */
1241 return NULL; 1242 return NULL;
1242 1243
1243 err = request_firmware(&fw, fw_name, &nic->pdev->dev); 1244 /* If the firmware has not previously been loaded, request a pointer
1245 * to it. If it was previously loaded, we are reinitializing the
1246 * adapter, possibly in a resume from hibernate, in which case
1247 * request_firmware() cannot be used.
1248 */
1249 if (!fw)
1250 err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1251
1244 if (err) { 1252 if (err) {
1245 DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n", 1253 DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
1246 fw_name, err); 1254 fw_name, err);
1247 return ERR_PTR(err); 1255 return ERR_PTR(err);
1248 } 1256 }
1257
1249 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes 1258 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1250 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ 1259 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1251 if (fw->size != UCODE_SIZE * 4 + 3) { 1260 if (fw->size != UCODE_SIZE * 4 + 3) {
@@ -1268,7 +1277,10 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1268 release_firmware(fw); 1277 release_firmware(fw);
1269 return ERR_PTR(-EINVAL); 1278 return ERR_PTR(-EINVAL);
1270 } 1279 }
1271 /* OK, firmware is validated and ready to use... */ 1280
1281 /* OK, firmware is validated and ready to use. Save a pointer
1282 * to it in the nic */
1283 nic->fw = fw;
1272 return fw; 1284 return fw;
1273} 1285}
1274 1286
@@ -1426,19 +1438,31 @@ static int e100_phy_init(struct nic *nic)
1426 } else 1438 } else
1427 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id); 1439 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
1428 1440
1429 /* Isolate all the PHY ids */
1430 for (addr = 0; addr < 32; addr++)
1431 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1432 /* Select the discovered PHY */
1433 bmcr &= ~BMCR_ISOLATE;
1434 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
1435
1436 /* Get phy ID */ 1441 /* Get phy ID */
1437 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); 1442 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1438 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); 1443 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1439 nic->phy = (u32)id_hi << 16 | (u32)id_lo; 1444 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1440 DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy); 1445 DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
1441 1446
1447 /* Select the phy and isolate the rest */
1448 for (addr = 0; addr < 32; addr++) {
1449 if (addr != nic->mii.phy_id) {
1450 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1451 } else if (nic->phy != phy_82552_v) {
1452 bmcr = mdio_read(netdev, addr, MII_BMCR);
1453 mdio_write(netdev, addr, MII_BMCR,
1454 bmcr & ~BMCR_ISOLATE);
1455 }
1456 }
1457 /*
1458 * Workaround for 82552:
1459 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
1460 * other phy_id's) using bmcr value from addr discovery loop above.
1461 */
1462 if (nic->phy == phy_82552_v)
1463 mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
1464 bmcr & ~BMCR_ISOLATE);
1465
1442 /* Handle National tx phys */ 1466 /* Handle National tx phys */
1443#define NCS_PHY_MODEL_MASK 0xFFF0FFFF 1467#define NCS_PHY_MODEL_MASK 0xFFF0FFFF
1444 if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { 1468 if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index c0f185beb8bc..1190167a8b3d 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -76,6 +76,7 @@
76/* Extended Device Control */ 76/* Extended Device Control */
77#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */ 77#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
78#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 78#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
79#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
79#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 80#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
80#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ 81#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
81#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 82#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -347,6 +348,7 @@
347/* Extended Configuration Control and Size */ 348/* Extended Configuration Control and Size */
348#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 349#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
349#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 350#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
351#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
350#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 352#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
351#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 353#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
352#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 354#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 1211df9ae883..00989c5534c1 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -141,6 +141,20 @@ struct e1000_info;
141#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */ 141#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
142#define HV_TNCRS_LOWER PHY_REG(778, 30) 142#define HV_TNCRS_LOWER PHY_REG(778, 30)
143 143
144/* BM PHY Copper Specific Status */
145#define BM_CS_STATUS 17
146#define BM_CS_STATUS_LINK_UP 0x0400
147#define BM_CS_STATUS_RESOLVED 0x0800
148#define BM_CS_STATUS_SPEED_MASK 0xC000
149#define BM_CS_STATUS_SPEED_1000 0x8000
150
151/* 82577 Mobile Phy Status Register */
152#define HV_M_STATUS 26
153#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
154#define HV_M_STATUS_SPEED_MASK 0x0300
155#define HV_M_STATUS_SPEED_1000 0x0200
156#define HV_M_STATUS_LINK_UP 0x0040
157
144enum e1000_boards { 158enum e1000_boards {
145 board_82571, 159 board_82571,
146 board_82572, 160 board_82572,
@@ -518,9 +532,13 @@ extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
518extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); 532extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
519extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); 533extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
520extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); 534extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
535extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
536 u16 *data);
521extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); 537extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
522extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); 538extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
523extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); 539extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
540extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
541 u16 data);
524extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); 542extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
525extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); 543extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
526extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); 544extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
@@ -537,7 +555,11 @@ extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
537extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); 555extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
538extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); 556extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
539extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); 557extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
558extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
559 u16 data);
540extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); 560extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
561extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
562 u16 *data);
541extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, 563extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
542 u32 usec_interval, bool *success); 564 u32 usec_interval, bool *success);
543extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); 565extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
@@ -545,7 +567,11 @@ extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
545extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); 567extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
546extern s32 e1000e_check_downshift(struct e1000_hw *hw); 568extern s32 e1000e_check_downshift(struct e1000_hw *hw);
547extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); 569extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
570extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
571 u16 *data);
548extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); 572extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
573extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
574 u16 data);
549extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow); 575extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
550extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); 576extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
551extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); 577extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a70999b8c6cf..0364b91488af 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -335,10 +335,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,
335 335
336 hw->fc.current_mode = hw->fc.requested_mode; 336 hw->fc.current_mode = hw->fc.requested_mode;
337 337
338 retval = ((hw->phy.media_type == e1000_media_type_fiber) ? 338 if (hw->phy.media_type == e1000_media_type_fiber) {
339 hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); 339 retval = hw->mac.ops.setup_link(hw);
340 /* implicit goto out */
341 } else {
342 retval = e1000e_force_mac_fc(hw);
343 if (retval)
344 goto out;
345 e1000e_set_fc_watermarks(hw);
346 }
340 } 347 }
341 348
349out:
342 clear_bit(__E1000_RESETTING, &adapter->state); 350 clear_bit(__E1000_RESETTING, &adapter->state);
343 return retval; 351 return retval;
344} 352}
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index fd44d9f90769..aaea41ef794d 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -764,11 +764,13 @@ struct e1000_phy_operations {
764 s32 (*get_cable_length)(struct e1000_hw *); 764 s32 (*get_cable_length)(struct e1000_hw *);
765 s32 (*get_phy_info)(struct e1000_hw *); 765 s32 (*get_phy_info)(struct e1000_hw *);
766 s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); 766 s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
767 s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
767 void (*release_phy)(struct e1000_hw *); 768 void (*release_phy)(struct e1000_hw *);
768 s32 (*reset_phy)(struct e1000_hw *); 769 s32 (*reset_phy)(struct e1000_hw *);
769 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); 770 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
770 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); 771 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
771 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); 772 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
773 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
772 s32 (*cfg_on_link_up)(struct e1000_hw *); 774 s32 (*cfg_on_link_up)(struct e1000_hw *);
773}; 775};
774 776
@@ -901,6 +903,7 @@ struct e1000_shadow_ram {
901struct e1000_dev_spec_ich8lan { 903struct e1000_dev_spec_ich8lan {
902 bool kmrn_lock_loss_workaround_enabled; 904 bool kmrn_lock_loss_workaround_enabled;
903 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; 905 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
906 bool nvm_k1_enabled;
904}; 907};
905 908
906struct e1000_hw { 909struct e1000_hw {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 99df2abf82a9..51ddb04ab195 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -122,6 +122,27 @@
122 122
123#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ 123#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
124 124
125#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
126
127/* SMBus Address Phy Register */
128#define HV_SMB_ADDR PHY_REG(768, 26)
129#define HV_SMB_ADDR_PEC_EN 0x0200
130#define HV_SMB_ADDR_VALID 0x0080
131
132/* Strapping Option Register - RO */
133#define E1000_STRAP 0x0000C
134#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
135#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
136
137/* OEM Bits Phy Register */
138#define HV_OEM_BITS PHY_REG(768, 25)
139#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
140#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
141#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
142
143#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
144#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
145
125/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ 146/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
126/* Offset 04h HSFSTS */ 147/* Offset 04h HSFSTS */
127union ich8_hws_flash_status { 148union ich8_hws_flash_status {
@@ -200,6 +221,10 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
200static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); 221static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
201static s32 e1000_led_on_pchlan(struct e1000_hw *hw); 222static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
202static s32 e1000_led_off_pchlan(struct e1000_hw *hw); 223static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
224static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
225static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
226static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
227static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
203 228
204static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 229static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
205{ 230{
@@ -242,7 +267,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
242 267
243 phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; 268 phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
244 phy->ops.read_phy_reg = e1000_read_phy_reg_hv; 269 phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
270 phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
271 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
272 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
245 phy->ops.write_phy_reg = e1000_write_phy_reg_hv; 273 phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
274 phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
246 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 275 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
247 276
248 phy->id = e1000_phy_unknown; 277 phy->id = e1000_phy_unknown;
@@ -303,6 +332,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
303 case IGP03E1000_E_PHY_ID: 332 case IGP03E1000_E_PHY_ID:
304 phy->type = e1000_phy_igp_3; 333 phy->type = e1000_phy_igp_3;
305 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 334 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
335 phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
336 phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
306 break; 337 break;
307 case IFE_E_PHY_ID: 338 case IFE_E_PHY_ID:
308 case IFE_PLUS_E_PHY_ID: 339 case IFE_PLUS_E_PHY_ID:
@@ -469,14 +500,6 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
469 goto out; 500 goto out;
470 } 501 }
471 502
472 if (hw->mac.type == e1000_pchlan) {
473 ret_val = e1000e_write_kmrn_reg(hw,
474 E1000_KMRNCTRLSTA_K1_CONFIG,
475 E1000_KMRNCTRLSTA_K1_ENABLE);
476 if (ret_val)
477 goto out;
478 }
479
480 /* 503 /*
481 * First we want to see if the MII Status Register reports 504 * First we want to see if the MII Status Register reports
482 * link. If so, then we want to get the current speed/duplex 505 * link. If so, then we want to get the current speed/duplex
@@ -486,6 +509,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
486 if (ret_val) 509 if (ret_val)
487 goto out; 510 goto out;
488 511
512 if (hw->mac.type == e1000_pchlan) {
513 ret_val = e1000_k1_gig_workaround_hv(hw, link);
514 if (ret_val)
515 goto out;
516 }
517
489 if (!link) 518 if (!link)
490 goto out; /* No link detected */ 519 goto out; /* No link detected */
491 520
@@ -568,12 +597,39 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
568static DEFINE_MUTEX(nvm_mutex); 597static DEFINE_MUTEX(nvm_mutex);
569 598
570/** 599/**
600 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
601 * @hw: pointer to the HW structure
602 *
603 * Acquires the mutex for performing NVM operations.
604 **/
605static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
606{
607 mutex_lock(&nvm_mutex);
608
609 return 0;
610}
611
612/**
613 * e1000_release_nvm_ich8lan - Release NVM mutex
614 * @hw: pointer to the HW structure
615 *
616 * Releases the mutex used while performing NVM operations.
617 **/
618static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
619{
620 mutex_unlock(&nvm_mutex);
621
622 return;
623}
624
625static DEFINE_MUTEX(swflag_mutex);
626
627/**
571 * e1000_acquire_swflag_ich8lan - Acquire software control flag 628 * e1000_acquire_swflag_ich8lan - Acquire software control flag
572 * @hw: pointer to the HW structure 629 * @hw: pointer to the HW structure
573 * 630 *
574 * Acquires the software control flag for performing NVM and PHY 631 * Acquires the software control flag for performing PHY and select
575 * operations. This is a function pointer entry point only called by 632 * MAC CSR accesses.
576 * read/write routines for the PHY and NVM parts.
577 **/ 633 **/
578static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) 634static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
579{ 635{
@@ -582,7 +638,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
582 638
583 might_sleep(); 639 might_sleep();
584 640
585 mutex_lock(&nvm_mutex); 641 mutex_lock(&swflag_mutex);
586 642
587 while (timeout) { 643 while (timeout) {
588 extcnf_ctrl = er32(EXTCNF_CTRL); 644 extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -599,7 +655,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
599 goto out; 655 goto out;
600 } 656 }
601 657
602 timeout = PHY_CFG_TIMEOUT * 2; 658 timeout = SW_FLAG_TIMEOUT;
603 659
604 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 660 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
605 ew32(EXTCNF_CTRL, extcnf_ctrl); 661 ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -623,7 +679,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
623 679
624out: 680out:
625 if (ret_val) 681 if (ret_val)
626 mutex_unlock(&nvm_mutex); 682 mutex_unlock(&swflag_mutex);
627 683
628 return ret_val; 684 return ret_val;
629} 685}
@@ -632,9 +688,8 @@ out:
632 * e1000_release_swflag_ich8lan - Release software control flag 688 * e1000_release_swflag_ich8lan - Release software control flag
633 * @hw: pointer to the HW structure 689 * @hw: pointer to the HW structure
634 * 690 *
635 * Releases the software control flag for performing NVM and PHY operations. 691 * Releases the software control flag for performing PHY and select
636 * This is a function pointer entry point only called by read/write 692 * MAC CSR accesses.
637 * routines for the PHY and NVM parts.
638 **/ 693 **/
639static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) 694static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
640{ 695{
@@ -644,7 +699,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
644 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 699 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
645 ew32(EXTCNF_CTRL, extcnf_ctrl); 700 ew32(EXTCNF_CTRL, extcnf_ctrl);
646 701
647 mutex_unlock(&nvm_mutex); 702 mutex_unlock(&swflag_mutex);
703
704 return;
648} 705}
649 706
650/** 707/**
@@ -752,6 +809,326 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
752} 809}
753 810
754/** 811/**
812 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
813 * @hw: pointer to the HW structure
814 *
815 * SW should configure the LCD from the NVM extended configuration region
816 * as a workaround for certain parts.
817 **/
818static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
819{
820 struct e1000_phy_info *phy = &hw->phy;
821 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
822 s32 ret_val;
823 u16 word_addr, reg_data, reg_addr, phy_page = 0;
824
825 ret_val = hw->phy.ops.acquire_phy(hw);
826 if (ret_val)
827 return ret_val;
828
829 /*
830 * Initialize the PHY from the NVM on ICH platforms. This
831 * is needed due to an issue where the NVM configuration is
832 * not properly autoloaded after power transitions.
833 * Therefore, after each PHY reset, we will load the
834 * configuration data out of the NVM manually.
835 */
836 if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
837 (hw->mac.type == e1000_pchlan)) {
838 struct e1000_adapter *adapter = hw->adapter;
839
840 /* Check if SW needs to configure the PHY */
841 if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
842 (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
843 (hw->mac.type == e1000_pchlan))
844 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
845 else
846 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
847
848 data = er32(FEXTNVM);
849 if (!(data & sw_cfg_mask))
850 goto out;
851
852 /* Wait for basic configuration completes before proceeding */
853 e1000_lan_init_done_ich8lan(hw);
854
855 /*
856 * Make sure HW does not configure LCD from PHY
857 * extended configuration before SW configuration
858 */
859 data = er32(EXTCNF_CTRL);
860 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
861 goto out;
862
863 cnf_size = er32(EXTCNF_SIZE);
864 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
865 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
866 if (!cnf_size)
867 goto out;
868
869 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
870 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
871
872 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
873 (hw->mac.type == e1000_pchlan)) {
874 /*
875 * HW configures the SMBus address and LEDs when the
876 * OEM and LCD Write Enable bits are set in the NVM.
877 * When both NVM bits are cleared, SW will configure
878 * them instead.
879 */
880 data = er32(STRAP);
881 data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
882 reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
883 reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
884 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
885 reg_data);
886 if (ret_val)
887 goto out;
888
889 data = er32(LEDCTL);
890 ret_val = e1000_write_phy_reg_hv_locked(hw,
891 HV_LED_CONFIG,
892 (u16)data);
893 if (ret_val)
894 goto out;
895 }
896 /* Configure LCD from extended configuration region. */
897
898 /* cnf_base_addr is in DWORD */
899 word_addr = (u16)(cnf_base_addr << 1);
900
901 for (i = 0; i < cnf_size; i++) {
902 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
903 &reg_data);
904 if (ret_val)
905 goto out;
906
907 ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
908 1, &reg_addr);
909 if (ret_val)
910 goto out;
911
912 /* Save off the PHY page for future writes. */
913 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
914 phy_page = reg_data;
915 continue;
916 }
917
918 reg_addr &= PHY_REG_MASK;
919 reg_addr |= phy_page;
920
921 ret_val = phy->ops.write_phy_reg_locked(hw,
922 (u32)reg_addr,
923 reg_data);
924 if (ret_val)
925 goto out;
926 }
927 }
928
929out:
930 hw->phy.ops.release_phy(hw);
931 return ret_val;
932}
933
934/**
935 * e1000_k1_gig_workaround_hv - K1 Si workaround
936 * @hw: pointer to the HW structure
937 * @link: link up bool flag
938 *
939 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
940 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
941 * If link is down, the function will restore the default K1 setting located
942 * in the NVM.
943 **/
944static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
945{
946 s32 ret_val = 0;
947 u16 status_reg = 0;
948 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
949
950 if (hw->mac.type != e1000_pchlan)
951 goto out;
952
953 /* Wrap the whole flow with the sw flag */
954 ret_val = hw->phy.ops.acquire_phy(hw);
955 if (ret_val)
956 goto out;
957
958 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
959 if (link) {
960 if (hw->phy.type == e1000_phy_82578) {
961 ret_val = hw->phy.ops.read_phy_reg_locked(hw,
962 BM_CS_STATUS,
963 &status_reg);
964 if (ret_val)
965 goto release;
966
967 status_reg &= BM_CS_STATUS_LINK_UP |
968 BM_CS_STATUS_RESOLVED |
969 BM_CS_STATUS_SPEED_MASK;
970
971 if (status_reg == (BM_CS_STATUS_LINK_UP |
972 BM_CS_STATUS_RESOLVED |
973 BM_CS_STATUS_SPEED_1000))
974 k1_enable = false;
975 }
976
977 if (hw->phy.type == e1000_phy_82577) {
978 ret_val = hw->phy.ops.read_phy_reg_locked(hw,
979 HV_M_STATUS,
980 &status_reg);
981 if (ret_val)
982 goto release;
983
984 status_reg &= HV_M_STATUS_LINK_UP |
985 HV_M_STATUS_AUTONEG_COMPLETE |
986 HV_M_STATUS_SPEED_MASK;
987
988 if (status_reg == (HV_M_STATUS_LINK_UP |
989 HV_M_STATUS_AUTONEG_COMPLETE |
990 HV_M_STATUS_SPEED_1000))
991 k1_enable = false;
992 }
993
994 /* Link stall fix for link up */
995 ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
996 0x0100);
997 if (ret_val)
998 goto release;
999
1000 } else {
1001 /* Link stall fix for link down */
1002 ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
1003 0x4100);
1004 if (ret_val)
1005 goto release;
1006 }
1007
1008 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1009
1010release:
1011 hw->phy.ops.release_phy(hw);
1012out:
1013 return ret_val;
1014}
1015
1016/**
1017 * e1000_configure_k1_ich8lan - Configure K1 power state
1018 * @hw: pointer to the HW structure
1019 * @enable: K1 state to configure
1020 *
1021 * Configure the K1 power state based on the provided parameter.
1022 * Assumes semaphore already acquired.
1023 *
1024 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1025 **/
1026static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1027{
1028 s32 ret_val = 0;
1029 u32 ctrl_reg = 0;
1030 u32 ctrl_ext = 0;
1031 u32 reg = 0;
1032 u16 kmrn_reg = 0;
1033
1034 ret_val = e1000e_read_kmrn_reg_locked(hw,
1035 E1000_KMRNCTRLSTA_K1_CONFIG,
1036 &kmrn_reg);
1037 if (ret_val)
1038 goto out;
1039
1040 if (k1_enable)
1041 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1042 else
1043 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1044
1045 ret_val = e1000e_write_kmrn_reg_locked(hw,
1046 E1000_KMRNCTRLSTA_K1_CONFIG,
1047 kmrn_reg);
1048 if (ret_val)
1049 goto out;
1050
1051 udelay(20);
1052 ctrl_ext = er32(CTRL_EXT);
1053 ctrl_reg = er32(CTRL);
1054
1055 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1056 reg |= E1000_CTRL_FRCSPD;
1057 ew32(CTRL, reg);
1058
1059 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1060 udelay(20);
1061 ew32(CTRL, ctrl_reg);
1062 ew32(CTRL_EXT, ctrl_ext);
1063 udelay(20);
1064
1065out:
1066 return ret_val;
1067}
1068
1069/**
1070 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1071 * @hw: pointer to the HW structure
1072 * @d0_state: boolean if entering d0 or d3 device state
1073 *
1074 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1075 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1076 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1077 **/
1078static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1079{
1080 s32 ret_val = 0;
1081 u32 mac_reg;
1082 u16 oem_reg;
1083
1084 if (hw->mac.type != e1000_pchlan)
1085 return ret_val;
1086
1087 ret_val = hw->phy.ops.acquire_phy(hw);
1088 if (ret_val)
1089 return ret_val;
1090
1091 mac_reg = er32(EXTCNF_CTRL);
1092 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1093 goto out;
1094
1095 mac_reg = er32(FEXTNVM);
1096 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1097 goto out;
1098
1099 mac_reg = er32(PHY_CTRL);
1100
1101 ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1102 if (ret_val)
1103 goto out;
1104
1105 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1106
1107 if (d0_state) {
1108 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1109 oem_reg |= HV_OEM_BITS_GBE_DIS;
1110
1111 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1112 oem_reg |= HV_OEM_BITS_LPLU;
1113 } else {
1114 if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1115 oem_reg |= HV_OEM_BITS_GBE_DIS;
1116
1117 if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1118 oem_reg |= HV_OEM_BITS_LPLU;
1119 }
1120 /* Restart auto-neg to activate the bits */
1121 oem_reg |= HV_OEM_BITS_RESTART_AN;
1122 ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg);
1123
1124out:
1125 hw->phy.ops.release_phy(hw);
1126
1127 return ret_val;
1128}
1129
1130
1131/**
755 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be 1132 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
756 * done after every PHY reset. 1133 * done after every PHY reset.
757 **/ 1134 **/
@@ -791,10 +1168,20 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
791 ret_val = hw->phy.ops.acquire_phy(hw); 1168 ret_val = hw->phy.ops.acquire_phy(hw);
792 if (ret_val) 1169 if (ret_val)
793 return ret_val; 1170 return ret_val;
1171
794 hw->phy.addr = 1; 1172 hw->phy.addr = 1;
795 e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); 1173 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1174 if (ret_val)
1175 goto out;
796 hw->phy.ops.release_phy(hw); 1176 hw->phy.ops.release_phy(hw);
797 1177
1178 /*
1179 * Configure the K1 Si workaround during phy reset assuming there is
1180 * link so that it disables K1 if link is in 1Gbps.
1181 */
1182 ret_val = e1000_k1_gig_workaround_hv(hw, true);
1183
1184out:
798 return ret_val; 1185 return ret_val;
799} 1186}
800 1187
@@ -840,11 +1227,8 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
840 **/ 1227 **/
841static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) 1228static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
842{ 1229{
843 struct e1000_phy_info *phy = &hw->phy; 1230 s32 ret_val = 0;
844 u32 i; 1231 u16 reg;
845 u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
846 s32 ret_val;
847 u16 word_addr, reg_data, reg_addr, phy_page = 0;
848 1232
849 ret_val = e1000e_phy_hw_reset_generic(hw); 1233 ret_val = e1000e_phy_hw_reset_generic(hw);
850 if (ret_val) 1234 if (ret_val)
@@ -859,81 +1243,20 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
859 return ret_val; 1243 return ret_val;
860 } 1244 }
861 1245
862 /* 1246 /* Dummy read to clear the phy wakeup bit after lcd reset */
863 * Initialize the PHY from the NVM on ICH platforms. This 1247 if (hw->mac.type == e1000_pchlan)
864 * is needed due to an issue where the NVM configuration is 1248 e1e_rphy(hw, BM_WUC, &reg);
865 * not properly autoloaded after power transitions.
866 * Therefore, after each PHY reset, we will load the
867 * configuration data out of the NVM manually.
868 */
869 if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
870 struct e1000_adapter *adapter = hw->adapter;
871
872 /* Check if SW needs configure the PHY */
873 if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
874 (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M))
875 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
876 else
877 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
878
879 data = er32(FEXTNVM);
880 if (!(data & sw_cfg_mask))
881 return 0;
882
883 /* Wait for basic configuration completes before proceeding */
884 e1000_lan_init_done_ich8lan(hw);
885
886 /*
887 * Make sure HW does not configure LCD from PHY
888 * extended configuration before SW configuration
889 */
890 data = er32(EXTCNF_CTRL);
891 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
892 return 0;
893
894 cnf_size = er32(EXTCNF_SIZE);
895 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
896 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
897 if (!cnf_size)
898 return 0;
899
900 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
901 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
902
903 /* Configure LCD from extended configuration region. */
904
905 /* cnf_base_addr is in DWORD */
906 word_addr = (u16)(cnf_base_addr << 1);
907
908 for (i = 0; i < cnf_size; i++) {
909 ret_val = e1000_read_nvm(hw,
910 (word_addr + i * 2),
911 1,
912 &reg_data);
913 if (ret_val)
914 return ret_val;
915
916 ret_val = e1000_read_nvm(hw,
917 (word_addr + i * 2 + 1),
918 1,
919 &reg_addr);
920 if (ret_val)
921 return ret_val;
922
923 /* Save off the PHY page for future writes. */
924 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
925 phy_page = reg_data;
926 continue;
927 }
928 1249
929 reg_addr |= phy_page; 1250 /* Configure the LCD with the extended configuration region in NVM */
1251 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1252 if (ret_val)
1253 goto out;
930 1254
931 ret_val = e1e_wphy(hw, (u32)reg_addr, reg_data); 1255 /* Configure the LCD with the OEM bits in NVM */
932 if (ret_val) 1256 if (hw->mac.type == e1000_pchlan)
933 return ret_val; 1257 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
934 }
935 }
936 1258
1259out:
937 return 0; 1260 return 0;
938} 1261}
939 1262
@@ -1054,6 +1377,38 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
1054} 1377}
1055 1378
1056/** 1379/**
1380 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1381 * @hw: pointer to the HW structure
1382 * @active: true to enable LPLU, false to disable
1383 *
1384 * Sets the LPLU state according to the active flag. For PCH, if OEM write
1385 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
1386 * the phy speed. This function will manually set the LPLU bit and restart
1387 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
1388 * since it configures the same bit.
1389 **/
1390static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1391{
1392 s32 ret_val = 0;
1393 u16 oem_reg;
1394
1395 ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
1396 if (ret_val)
1397 goto out;
1398
1399 if (active)
1400 oem_reg |= HV_OEM_BITS_LPLU;
1401 else
1402 oem_reg &= ~HV_OEM_BITS_LPLU;
1403
1404 oem_reg |= HV_OEM_BITS_RESTART_AN;
1405 ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
1406
1407out:
1408 return ret_val;
1409}
1410
1411/**
1057 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state 1412 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1058 * @hw: pointer to the HW structure 1413 * @hw: pointer to the HW structure
1059 * @active: TRUE to enable LPLU, FALSE to disable 1414 * @active: TRUE to enable LPLU, FALSE to disable
@@ -1314,12 +1669,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1314 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 1669 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
1315 (words == 0)) { 1670 (words == 0)) {
1316 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1671 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1317 return -E1000_ERR_NVM; 1672 ret_val = -E1000_ERR_NVM;
1673 goto out;
1318 } 1674 }
1319 1675
1320 ret_val = e1000_acquire_swflag_ich8lan(hw); 1676 nvm->ops.acquire_nvm(hw);
1321 if (ret_val)
1322 goto out;
1323 1677
1324 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1678 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1325 if (ret_val) { 1679 if (ret_val) {
@@ -1345,7 +1699,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1345 } 1699 }
1346 } 1700 }
1347 1701
1348 e1000_release_swflag_ich8lan(hw); 1702 nvm->ops.release_nvm(hw);
1349 1703
1350out: 1704out:
1351 if (ret_val) 1705 if (ret_val)
@@ -1603,11 +1957,15 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1603 return -E1000_ERR_NVM; 1957 return -E1000_ERR_NVM;
1604 } 1958 }
1605 1959
1960 nvm->ops.acquire_nvm(hw);
1961
1606 for (i = 0; i < words; i++) { 1962 for (i = 0; i < words; i++) {
1607 dev_spec->shadow_ram[offset+i].modified = 1; 1963 dev_spec->shadow_ram[offset+i].modified = 1;
1608 dev_spec->shadow_ram[offset+i].value = data[i]; 1964 dev_spec->shadow_ram[offset+i].value = data[i];
1609 } 1965 }
1610 1966
1967 nvm->ops.release_nvm(hw);
1968
1611 return 0; 1969 return 0;
1612} 1970}
1613 1971
@@ -1637,9 +1995,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1637 if (nvm->type != e1000_nvm_flash_sw) 1995 if (nvm->type != e1000_nvm_flash_sw)
1638 goto out; 1996 goto out;
1639 1997
1640 ret_val = e1000_acquire_swflag_ich8lan(hw); 1998 nvm->ops.acquire_nvm(hw);
1641 if (ret_val)
1642 goto out;
1643 1999
1644 /* 2000 /*
1645 * We're writing to the opposite bank so if we're on bank 1, 2001 * We're writing to the opposite bank so if we're on bank 1,
@@ -1657,7 +2013,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1657 old_bank_offset = 0; 2013 old_bank_offset = 0;
1658 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); 2014 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
1659 if (ret_val) { 2015 if (ret_val) {
1660 e1000_release_swflag_ich8lan(hw); 2016 nvm->ops.release_nvm(hw);
1661 goto out; 2017 goto out;
1662 } 2018 }
1663 } else { 2019 } else {
@@ -1665,7 +2021,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1665 new_bank_offset = 0; 2021 new_bank_offset = 0;
1666 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); 2022 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
1667 if (ret_val) { 2023 if (ret_val) {
1668 e1000_release_swflag_ich8lan(hw); 2024 nvm->ops.release_nvm(hw);
1669 goto out; 2025 goto out;
1670 } 2026 }
1671 } 2027 }
@@ -1723,7 +2079,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1723 if (ret_val) { 2079 if (ret_val) {
1724 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ 2080 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
1725 hw_dbg(hw, "Flash commit failed.\n"); 2081 hw_dbg(hw, "Flash commit failed.\n");
1726 e1000_release_swflag_ich8lan(hw); 2082 nvm->ops.release_nvm(hw);
1727 goto out; 2083 goto out;
1728 } 2084 }
1729 2085
@@ -1736,7 +2092,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1736 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 2092 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
1737 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); 2093 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
1738 if (ret_val) { 2094 if (ret_val) {
1739 e1000_release_swflag_ich8lan(hw); 2095 nvm->ops.release_nvm(hw);
1740 goto out; 2096 goto out;
1741 } 2097 }
1742 data &= 0xBFFF; 2098 data &= 0xBFFF;
@@ -1744,7 +2100,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1744 act_offset * 2 + 1, 2100 act_offset * 2 + 1,
1745 (u8)(data >> 8)); 2101 (u8)(data >> 8));
1746 if (ret_val) { 2102 if (ret_val) {
1747 e1000_release_swflag_ich8lan(hw); 2103 nvm->ops.release_nvm(hw);
1748 goto out; 2104 goto out;
1749 } 2105 }
1750 2106
@@ -1757,7 +2113,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1757 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 2113 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
1758 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 2114 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
1759 if (ret_val) { 2115 if (ret_val) {
1760 e1000_release_swflag_ich8lan(hw); 2116 nvm->ops.release_nvm(hw);
1761 goto out; 2117 goto out;
1762 } 2118 }
1763 2119
@@ -1767,7 +2123,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1767 dev_spec->shadow_ram[i].value = 0xFFFF; 2123 dev_spec->shadow_ram[i].value = 0xFFFF;
1768 } 2124 }
1769 2125
1770 e1000_release_swflag_ich8lan(hw); 2126 nvm->ops.release_nvm(hw);
1771 2127
1772 /* 2128 /*
1773 * Reload the EEPROM, or else modifications will not appear 2129 * Reload the EEPROM, or else modifications will not appear
@@ -1831,14 +2187,12 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
1831 **/ 2187 **/
1832void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) 2188void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
1833{ 2189{
2190 struct e1000_nvm_info *nvm = &hw->nvm;
1834 union ich8_flash_protected_range pr0; 2191 union ich8_flash_protected_range pr0;
1835 union ich8_hws_flash_status hsfsts; 2192 union ich8_hws_flash_status hsfsts;
1836 u32 gfpreg; 2193 u32 gfpreg;
1837 s32 ret_val;
1838 2194
1839 ret_val = e1000_acquire_swflag_ich8lan(hw); 2195 nvm->ops.acquire_nvm(hw);
1840 if (ret_val)
1841 return;
1842 2196
1843 gfpreg = er32flash(ICH_FLASH_GFPREG); 2197 gfpreg = er32flash(ICH_FLASH_GFPREG);
1844 2198
@@ -1859,7 +2213,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
1859 hsfsts.hsf_status.flockdn = true; 2213 hsfsts.hsf_status.flockdn = true;
1860 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); 2214 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
1861 2215
1862 e1000_release_swflag_ich8lan(hw); 2216 nvm->ops.release_nvm(hw);
1863} 2217}
1864 2218
1865/** 2219/**
@@ -2229,6 +2583,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
2229 **/ 2583 **/
2230static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) 2584static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2231{ 2585{
2586 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2587 u16 reg;
2232 u32 ctrl, icr, kab; 2588 u32 ctrl, icr, kab;
2233 s32 ret_val; 2589 s32 ret_val;
2234 2590
@@ -2263,6 +2619,18 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2263 ew32(PBS, E1000_PBS_16K); 2619 ew32(PBS, E1000_PBS_16K);
2264 } 2620 }
2265 2621
2622 if (hw->mac.type == e1000_pchlan) {
2623 /* Save the NVM K1 bit setting*/
2624 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
2625 if (ret_val)
2626 return ret_val;
2627
2628 if (reg & E1000_NVM_K1_ENABLE)
2629 dev_spec->nvm_k1_enabled = true;
2630 else
2631 dev_spec->nvm_k1_enabled = false;
2632 }
2633
2266 ctrl = er32(CTRL); 2634 ctrl = er32(CTRL);
2267 2635
2268 if (!e1000_check_reset_block(hw)) { 2636 if (!e1000_check_reset_block(hw)) {
@@ -2304,7 +2672,19 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2304 hw_dbg(hw, "Auto Read Done did not complete\n"); 2672 hw_dbg(hw, "Auto Read Done did not complete\n");
2305 } 2673 }
2306 } 2674 }
2675 /* Dummy read to clear the phy wakeup bit after lcd reset */
2676 if (hw->mac.type == e1000_pchlan)
2677 e1e_rphy(hw, BM_WUC, &reg);
2307 2678
2679 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2680 if (ret_val)
2681 goto out;
2682
2683 if (hw->mac.type == e1000_pchlan) {
2684 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2685 if (ret_val)
2686 goto out;
2687 }
2308 /* 2688 /*
2309 * For PCH, this write will make sure that any noise 2689 * For PCH, this write will make sure that any noise
2310 * will be detected as a CRC error and be dropped rather than show up 2690 * will be detected as a CRC error and be dropped rather than show up
@@ -2323,6 +2703,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2323 if (hw->mac.type == e1000_pchlan) 2703 if (hw->mac.type == e1000_pchlan)
2324 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 2704 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2325 2705
2706out:
2326 return ret_val; 2707 return ret_val;
2327} 2708}
2328 2709
@@ -2627,14 +3008,6 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
2627 if (ret_val) 3008 if (ret_val)
2628 return ret_val; 3009 return ret_val;
2629 3010
2630 if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) {
2631 ret_val = e1000e_write_kmrn_reg(hw,
2632 E1000_KMRNCTRLSTA_K1_CONFIG,
2633 E1000_KMRNCTRLSTA_K1_DISABLE);
2634 if (ret_val)
2635 return ret_val;
2636 }
2637
2638 if ((hw->mac.type == e1000_ich8lan) && 3011 if ((hw->mac.type == e1000_ich8lan) &&
2639 (hw->phy.type == e1000_phy_igp_3) && 3012 (hw->phy.type == e1000_phy_igp_3) &&
2640 (*speed == SPEED_1000)) { 3013 (*speed == SPEED_1000)) {
@@ -2843,9 +3216,8 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
2843 E1000_PHY_CTRL_GBE_DISABLE; 3216 E1000_PHY_CTRL_GBE_DISABLE;
2844 ew32(PHY_CTRL, phy_ctrl); 3217 ew32(PHY_CTRL, phy_ctrl);
2845 3218
2846 /* Workaround SWFLAG unexpectedly set during S0->Sx */
2847 if (hw->mac.type == e1000_pchlan) 3219 if (hw->mac.type == e1000_pchlan)
2848 udelay(500); 3220 e1000_phy_hw_reset_ich8lan(hw);
2849 default: 3221 default:
2850 break; 3222 break;
2851 } 3223 }
@@ -3113,9 +3485,9 @@ static struct e1000_phy_operations ich8_phy_ops = {
3113}; 3485};
3114 3486
3115static struct e1000_nvm_operations ich8_nvm_ops = { 3487static struct e1000_nvm_operations ich8_nvm_ops = {
3116 .acquire_nvm = e1000_acquire_swflag_ich8lan, 3488 .acquire_nvm = e1000_acquire_nvm_ich8lan,
3117 .read_nvm = e1000_read_nvm_ich8lan, 3489 .read_nvm = e1000_read_nvm_ich8lan,
3118 .release_nvm = e1000_release_swflag_ich8lan, 3490 .release_nvm = e1000_release_nvm_ich8lan,
3119 .update_nvm = e1000_update_nvm_checksum_ich8lan, 3491 .update_nvm = e1000_update_nvm_checksum_ich8lan,
3120 .valid_led_default = e1000_valid_led_default_ich8lan, 3492 .valid_led_default = e1000_valid_led_default_ich8lan,
3121 .validate_nvm = e1000_validate_nvm_checksum_ich8lan, 3493 .validate_nvm = e1000_validate_nvm_checksum_ich8lan,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 994401fd0664..03175b3a2c9e 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -95,13 +95,6 @@ static const u16 e1000_igp_2_cable_length_table[] =
95/* BM PHY Copper Specific Control 1 */ 95/* BM PHY Copper Specific Control 1 */
96#define BM_CS_CTRL1 16 96#define BM_CS_CTRL1 16
97 97
98/* BM PHY Copper Specific Status */
99#define BM_CS_STATUS 17
100#define BM_CS_STATUS_LINK_UP 0x0400
101#define BM_CS_STATUS_RESOLVED 0x0800
102#define BM_CS_STATUS_SPEED_MASK 0xC000
103#define BM_CS_STATUS_SPEED_1000 0x8000
104
105#define HV_MUX_DATA_CTRL PHY_REG(776, 16) 98#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
106#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 99#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
107#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 100#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
@@ -164,16 +157,25 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
164 * MDIC mode. No harm in trying again in this case since 157 * MDIC mode. No harm in trying again in this case since
165 * the PHY ID is unknown at this point anyway 158 * the PHY ID is unknown at this point anyway
166 */ 159 */
160 ret_val = phy->ops.acquire_phy(hw);
161 if (ret_val)
162 goto out;
167 ret_val = e1000_set_mdio_slow_mode_hv(hw, true); 163 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
168 if (ret_val) 164 if (ret_val)
169 goto out; 165 goto out;
166 phy->ops.release_phy(hw);
170 167
171 retry_count++; 168 retry_count++;
172 } 169 }
173out: 170out:
174 /* Revert to MDIO fast mode, if applicable */ 171 /* Revert to MDIO fast mode, if applicable */
175 if (retry_count) 172 if (retry_count) {
173 ret_val = phy->ops.acquire_phy(hw);
174 if (ret_val)
175 return ret_val;
176 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 176 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
177 phy->ops.release_phy(hw);
178 }
177 179
178 return ret_val; 180 return ret_val;
179} 181}
@@ -354,94 +356,173 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
354} 356}
355 357
356/** 358/**
357 * e1000e_read_phy_reg_igp - Read igp PHY register 359 * __e1000e_read_phy_reg_igp - Read igp PHY register
358 * @hw: pointer to the HW structure 360 * @hw: pointer to the HW structure
359 * @offset: register offset to be read 361 * @offset: register offset to be read
360 * @data: pointer to the read data 362 * @data: pointer to the read data
363 * @locked: semaphore has already been acquired or not
361 * 364 *
362 * Acquires semaphore, if necessary, then reads the PHY register at offset 365 * Acquires semaphore, if necessary, then reads the PHY register at offset
363 * and storing the retrieved information in data. Release any acquired 366 * and stores the retrieved information in data. Release any acquired
364 * semaphores before exiting. 367 * semaphores before exiting.
365 **/ 368 **/
366s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) 369static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
370 bool locked)
367{ 371{
368 s32 ret_val; 372 s32 ret_val = 0;
369 373
370 ret_val = hw->phy.ops.acquire_phy(hw); 374 if (!locked) {
371 if (ret_val) 375 if (!(hw->phy.ops.acquire_phy))
372 return ret_val; 376 goto out;
377
378 ret_val = hw->phy.ops.acquire_phy(hw);
379 if (ret_val)
380 goto out;
381 }
373 382
374 if (offset > MAX_PHY_MULTI_PAGE_REG) { 383 if (offset > MAX_PHY_MULTI_PAGE_REG) {
375 ret_val = e1000e_write_phy_reg_mdic(hw, 384 ret_val = e1000e_write_phy_reg_mdic(hw,
376 IGP01E1000_PHY_PAGE_SELECT, 385 IGP01E1000_PHY_PAGE_SELECT,
377 (u16)offset); 386 (u16)offset);
378 if (ret_val) { 387 if (ret_val)
379 hw->phy.ops.release_phy(hw); 388 goto release;
380 return ret_val;
381 }
382 } 389 }
383 390
384 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 391 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
385 data); 392 data);
386
387 hw->phy.ops.release_phy(hw);
388 393
394release:
395 if (!locked)
396 hw->phy.ops.release_phy(hw);
397out:
389 return ret_val; 398 return ret_val;
390} 399}
391 400
392/** 401/**
402 * e1000e_read_phy_reg_igp - Read igp PHY register
403 * @hw: pointer to the HW structure
404 * @offset: register offset to be read
405 * @data: pointer to the read data
406 *
407 * Acquires semaphore then reads the PHY register at offset and stores the
408 * retrieved information in data.
409 * Release the acquired semaphore before exiting.
410 **/
411s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
412{
413 return __e1000e_read_phy_reg_igp(hw, offset, data, false);
414}
415
416/**
417 * e1000e_read_phy_reg_igp_locked - Read igp PHY register
418 * @hw: pointer to the HW structure
419 * @offset: register offset to be read
420 * @data: pointer to the read data
421 *
422 * Reads the PHY register at offset and stores the retrieved information
423 * in data. Assumes semaphore already acquired.
424 **/
425s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
426{
427 return __e1000e_read_phy_reg_igp(hw, offset, data, true);
428}
429
430/**
393 * e1000e_write_phy_reg_igp - Write igp PHY register 431 * e1000e_write_phy_reg_igp - Write igp PHY register
394 * @hw: pointer to the HW structure 432 * @hw: pointer to the HW structure
395 * @offset: register offset to write to 433 * @offset: register offset to write to
396 * @data: data to write at register offset 434 * @data: data to write at register offset
435 * @locked: semaphore has already been acquired or not
397 * 436 *
398 * Acquires semaphore, if necessary, then writes the data to PHY register 437 * Acquires semaphore, if necessary, then writes the data to PHY register
399 * at the offset. Release any acquired semaphores before exiting. 438 * at the offset. Release any acquired semaphores before exiting.
400 **/ 439 **/
401s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) 440static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
441 bool locked)
402{ 442{
403 s32 ret_val; 443 s32 ret_val = 0;
404 444
405 ret_val = hw->phy.ops.acquire_phy(hw); 445 if (!locked) {
406 if (ret_val) 446 if (!(hw->phy.ops.acquire_phy))
407 return ret_val; 447 goto out;
448
449 ret_val = hw->phy.ops.acquire_phy(hw);
450 if (ret_val)
451 goto out;
452 }
408 453
409 if (offset > MAX_PHY_MULTI_PAGE_REG) { 454 if (offset > MAX_PHY_MULTI_PAGE_REG) {
410 ret_val = e1000e_write_phy_reg_mdic(hw, 455 ret_val = e1000e_write_phy_reg_mdic(hw,
411 IGP01E1000_PHY_PAGE_SELECT, 456 IGP01E1000_PHY_PAGE_SELECT,
412 (u16)offset); 457 (u16)offset);
413 if (ret_val) { 458 if (ret_val)
414 hw->phy.ops.release_phy(hw); 459 goto release;
415 return ret_val;
416 }
417 } 460 }
418 461
419 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 462 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
420 data); 463 data);
421 464
422 hw->phy.ops.release_phy(hw); 465release:
466 if (!locked)
467 hw->phy.ops.release_phy(hw);
423 468
469out:
424 return ret_val; 470 return ret_val;
425} 471}
426 472
427/** 473/**
428 * e1000e_read_kmrn_reg - Read kumeran register 474 * e1000e_write_phy_reg_igp - Write igp PHY register
475 * @hw: pointer to the HW structure
476 * @offset: register offset to write to
477 * @data: data to write at register offset
478 *
479 * Acquires semaphore then writes the data to PHY register
480 * at the offset. Release any acquired semaphores before exiting.
481 **/
482s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
483{
484 return __e1000e_write_phy_reg_igp(hw, offset, data, false);
485}
486
487/**
488 * e1000e_write_phy_reg_igp_locked - Write igp PHY register
489 * @hw: pointer to the HW structure
490 * @offset: register offset to write to
491 * @data: data to write at register offset
492 *
493 * Writes the data to PHY register at the offset.
494 * Assumes semaphore already acquired.
495 **/
496s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
497{
498 return __e1000e_write_phy_reg_igp(hw, offset, data, true);
499}
500
501/**
502 * __e1000_read_kmrn_reg - Read kumeran register
429 * @hw: pointer to the HW structure 503 * @hw: pointer to the HW structure
430 * @offset: register offset to be read 504 * @offset: register offset to be read
431 * @data: pointer to the read data 505 * @data: pointer to the read data
506 * @locked: semaphore has already been acquired or not
432 * 507 *
433 * Acquires semaphore, if necessary. Then reads the PHY register at offset 508 * Acquires semaphore, if necessary. Then reads the PHY register at offset
434 * using the kumeran interface. The information retrieved is stored in data. 509 * using the kumeran interface. The information retrieved is stored in data.
435 * Release any acquired semaphores before exiting. 510 * Release any acquired semaphores before exiting.
436 **/ 511 **/
437s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) 512static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
513 bool locked)
438{ 514{
439 u32 kmrnctrlsta; 515 u32 kmrnctrlsta;
440 s32 ret_val; 516 s32 ret_val = 0;
441 517
442 ret_val = hw->phy.ops.acquire_phy(hw); 518 if (!locked) {
443 if (ret_val) 519 if (!(hw->phy.ops.acquire_phy))
444 return ret_val; 520 goto out;
521
522 ret_val = hw->phy.ops.acquire_phy(hw);
523 if (ret_val)
524 goto out;
525 }
445 526
446 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 527 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
447 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; 528 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
@@ -452,41 +533,111 @@ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
452 kmrnctrlsta = er32(KMRNCTRLSTA); 533 kmrnctrlsta = er32(KMRNCTRLSTA);
453 *data = (u16)kmrnctrlsta; 534 *data = (u16)kmrnctrlsta;
454 535
455 hw->phy.ops.release_phy(hw); 536 if (!locked)
537 hw->phy.ops.release_phy(hw);
456 538
539out:
457 return ret_val; 540 return ret_val;
458} 541}
459 542
460/** 543/**
461 * e1000e_write_kmrn_reg - Write kumeran register 544 * e1000e_read_kmrn_reg - Read kumeran register
545 * @hw: pointer to the HW structure
546 * @offset: register offset to be read
547 * @data: pointer to the read data
548 *
549 * Acquires semaphore then reads the PHY register at offset using the
550 * kumeran interface. The information retrieved is stored in data.
551 * Release the acquired semaphore before exiting.
552 **/
553s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
554{
555 return __e1000_read_kmrn_reg(hw, offset, data, false);
556}
557
558/**
559 * e1000e_read_kmrn_reg_locked - Read kumeran register
560 * @hw: pointer to the HW structure
561 * @offset: register offset to be read
562 * @data: pointer to the read data
563 *
564 * Reads the PHY register at offset using the kumeran interface. The
565 * information retrieved is stored in data.
566 * Assumes semaphore already acquired.
567 **/
568s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
569{
570 return __e1000_read_kmrn_reg(hw, offset, data, true);
571}
572
573/**
574 * __e1000_write_kmrn_reg - Write kumeran register
462 * @hw: pointer to the HW structure 575 * @hw: pointer to the HW structure
463 * @offset: register offset to write to 576 * @offset: register offset to write to
464 * @data: data to write at register offset 577 * @data: data to write at register offset
578 * @locked: semaphore has already been acquired or not
465 * 579 *
466 * Acquires semaphore, if necessary. Then write the data to PHY register 580 * Acquires semaphore, if necessary. Then write the data to PHY register
467 * at the offset using the kumeran interface. Release any acquired semaphores 581 * at the offset using the kumeran interface. Release any acquired semaphores
468 * before exiting. 582 * before exiting.
469 **/ 583 **/
470s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) 584static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
585 bool locked)
471{ 586{
472 u32 kmrnctrlsta; 587 u32 kmrnctrlsta;
473 s32 ret_val; 588 s32 ret_val = 0;
474 589
475 ret_val = hw->phy.ops.acquire_phy(hw); 590 if (!locked) {
476 if (ret_val) 591 if (!(hw->phy.ops.acquire_phy))
477 return ret_val; 592 goto out;
593
594 ret_val = hw->phy.ops.acquire_phy(hw);
595 if (ret_val)
596 goto out;
597 }
478 598
479 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 599 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
480 E1000_KMRNCTRLSTA_OFFSET) | data; 600 E1000_KMRNCTRLSTA_OFFSET) | data;
481 ew32(KMRNCTRLSTA, kmrnctrlsta); 601 ew32(KMRNCTRLSTA, kmrnctrlsta);
482 602
483 udelay(2); 603 udelay(2);
484 hw->phy.ops.release_phy(hw);
485 604
605 if (!locked)
606 hw->phy.ops.release_phy(hw);
607
608out:
486 return ret_val; 609 return ret_val;
487} 610}
488 611
489/** 612/**
613 * e1000e_write_kmrn_reg - Write kumeran register
614 * @hw: pointer to the HW structure
615 * @offset: register offset to write to
616 * @data: data to write at register offset
617 *
618 * Acquires semaphore then writes the data to the PHY register at the offset
619 * using the kumeran interface. Release the acquired semaphore before exiting.
620 **/
621s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
622{
623 return __e1000_write_kmrn_reg(hw, offset, data, false);
624}
625
626/**
627 * e1000e_write_kmrn_reg_locked - Write kumeran register
628 * @hw: pointer to the HW structure
629 * @offset: register offset to write to
630 * @data: data to write at register offset
631 *
632 * Write the data to PHY register at the offset using the kumeran interface.
633 * Assumes semaphore already acquired.
634 **/
635s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
636{
637 return __e1000_write_kmrn_reg(hw, offset, data, true);
638}
639
640/**
490 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link 641 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
491 * @hw: pointer to the HW structure 642 * @hw: pointer to the HW structure
492 * 643 *
@@ -2105,6 +2256,10 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2105 u32 page = offset >> IGP_PAGE_SHIFT; 2256 u32 page = offset >> IGP_PAGE_SHIFT;
2106 u32 page_shift = 0; 2257 u32 page_shift = 0;
2107 2258
2259 ret_val = hw->phy.ops.acquire_phy(hw);
2260 if (ret_val)
2261 return ret_val;
2262
2108 /* Page 800 works differently than the rest so it has its own func */ 2263 /* Page 800 works differently than the rest so it has its own func */
2109 if (page == BM_WUC_PAGE) { 2264 if (page == BM_WUC_PAGE) {
2110 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, 2265 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -2112,10 +2267,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2112 goto out; 2267 goto out;
2113 } 2268 }
2114 2269
2115 ret_val = hw->phy.ops.acquire_phy(hw);
2116 if (ret_val)
2117 goto out;
2118
2119 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2270 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2120 2271
2121 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2272 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2135,18 +2286,15 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2135 /* Page is shifted left, PHY expects (page x 32) */ 2286 /* Page is shifted left, PHY expects (page x 32) */
2136 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2287 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2137 (page << page_shift)); 2288 (page << page_shift));
2138 if (ret_val) { 2289 if (ret_val)
2139 hw->phy.ops.release_phy(hw);
2140 goto out; 2290 goto out;
2141 }
2142 } 2291 }
2143 2292
2144 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2293 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2145 data); 2294 data);
2146 2295
2147 hw->phy.ops.release_phy(hw);
2148
2149out: 2296out:
2297 hw->phy.ops.release_phy(hw);
2150 return ret_val; 2298 return ret_val;
2151} 2299}
2152 2300
@@ -2167,6 +2315,10 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2167 u32 page = offset >> IGP_PAGE_SHIFT; 2315 u32 page = offset >> IGP_PAGE_SHIFT;
2168 u32 page_shift = 0; 2316 u32 page_shift = 0;
2169 2317
2318 ret_val = hw->phy.ops.acquire_phy(hw);
2319 if (ret_val)
2320 return ret_val;
2321
2170 /* Page 800 works differently than the rest so it has its own func */ 2322 /* Page 800 works differently than the rest so it has its own func */
2171 if (page == BM_WUC_PAGE) { 2323 if (page == BM_WUC_PAGE) {
2172 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, 2324 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -2174,10 +2326,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2174 goto out; 2326 goto out;
2175 } 2327 }
2176 2328
2177 ret_val = hw->phy.ops.acquire_phy(hw);
2178 if (ret_val)
2179 goto out;
2180
2181 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2329 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2182 2330
2183 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2331 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2197,17 +2345,14 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2197 /* Page is shifted left, PHY expects (page x 32) */ 2345 /* Page is shifted left, PHY expects (page x 32) */
2198 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2346 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2199 (page << page_shift)); 2347 (page << page_shift));
2200 if (ret_val) { 2348 if (ret_val)
2201 hw->phy.ops.release_phy(hw);
2202 goto out; 2349 goto out;
2203 }
2204 } 2350 }
2205 2351
2206 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2352 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2207 data); 2353 data);
2208 hw->phy.ops.release_phy(hw);
2209
2210out: 2354out:
2355 hw->phy.ops.release_phy(hw);
2211 return ret_val; 2356 return ret_val;
2212} 2357}
2213 2358
@@ -2226,17 +2371,17 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2226 s32 ret_val; 2371 s32 ret_val;
2227 u16 page = (u16)(offset >> IGP_PAGE_SHIFT); 2372 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2228 2373
2374 ret_val = hw->phy.ops.acquire_phy(hw);
2375 if (ret_val)
2376 return ret_val;
2377
2229 /* Page 800 works differently than the rest so it has its own func */ 2378 /* Page 800 works differently than the rest so it has its own func */
2230 if (page == BM_WUC_PAGE) { 2379 if (page == BM_WUC_PAGE) {
2231 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, 2380 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
2232 true); 2381 true);
2233 return ret_val; 2382 goto out;
2234 } 2383 }
2235 2384
2236 ret_val = hw->phy.ops.acquire_phy(hw);
2237 if (ret_val)
2238 return ret_val;
2239
2240 hw->phy.addr = 1; 2385 hw->phy.addr = 1;
2241 2386
2242 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2387 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2245,16 +2390,14 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2245 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, 2390 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2246 page); 2391 page);
2247 2392
2248 if (ret_val) { 2393 if (ret_val)
2249 hw->phy.ops.release_phy(hw); 2394 goto out;
2250 return ret_val;
2251 }
2252 } 2395 }
2253 2396
2254 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2397 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2255 data); 2398 data);
2399out:
2256 hw->phy.ops.release_phy(hw); 2400 hw->phy.ops.release_phy(hw);
2257
2258 return ret_val; 2401 return ret_val;
2259} 2402}
2260 2403
@@ -2272,17 +2415,17 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2272 s32 ret_val; 2415 s32 ret_val;
2273 u16 page = (u16)(offset >> IGP_PAGE_SHIFT); 2416 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2274 2417
2418 ret_val = hw->phy.ops.acquire_phy(hw);
2419 if (ret_val)
2420 return ret_val;
2421
2275 /* Page 800 works differently than the rest so it has its own func */ 2422 /* Page 800 works differently than the rest so it has its own func */
2276 if (page == BM_WUC_PAGE) { 2423 if (page == BM_WUC_PAGE) {
2277 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, 2424 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
2278 false); 2425 false);
2279 return ret_val; 2426 goto out;
2280 } 2427 }
2281 2428
2282 ret_val = hw->phy.ops.acquire_phy(hw);
2283 if (ret_val)
2284 return ret_val;
2285
2286 hw->phy.addr = 1; 2429 hw->phy.addr = 1;
2287 2430
2288 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2431 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2290,17 +2433,15 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2290 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, 2433 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2291 page); 2434 page);
2292 2435
2293 if (ret_val) { 2436 if (ret_val)
2294 hw->phy.ops.release_phy(hw); 2437 goto out;
2295 return ret_val;
2296 }
2297 } 2438 }
2298 2439
2299 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2440 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2300 data); 2441 data);
2301 2442
2443out:
2302 hw->phy.ops.release_phy(hw); 2444 hw->phy.ops.release_phy(hw);
2303
2304 return ret_val; 2445 return ret_val;
2305} 2446}
2306 2447
@@ -2320,6 +2461,8 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2320 * 3) Write the address using the address opcode (0x11) 2461 * 3) Write the address using the address opcode (0x11)
2321 * 4) Read or write the data using the data opcode (0x12) 2462 * 4) Read or write the data using the data opcode (0x12)
2322 * 5) Restore 769_17.2 to its original value 2463 * 5) Restore 769_17.2 to its original value
2464 *
2465 * Assumes semaphore already acquired.
2323 **/ 2466 **/
2324static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, 2467static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2325 u16 *data, bool read) 2468 u16 *data, bool read)
@@ -2327,20 +2470,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2327 s32 ret_val; 2470 s32 ret_val;
2328 u16 reg = BM_PHY_REG_NUM(offset); 2471 u16 reg = BM_PHY_REG_NUM(offset);
2329 u16 phy_reg = 0; 2472 u16 phy_reg = 0;
2330 u8 phy_acquired = 1;
2331
2332 2473
2333 /* Gig must be disabled for MDIO accesses to page 800 */ 2474 /* Gig must be disabled for MDIO accesses to page 800 */
2334 if ((hw->mac.type == e1000_pchlan) && 2475 if ((hw->mac.type == e1000_pchlan) &&
2335 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) 2476 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
2336 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); 2477 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
2337 2478
2338 ret_val = hw->phy.ops.acquire_phy(hw);
2339 if (ret_val) {
2340 phy_acquired = 0;
2341 goto out;
2342 }
2343
2344 /* All operations in this function are phy address 1 */ 2479 /* All operations in this function are phy address 1 */
2345 hw->phy.addr = 1; 2480 hw->phy.addr = 1;
2346 2481
@@ -2397,8 +2532,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2397 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); 2532 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
2398 2533
2399out: 2534out:
2400 if (phy_acquired == 1)
2401 hw->phy.ops.release_phy(hw);
2402 return ret_val; 2535 return ret_val;
2403} 2536}
2404 2537
@@ -2439,52 +2572,63 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
2439 return 0; 2572 return 0;
2440} 2573}
2441 2574
2575/**
2576 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2577 * @hw: pointer to the HW structure
2578 * @slow: true for slow mode, false for normal mode
2579 *
2580 * Assumes semaphore already acquired.
2581 **/
2442s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow) 2582s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
2443{ 2583{
2444 s32 ret_val = 0; 2584 s32 ret_val = 0;
2445 u16 data = 0; 2585 u16 data = 0;
2446 2586
2447 ret_val = hw->phy.ops.acquire_phy(hw);
2448 if (ret_val)
2449 return ret_val;
2450
2451 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */ 2587 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
2452 hw->phy.addr = 1; 2588 hw->phy.addr = 1;
2453 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 2589 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2454 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); 2590 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
2455 if (ret_val) { 2591 if (ret_val)
2456 hw->phy.ops.release_phy(hw); 2592 goto out;
2457 return ret_val; 2593
2458 }
2459 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1, 2594 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
2460 (0x2180 | (slow << 10))); 2595 (0x2180 | (slow << 10)));
2596 if (ret_val)
2597 goto out;
2461 2598
2462 /* dummy read when reverting to fast mode - throw away result */ 2599 /* dummy read when reverting to fast mode - throw away result */
2463 if (!slow) 2600 if (!slow)
2464 e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); 2601 ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
2465
2466 hw->phy.ops.release_phy(hw);
2467 2602
2603out:
2468 return ret_val; 2604 return ret_val;
2469} 2605}
2470 2606
2471/** 2607/**
2472 * e1000_read_phy_reg_hv - Read HV PHY register 2608 * __e1000_read_phy_reg_hv - Read HV PHY register
2473 * @hw: pointer to the HW structure 2609 * @hw: pointer to the HW structure
2474 * @offset: register offset to be read 2610 * @offset: register offset to be read
2475 * @data: pointer to the read data 2611 * @data: pointer to the read data
2612 * @locked: semaphore has already been acquired or not
2476 * 2613 *
2477 * Acquires semaphore, if necessary, then reads the PHY register at offset 2614 * Acquires semaphore, if necessary, then reads the PHY register at offset
2478 * and storing the retrieved information in data. Release any acquired 2615 * and stores the retrieved information in data. Release any acquired
2479 * semaphore before exiting. 2616 * semaphore before exiting.
2480 **/ 2617 **/
2481s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) 2618static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2619 bool locked)
2482{ 2620{
2483 s32 ret_val; 2621 s32 ret_val;
2484 u16 page = BM_PHY_REG_PAGE(offset); 2622 u16 page = BM_PHY_REG_PAGE(offset);
2485 u16 reg = BM_PHY_REG_NUM(offset); 2623 u16 reg = BM_PHY_REG_NUM(offset);
2486 bool in_slow_mode = false; 2624 bool in_slow_mode = false;
2487 2625
2626 if (!locked) {
2627 ret_val = hw->phy.ops.acquire_phy(hw);
2628 if (ret_val)
2629 return ret_val;
2630 }
2631
2488 /* Workaround failure in MDIO access while cable is disconnected */ 2632 /* Workaround failure in MDIO access while cable is disconnected */
2489 if ((hw->phy.type == e1000_phy_82577) && 2633 if ((hw->phy.type == e1000_phy_82577) &&
2490 !(er32(STATUS) & E1000_STATUS_LU)) { 2634 !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2508,10 +2652,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2508 goto out; 2652 goto out;
2509 } 2653 }
2510 2654
2511 ret_val = hw->phy.ops.acquire_phy(hw);
2512 if (ret_val)
2513 goto out;
2514
2515 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); 2655 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2516 2656
2517 if (page == HV_INTC_FC_PAGE_START) 2657 if (page == HV_INTC_FC_PAGE_START)
@@ -2529,42 +2669,76 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2529 ret_val = e1000e_write_phy_reg_mdic(hw, 2669 ret_val = e1000e_write_phy_reg_mdic(hw,
2530 IGP01E1000_PHY_PAGE_SELECT, 2670 IGP01E1000_PHY_PAGE_SELECT,
2531 (page << IGP_PAGE_SHIFT)); 2671 (page << IGP_PAGE_SHIFT));
2532 if (ret_val) {
2533 hw->phy.ops.release_phy(hw);
2534 goto out;
2535 }
2536 hw->phy.addr = phy_addr; 2672 hw->phy.addr = phy_addr;
2537 } 2673 }
2538 } 2674 }
2539 2675
2540 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2676 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2541 data); 2677 data);
2542 hw->phy.ops.release_phy(hw);
2543
2544out: 2678out:
2545 /* Revert to MDIO fast mode, if applicable */ 2679 /* Revert to MDIO fast mode, if applicable */
2546 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2680 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2547 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2681 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2548 2682
2683 if (!locked)
2684 hw->phy.ops.release_phy(hw);
2685
2549 return ret_val; 2686 return ret_val;
2550} 2687}
2551 2688
2552/** 2689/**
2553 * e1000_write_phy_reg_hv - Write HV PHY register 2690 * e1000_read_phy_reg_hv - Read HV PHY register
2691 * @hw: pointer to the HW structure
2692 * @offset: register offset to be read
2693 * @data: pointer to the read data
2694 *
2695 * Acquires semaphore then reads the PHY register at offset and stores
2696 * the retrieved information in data. Release the acquired semaphore
2697 * before exiting.
2698 **/
2699s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2700{
2701 return __e1000_read_phy_reg_hv(hw, offset, data, false);
2702}
2703
2704/**
2705 * e1000_read_phy_reg_hv_locked - Read HV PHY register
2706 * @hw: pointer to the HW structure
2707 * @offset: register offset to be read
2708 * @data: pointer to the read data
2709 *
2710 * Reads the PHY register at offset and stores the retrieved information
2711 * in data. Assumes semaphore already acquired.
2712 **/
2713s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
2714{
2715 return __e1000_read_phy_reg_hv(hw, offset, data, true);
2716}
2717
2718/**
2719 * __e1000_write_phy_reg_hv - Write HV PHY register
2554 * @hw: pointer to the HW structure 2720 * @hw: pointer to the HW structure
2555 * @offset: register offset to write to 2721 * @offset: register offset to write to
2556 * @data: data to write at register offset 2722 * @data: data to write at register offset
2723 * @locked: semaphore has already been acquired or not
2557 * 2724 *
2558 * Acquires semaphore, if necessary, then writes the data to PHY register 2725 * Acquires semaphore, if necessary, then writes the data to PHY register
2559 * at the offset. Release any acquired semaphores before exiting. 2726 * at the offset. Release any acquired semaphores before exiting.
2560 **/ 2727 **/
2561s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) 2728static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2729 bool locked)
2562{ 2730{
2563 s32 ret_val; 2731 s32 ret_val;
2564 u16 page = BM_PHY_REG_PAGE(offset); 2732 u16 page = BM_PHY_REG_PAGE(offset);
2565 u16 reg = BM_PHY_REG_NUM(offset); 2733 u16 reg = BM_PHY_REG_NUM(offset);
2566 bool in_slow_mode = false; 2734 bool in_slow_mode = false;
2567 2735
2736 if (!locked) {
2737 ret_val = hw->phy.ops.acquire_phy(hw);
2738 if (ret_val)
2739 return ret_val;
2740 }
2741
2568 /* Workaround failure in MDIO access while cable is disconnected */ 2742 /* Workaround failure in MDIO access while cable is disconnected */
2569 if ((hw->phy.type == e1000_phy_82577) && 2743 if ((hw->phy.type == e1000_phy_82577) &&
2570 !(er32(STATUS) & E1000_STATUS_LU)) { 2744 !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2588,10 +2762,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2588 goto out; 2762 goto out;
2589 } 2763 }
2590 2764
2591 ret_val = hw->phy.ops.acquire_phy(hw);
2592 if (ret_val)
2593 goto out;
2594
2595 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); 2765 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2596 2766
2597 if (page == HV_INTC_FC_PAGE_START) 2767 if (page == HV_INTC_FC_PAGE_START)
@@ -2607,15 +2777,10 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2607 ((MAX_PHY_REG_ADDRESS & reg) == 0) && 2777 ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
2608 (data & (1 << 11))) { 2778 (data & (1 << 11))) {
2609 u16 data2 = 0x7EFF; 2779 u16 data2 = 0x7EFF;
2610 hw->phy.ops.release_phy(hw);
2611 ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, 2780 ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
2612 &data2, false); 2781 &data2, false);
2613 if (ret_val) 2782 if (ret_val)
2614 goto out; 2783 goto out;
2615
2616 ret_val = hw->phy.ops.acquire_phy(hw);
2617 if (ret_val)
2618 goto out;
2619 } 2784 }
2620 2785
2621 if (reg > MAX_PHY_MULTI_PAGE_REG) { 2786 if (reg > MAX_PHY_MULTI_PAGE_REG) {
@@ -2630,27 +2795,53 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2630 ret_val = e1000e_write_phy_reg_mdic(hw, 2795 ret_val = e1000e_write_phy_reg_mdic(hw,
2631 IGP01E1000_PHY_PAGE_SELECT, 2796 IGP01E1000_PHY_PAGE_SELECT,
2632 (page << IGP_PAGE_SHIFT)); 2797 (page << IGP_PAGE_SHIFT));
2633 if (ret_val) {
2634 hw->phy.ops.release_phy(hw);
2635 goto out;
2636 }
2637 hw->phy.addr = phy_addr; 2798 hw->phy.addr = phy_addr;
2638 } 2799 }
2639 } 2800 }
2640 2801
2641 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2802 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2642 data); 2803 data);
2643 hw->phy.ops.release_phy(hw);
2644 2804
2645out: 2805out:
2646 /* Revert to MDIO fast mode, if applicable */ 2806 /* Revert to MDIO fast mode, if applicable */
2647 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2807 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2648 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2808 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2649 2809
2810 if (!locked)
2811 hw->phy.ops.release_phy(hw);
2812
2650 return ret_val; 2813 return ret_val;
2651} 2814}
2652 2815
2653/** 2816/**
2817 * e1000_write_phy_reg_hv - Write HV PHY register
2818 * @hw: pointer to the HW structure
2819 * @offset: register offset to write to
2820 * @data: data to write at register offset
2821 *
2822 * Acquires semaphore then writes the data to PHY register at the offset.
2823 * Release the acquired semaphores before exiting.
2824 **/
2825s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2826{
2827 return __e1000_write_phy_reg_hv(hw, offset, data, false);
2828}
2829
2830/**
2831 * e1000_write_phy_reg_hv_locked - Write HV PHY register
2832 * @hw: pointer to the HW structure
2833 * @offset: register offset to write to
2834 * @data: data to write at register offset
2835 *
2836 * Writes the data to PHY register at the offset. Assumes semaphore
2837 * already acquired.
2838 **/
2839s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
2840{
2841 return __e1000_write_phy_reg_hv(hw, offset, data, true);
2842}
2843
2844/**
2654 * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page 2845 * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
2655 * @page: page to be accessed 2846 * @page: page to be accessed
2656 **/ 2847 **/
@@ -2671,10 +2862,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
2671 * @data: pointer to the data to be read or written 2862 * @data: pointer to the data to be read or written
2672 * @read: determines if operation is read or written 2863 * @read: determines if operation is read or written
2673 * 2864 *
2674 * Acquires semaphore, if necessary, then reads the PHY register at offset 2865 * Reads the PHY register at offset and stores the retreived information
2675 * and storing the retreived information in data. Release any acquired 2866 * in data. Assumes semaphore already acquired. Note that the procedure
2676 * semaphores before exiting. Note that the procedure to read these regs 2867 * to read these regs uses the address port and data port to read/write.
2677 * uses the address port and data port to read/write.
2678 **/ 2868 **/
2679static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, 2869static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2680 u16 *data, bool read) 2870 u16 *data, bool read)
@@ -2682,20 +2872,12 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2682 s32 ret_val; 2872 s32 ret_val;
2683 u32 addr_reg = 0; 2873 u32 addr_reg = 0;
2684 u32 data_reg = 0; 2874 u32 data_reg = 0;
2685 u8 phy_acquired = 1;
2686 2875
2687 /* This takes care of the difference with desktop vs mobile phy */ 2876 /* This takes care of the difference with desktop vs mobile phy */
2688 addr_reg = (hw->phy.type == e1000_phy_82578) ? 2877 addr_reg = (hw->phy.type == e1000_phy_82578) ?
2689 I82578_ADDR_REG : I82577_ADDR_REG; 2878 I82578_ADDR_REG : I82577_ADDR_REG;
2690 data_reg = addr_reg + 1; 2879 data_reg = addr_reg + 1;
2691 2880
2692 ret_val = hw->phy.ops.acquire_phy(hw);
2693 if (ret_val) {
2694 hw_dbg(hw, "Could not acquire PHY\n");
2695 phy_acquired = 0;
2696 goto out;
2697 }
2698
2699 /* All operations in this function are phy address 2 */ 2881 /* All operations in this function are phy address 2 */
2700 hw->phy.addr = 2; 2882 hw->phy.addr = 2;
2701 2883
@@ -2718,8 +2900,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2718 } 2900 }
2719 2901
2720out: 2902out:
2721 if (phy_acquired == 1)
2722 hw->phy.ops.release_phy(hw);
2723 return ret_val; 2903 return ret_val;
2724} 2904}
2725 2905
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a25f8ed8109d..f1c565282d58 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -222,24 +222,25 @@ struct ethoc_bd {
222 u32 addr; 222 u32 addr;
223}; 223};
224 224
225static u32 ethoc_read(struct ethoc *dev, loff_t offset) 225static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
226{ 226{
227 return ioread32(dev->iobase + offset); 227 return ioread32(dev->iobase + offset);
228} 228}
229 229
230static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) 230static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
231{ 231{
232 iowrite32(data, dev->iobase + offset); 232 iowrite32(data, dev->iobase + offset);
233} 233}
234 234
235static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd) 235static inline void ethoc_read_bd(struct ethoc *dev, int index,
236 struct ethoc_bd *bd)
236{ 237{
237 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); 238 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
238 bd->stat = ethoc_read(dev, offset + 0); 239 bd->stat = ethoc_read(dev, offset + 0);
239 bd->addr = ethoc_read(dev, offset + 4); 240 bd->addr = ethoc_read(dev, offset + 4);
240} 241}
241 242
242static void ethoc_write_bd(struct ethoc *dev, int index, 243static inline void ethoc_write_bd(struct ethoc *dev, int index,
243 const struct ethoc_bd *bd) 244 const struct ethoc_bd *bd)
244{ 245{
245 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); 246 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
@@ -247,33 +248,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
247 ethoc_write(dev, offset + 4, bd->addr); 248 ethoc_write(dev, offset + 4, bd->addr);
248} 249}
249 250
250static void ethoc_enable_irq(struct ethoc *dev, u32 mask) 251static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
251{ 252{
252 u32 imask = ethoc_read(dev, INT_MASK); 253 u32 imask = ethoc_read(dev, INT_MASK);
253 imask |= mask; 254 imask |= mask;
254 ethoc_write(dev, INT_MASK, imask); 255 ethoc_write(dev, INT_MASK, imask);
255} 256}
256 257
257static void ethoc_disable_irq(struct ethoc *dev, u32 mask) 258static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
258{ 259{
259 u32 imask = ethoc_read(dev, INT_MASK); 260 u32 imask = ethoc_read(dev, INT_MASK);
260 imask &= ~mask; 261 imask &= ~mask;
261 ethoc_write(dev, INT_MASK, imask); 262 ethoc_write(dev, INT_MASK, imask);
262} 263}
263 264
264static void ethoc_ack_irq(struct ethoc *dev, u32 mask) 265static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
265{ 266{
266 ethoc_write(dev, INT_SOURCE, mask); 267 ethoc_write(dev, INT_SOURCE, mask);
267} 268}
268 269
269static void ethoc_enable_rx_and_tx(struct ethoc *dev) 270static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
270{ 271{
271 u32 mode = ethoc_read(dev, MODER); 272 u32 mode = ethoc_read(dev, MODER);
272 mode |= MODER_RXEN | MODER_TXEN; 273 mode |= MODER_RXEN | MODER_TXEN;
273 ethoc_write(dev, MODER, mode); 274 ethoc_write(dev, MODER, mode);
274} 275}
275 276
276static void ethoc_disable_rx_and_tx(struct ethoc *dev) 277static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
277{ 278{
278 u32 mode = ethoc_read(dev, MODER); 279 u32 mode = ethoc_read(dev, MODER);
279 mode &= ~(MODER_RXEN | MODER_TXEN); 280 mode &= ~(MODER_RXEN | MODER_TXEN);
@@ -507,7 +508,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
507 return IRQ_NONE; 508 return IRQ_NONE;
508 } 509 }
509 510
510 ethoc_ack_irq(priv, INT_MASK_ALL); 511 ethoc_ack_irq(priv, pending);
511 512
512 if (pending & INT_MASK_BUSY) { 513 if (pending & INT_MASK_BUSY) {
513 dev_err(&dev->dev, "packet dropped\n"); 514 dev_err(&dev->dev, "packet dropped\n");
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 29234380e6c6..16a1d58419d9 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = {
1654 * 1654 *
1655 * index is only used in legacy code 1655 * index is only used in legacy code
1656 */ 1656 */
1657int __init fec_enet_init(struct net_device *dev, int index) 1657static int fec_enet_init(struct net_device *dev, int index)
1658{ 1658{
1659 struct fec_enet_private *fep = netdev_priv(dev); 1659 struct fec_enet_private *fep = netdev_priv(dev);
1660 struct bufdesc *cbd_base; 1660 struct bufdesc *cbd_base;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index c40113f58963..66dace6d324f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
759 759
760 mpc52xx_fec_hw_init(dev); 760 mpc52xx_fec_hw_init(dev);
761 761
762 if (priv->phydev) {
763 phy_stop(priv->phydev);
764 phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
765 phy_start(priv->phydev);
766 }
767
768 bcom_fec_rx_reset(priv->rx_dmatsk); 762 bcom_fec_rx_reset(priv->rx_dmatsk);
769 bcom_fec_tx_reset(priv->tx_dmatsk); 763 bcom_fec_tx_reset(priv->tx_dmatsk);
770 764
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 31e6d62b785d..ee0f3c6d3f88 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
155 { .compatible = "mpc5200b-fec-phy", }, 155 { .compatible = "mpc5200b-fec-phy", },
156 {} 156 {}
157}; 157};
158MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
158 159
159struct of_platform_driver mpc52xx_fec_mdio_driver = { 160struct of_platform_driver mpc52xx_fec_mdio_driver = {
160 .name = "mpc5200b-fec-phy", 161 .name = "mpc5200b-fec-phy",
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 2bc2d2b20644..ec2f5034457f 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = {
1110#endif 1110#endif
1111 {} 1111 {}
1112}; 1112};
1113MODULE_DEVICE_TABLE(of, fs_enet_match);
1113 1114
1114static struct of_platform_driver fs_enet_driver = { 1115static struct of_platform_driver fs_enet_driver = {
1115 .name = "fs_enet", 1116 .name = "fs_enet",
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 93b481b0e3c7..24ff9f43a62b 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
221 }, 221 },
222 {}, 222 {},
223}; 223};
224MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
224 225
225static struct of_platform_driver fs_enet_bb_mdio_driver = { 226static struct of_platform_driver fs_enet_bb_mdio_driver = {
226 .name = "fsl-bb-mdio", 227 .name = "fsl-bb-mdio",
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a2d69c1cd07e..96eba4280c5c 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
219#endif 219#endif
220 {}, 220 {},
221}; 221};
222MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
222 223
223static struct of_platform_driver fs_enet_fec_mdio_driver = { 224static struct of_platform_driver fs_enet_fec_mdio_driver = {
224 .name = "fsl-fec-mdio", 225 .name = "fsl-fec-mdio",
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d167090248e2..4065b7c01ecb 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -3,8 +3,9 @@
3 * Provides Bus interface for MIIM regs 3 * Provides Bus interface for MIIM regs
4 * 4 *
5 * Author: Andy Fleming <afleming@freescale.com> 5 * Author: Andy Fleming <afleming@freescale.com>
6 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
6 * 7 *
7 * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc. 8 * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
8 * 9 *
9 * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips) 10 * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
10 * 11 *
@@ -189,19 +190,29 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
189 190
190 191
191#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) 192#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
192static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) 193static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
193{ 194{
194 struct gfar __iomem *enet_regs; 195 struct gfar __iomem *enet_regs;
196 u32 __iomem *ioremap_tbipa;
197 u64 addr, size;
195 198
196 /* 199 /*
197 * This is mildly evil, but so is our hardware for doing this. 200 * This is mildly evil, but so is our hardware for doing this.
198 * Also, we have to cast back to struct gfar because of 201 * Also, we have to cast back to struct gfar because of
199 * definition weirdness done in gianfar.h. 202 * definition weirdness done in gianfar.h.
200 */ 203 */
201 enet_regs = (struct gfar __iomem *) 204 if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
202 ((char __iomem *)regs - offsetof(struct gfar, gfar_mii_regs)); 205 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
203 206 of_device_is_compatible(np, "gianfar")) {
204 return &enet_regs->tbipa; 207 enet_regs = (struct gfar __iomem *)regs;
208 return &enet_regs->tbipa;
209 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
210 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
211 addr = of_translate_address(np, of_get_address(np, 1, &size, NULL));
212 ioremap_tbipa = ioremap(addr, size);
213 return ioremap_tbipa;
214 } else
215 return NULL;
205} 216}
206#endif 217#endif
207 218
@@ -250,11 +261,11 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
250{ 261{
251 struct device_node *np = ofdev->node; 262 struct device_node *np = ofdev->node;
252 struct device_node *tbi; 263 struct device_node *tbi;
253 struct fsl_pq_mdio __iomem *regs; 264 struct fsl_pq_mdio __iomem *regs = NULL;
254 u32 __iomem *tbipa; 265 u32 __iomem *tbipa;
255 struct mii_bus *new_bus; 266 struct mii_bus *new_bus;
256 int tbiaddr = -1; 267 int tbiaddr = -1;
257 u64 addr, size; 268 u64 addr = 0, size = 0, ioremap_miimcfg = 0;
258 int err = 0; 269 int err = 0;
259 270
260 new_bus = mdiobus_alloc(); 271 new_bus = mdiobus_alloc();
@@ -268,8 +279,22 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
268 fsl_pq_mdio_bus_name(new_bus->id, np); 279 fsl_pq_mdio_bus_name(new_bus->id, np);
269 280
270 /* Set the PHY base address */ 281 /* Set the PHY base address */
271 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 282 if (of_device_is_compatible(np,"fsl,gianfar-mdio") ||
272 regs = ioremap(addr, size); 283 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
284 of_device_is_compatible(np, "fsl,ucc-mdio") ||
285 of_device_is_compatible(np,"ucc_geth_phy" )) {
286 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
287 ioremap_miimcfg = container_of(addr, struct fsl_pq_mdio, miimcfg);
288 regs = ioremap(ioremap_miimcfg, size +
289 offsetof(struct fsl_pq_mdio, miimcfg));
290 } else if (of_device_is_compatible(np,"fsl,etsec2-mdio") ||
291 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
292 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
293 regs = ioremap(addr, size);
294 } else {
295 err = -EINVAL;
296 goto err_free_bus;
297 }
273 298
274 if (NULL == regs) { 299 if (NULL == regs) {
275 err = -ENOMEM; 300 err = -ENOMEM;
@@ -290,9 +315,15 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
290 315
291 if (of_device_is_compatible(np, "fsl,gianfar-mdio") || 316 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
292 of_device_is_compatible(np, "fsl,gianfar-tbi") || 317 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
318 of_device_is_compatible(np, "fsl,etsec2-mdio") ||
319 of_device_is_compatible(np, "fsl,etsec2-tbi") ||
293 of_device_is_compatible(np, "gianfar")) { 320 of_device_is_compatible(np, "gianfar")) {
294#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) 321#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
295 tbipa = get_gfar_tbipa(regs); 322 tbipa = get_gfar_tbipa(regs, np);
323 if (!tbipa) {
324 err = -EINVAL;
325 goto err_free_irqs;
326 }
296#else 327#else
297 err = -ENODEV; 328 err = -ENODEV;
298 goto err_free_irqs; 329 goto err_free_irqs;
@@ -405,8 +436,15 @@ static struct of_device_id fsl_pq_mdio_match[] = {
405 { 436 {
406 .compatible = "fsl,gianfar-mdio", 437 .compatible = "fsl,gianfar-mdio",
407 }, 438 },
439 {
440 .compatible = "fsl,etsec2-tbi",
441 },
442 {
443 .compatible = "fsl,etsec2-mdio",
444 },
408 {}, 445 {},
409}; 446};
447MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
410 448
411static struct of_platform_driver fsl_pq_mdio_driver = { 449static struct of_platform_driver fsl_pq_mdio_driver = {
412 .name = "fsl-pq_mdio", 450 .name = "fsl-pq_mdio",
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
index 36dad527410b..1f7d865cedb6 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/fsl_pq_mdio.h
@@ -3,8 +3,9 @@
3 * Driver for the MDIO bus controller on Freescale PowerQUICC processors 3 * Driver for the MDIO bus controller on Freescale PowerQUICC processors
4 * 4 *
5 * Author: Andy Fleming 5 * Author: Andy Fleming
6 * Modifier: Sandeep Gopalpet
6 * 7 *
7 * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc. 8 * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the 11 * under the terms of the GNU General Public License as published by the
@@ -23,6 +24,12 @@
23#define MII_READ_COMMAND 0x00000001 24#define MII_READ_COMMAND 0x00000001
24 25
25struct fsl_pq_mdio { 26struct fsl_pq_mdio {
27 u8 res1[16];
28 u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
29 u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
30 u8 res2[4];
31 u32 emapm; /* MDIO Event mapping register (for etsec2)*/
32 u8 res3[1280];
26 u32 miimcfg; /* MII management configuration reg */ 33 u32 miimcfg; /* MII management configuration reg */
27 u32 miimcom; /* MII management command reg */ 34 u32 miimcom; /* MII management command reg */
28 u32 miimadd; /* MII management address reg */ 35 u32 miimadd; /* MII management address reg */
@@ -31,9 +38,9 @@ struct fsl_pq_mdio {
31 u32 miimind; /* MII management indication reg */ 38 u32 miimind; /* MII management indication reg */
32 u8 reserved[28]; /* Space holder */ 39 u8 reserved[28]; /* Space holder */
33 u32 utbipar; /* TBI phy address reg (only on UCC) */ 40 u32 utbipar; /* TBI phy address reg (only on UCC) */
41 u8 res4[2728];
34} __attribute__ ((packed)); 42} __attribute__ ((packed));
35 43
36
37int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); 44int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
38int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); 45int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
39int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, 46int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c6f6d3b7f4df..086d40dd526d 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
8 * 8 *
9 * Author: Andy Fleming 9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala 10 * Maintainer: Kumar Gala
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 12 *
12 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009 Freescale Semiconductor, Inc.
13 * Copyright (c) 2007 MontaVista Software, Inc. 14 * Copyright 2007 MontaVista Software, Inc.
14 * 15 *
15 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the 17 * under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
109static void gfar_timeout(struct net_device *dev); 110static void gfar_timeout(struct net_device *dev);
110static int gfar_close(struct net_device *dev); 111static int gfar_close(struct net_device *dev);
111struct sk_buff *gfar_new_skb(struct net_device *dev); 112struct sk_buff *gfar_new_skb(struct net_device *dev);
112static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 113static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
113 struct sk_buff *skb); 114 struct sk_buff *skb);
114static int gfar_set_mac_address(struct net_device *dev); 115static int gfar_set_mac_address(struct net_device *dev);
115static int gfar_change_mtu(struct net_device *dev, int new_mtu); 116static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
130#ifdef CONFIG_NET_POLL_CONTROLLER 131#ifdef CONFIG_NET_POLL_CONTROLLER
131static void gfar_netpoll(struct net_device *dev); 132static void gfar_netpoll(struct net_device *dev);
132#endif 133#endif
133int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 134int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
134static int gfar_clean_tx_ring(struct net_device *dev); 135static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
135static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 136static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
136 int amount_pull); 137 int amount_pull);
137static void gfar_vlan_rx_register(struct net_device *netdev, 138static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -142,21 +143,21 @@ void gfar_start(struct net_device *dev);
142static void gfar_clear_exact_match(struct net_device *dev); 143static void gfar_clear_exact_match(struct net_device *dev);
143static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
144static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
146u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
145 147
146MODULE_AUTHOR("Freescale Semiconductor, Inc"); 148MODULE_AUTHOR("Freescale Semiconductor, Inc");
147MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 149MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148MODULE_LICENSE("GPL"); 150MODULE_LICENSE("GPL");
149 151
150static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 152static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
151 dma_addr_t buf) 153 dma_addr_t buf)
152{ 154{
153 struct gfar_private *priv = netdev_priv(dev);
154 u32 lstatus; 155 u32 lstatus;
155 156
156 bdp->bufPtr = buf; 157 bdp->bufPtr = buf;
157 158
158 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); 159 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
159 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1) 160 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
160 lstatus |= BD_LFLAG(RXBD_WRAP); 161 lstatus |= BD_LFLAG(RXBD_WRAP);
161 162
162 eieio(); 163 eieio();
@@ -167,65 +168,93 @@ static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
167static int gfar_init_bds(struct net_device *ndev) 168static int gfar_init_bds(struct net_device *ndev)
168{ 169{
169 struct gfar_private *priv = netdev_priv(ndev); 170 struct gfar_private *priv = netdev_priv(ndev);
171 struct gfar_priv_tx_q *tx_queue = NULL;
172 struct gfar_priv_rx_q *rx_queue = NULL;
170 struct txbd8 *txbdp; 173 struct txbd8 *txbdp;
171 struct rxbd8 *rxbdp; 174 struct rxbd8 *rxbdp;
172 int i; 175 int i, j;
173 176
174 /* Initialize some variables in our dev structure */ 177 for (i = 0; i < priv->num_tx_queues; i++) {
175 priv->num_txbdfree = priv->tx_ring_size; 178 tx_queue = priv->tx_queue[i];
176 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base; 179 /* Initialize some variables in our dev structure */
177 priv->cur_rx = priv->rx_bd_base; 180 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
178 priv->skb_curtx = priv->skb_dirtytx = 0; 181 tx_queue->dirty_tx = tx_queue->tx_bd_base;
179 priv->skb_currx = 0; 182 tx_queue->cur_tx = tx_queue->tx_bd_base;
183 tx_queue->skb_curtx = 0;
184 tx_queue->skb_dirtytx = 0;
185
186 /* Initialize Transmit Descriptor Ring */
187 txbdp = tx_queue->tx_bd_base;
188 for (j = 0; j < tx_queue->tx_ring_size; j++) {
189 txbdp->lstatus = 0;
190 txbdp->bufPtr = 0;
191 txbdp++;
192 }
180 193
181 /* Initialize Transmit Descriptor Ring */ 194 /* Set the last descriptor in the ring to indicate wrap */
182 txbdp = priv->tx_bd_base; 195 txbdp--;
183 for (i = 0; i < priv->tx_ring_size; i++) { 196 txbdp->status |= TXBD_WRAP;
184 txbdp->lstatus = 0;
185 txbdp->bufPtr = 0;
186 txbdp++;
187 } 197 }
188 198
189 /* Set the last descriptor in the ring to indicate wrap */ 199 for (i = 0; i < priv->num_rx_queues; i++) {
190 txbdp--; 200 rx_queue = priv->rx_queue[i];
191 txbdp->status |= TXBD_WRAP; 201 rx_queue->cur_rx = rx_queue->rx_bd_base;
202 rx_queue->skb_currx = 0;
203 rxbdp = rx_queue->rx_bd_base;
192 204
193 rxbdp = priv->rx_bd_base; 205 for (j = 0; j < rx_queue->rx_ring_size; j++) {
194 for (i = 0; i < priv->rx_ring_size; i++) { 206 struct sk_buff *skb = rx_queue->rx_skbuff[j];
195 struct sk_buff *skb = priv->rx_skbuff[i];
196 207
197 if (skb) { 208 if (skb) {
198 gfar_init_rxbdp(ndev, rxbdp, rxbdp->bufPtr); 209 gfar_init_rxbdp(rx_queue, rxbdp,
199 } else { 210 rxbdp->bufPtr);
200 skb = gfar_new_skb(ndev); 211 } else {
201 if (!skb) { 212 skb = gfar_new_skb(ndev);
202 pr_err("%s: Can't allocate RX buffers\n", 213 if (!skb) {
203 ndev->name); 214 pr_err("%s: Can't allocate RX buffers\n",
204 return -ENOMEM; 215 ndev->name);
216 goto err_rxalloc_fail;
217 }
218 rx_queue->rx_skbuff[j] = skb;
219
220 gfar_new_rxbdp(rx_queue, rxbdp, skb);
205 } 221 }
206 priv->rx_skbuff[i] = skb;
207 222
208 gfar_new_rxbdp(ndev, rxbdp, skb); 223 rxbdp++;
209 } 224 }
210 225
211 rxbdp++;
212 } 226 }
213 227
214 return 0; 228 return 0;
229
230err_rxalloc_fail:
231 free_skb_resources(priv);
232 return -ENOMEM;
215} 233}
216 234
217static int gfar_alloc_skb_resources(struct net_device *ndev) 235static int gfar_alloc_skb_resources(struct net_device *ndev)
218{ 236{
219 void *vaddr; 237 void *vaddr;
220 int i; 238 dma_addr_t addr;
239 int i, j, k;
221 struct gfar_private *priv = netdev_priv(ndev); 240 struct gfar_private *priv = netdev_priv(ndev);
222 struct device *dev = &priv->ofdev->dev; 241 struct device *dev = &priv->ofdev->dev;
242 struct gfar_priv_tx_q *tx_queue = NULL;
243 struct gfar_priv_rx_q *rx_queue = NULL;
244
245 priv->total_tx_ring_size = 0;
246 for (i = 0; i < priv->num_tx_queues; i++)
247 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
248
249 priv->total_rx_ring_size = 0;
250 for (i = 0; i < priv->num_rx_queues; i++)
251 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
223 252
224 /* Allocate memory for the buffer descriptors */ 253 /* Allocate memory for the buffer descriptors */
225 vaddr = dma_alloc_coherent(dev, 254 vaddr = dma_alloc_coherent(dev,
226 sizeof(*priv->tx_bd_base) * priv->tx_ring_size + 255 sizeof(struct txbd8) * priv->total_tx_ring_size +
227 sizeof(*priv->rx_bd_base) * priv->rx_ring_size, 256 sizeof(struct rxbd8) * priv->total_rx_ring_size,
228 &priv->tx_bd_dma_base, GFP_KERNEL); 257 &addr, GFP_KERNEL);
229 if (!vaddr) { 258 if (!vaddr) {
230 if (netif_msg_ifup(priv)) 259 if (netif_msg_ifup(priv))
231 pr_err("%s: Could not allocate buffer descriptors!\n", 260 pr_err("%s: Could not allocate buffer descriptors!\n",
@@ -233,36 +262,57 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
233 return -ENOMEM; 262 return -ENOMEM;
234 } 263 }
235 264
236 priv->tx_bd_base = vaddr; 265 for (i = 0; i < priv->num_tx_queues; i++) {
266 tx_queue = priv->tx_queue[i];
267 tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
268 tx_queue->tx_bd_dma_base = addr;
269 tx_queue->dev = ndev;
270 /* enet DMA only understands physical addresses */
271 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
272 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
273 }
237 274
238 /* Start the rx descriptor ring where the tx ring leaves off */ 275 /* Start the rx descriptor ring where the tx ring leaves off */
239 vaddr = vaddr + sizeof(*priv->tx_bd_base) * priv->tx_ring_size; 276 for (i = 0; i < priv->num_rx_queues; i++) {
240 priv->rx_bd_base = vaddr; 277 rx_queue = priv->rx_queue[i];
278 rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
279 rx_queue->rx_bd_dma_base = addr;
280 rx_queue->dev = ndev;
281 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
282 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
283 }
241 284
242 /* Setup the skbuff rings */ 285 /* Setup the skbuff rings */
243 priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) * 286 for (i = 0; i < priv->num_tx_queues; i++) {
244 priv->tx_ring_size, GFP_KERNEL); 287 tx_queue = priv->tx_queue[i];
245 if (!priv->tx_skbuff) { 288 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
246 if (netif_msg_ifup(priv)) 289 tx_queue->tx_ring_size, GFP_KERNEL);
247 pr_err("%s: Could not allocate tx_skbuff\n", 290 if (!tx_queue->tx_skbuff) {
248 ndev->name); 291 if (netif_msg_ifup(priv))
249 goto cleanup; 292 pr_err("%s: Could not allocate tx_skbuff\n",
293 ndev->name);
294 goto cleanup;
295 }
296
297 for (k = 0; k < tx_queue->tx_ring_size; k++)
298 tx_queue->tx_skbuff[k] = NULL;
250 } 299 }
251 300
252 for (i = 0; i < priv->tx_ring_size; i++) 301 for (i = 0; i < priv->num_rx_queues; i++) {
253 priv->tx_skbuff[i] = NULL; 302 rx_queue = priv->rx_queue[i];
303 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
304 rx_queue->rx_ring_size, GFP_KERNEL);
254 305
255 priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) * 306 if (!rx_queue->rx_skbuff) {
256 priv->rx_ring_size, GFP_KERNEL); 307 if (netif_msg_ifup(priv))
257 if (!priv->rx_skbuff) { 308 pr_err("%s: Could not allocate rx_skbuff\n",
258 if (netif_msg_ifup(priv)) 309 ndev->name);
259 pr_err("%s: Could not allocate rx_skbuff\n", 310 goto cleanup;
260 ndev->name); 311 }
261 goto cleanup;
262 }
263 312
264 for (i = 0; i < priv->rx_ring_size; i++) 313 for (j = 0; j < rx_queue->rx_ring_size; j++)
265 priv->rx_skbuff[i] = NULL; 314 rx_queue->rx_skbuff[j] = NULL;
315 }
266 316
267 if (gfar_init_bds(ndev)) 317 if (gfar_init_bds(ndev))
268 goto cleanup; 318 goto cleanup;
@@ -274,28 +324,41 @@ cleanup:
274 return -ENOMEM; 324 return -ENOMEM;
275} 325}
276 326
327static void gfar_init_tx_rx_base(struct gfar_private *priv)
328{
329 struct gfar __iomem *regs = priv->gfargrp[0].regs;
330 u32 *baddr;
331 int i;
332
333 baddr = &regs->tbase0;
334 for(i = 0; i < priv->num_tx_queues; i++) {
335 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
336 baddr += 2;
337 }
338
339 baddr = &regs->rbase0;
340 for(i = 0; i < priv->num_rx_queues; i++) {
341 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
342 baddr += 2;
343 }
344}
345
277static void gfar_init_mac(struct net_device *ndev) 346static void gfar_init_mac(struct net_device *ndev)
278{ 347{
279 struct gfar_private *priv = netdev_priv(ndev); 348 struct gfar_private *priv = netdev_priv(ndev);
280 struct gfar __iomem *regs = priv->regs; 349 struct gfar __iomem *regs = priv->gfargrp[0].regs;
281 u32 rctrl = 0; 350 u32 rctrl = 0;
282 u32 tctrl = 0; 351 u32 tctrl = 0;
283 u32 attrs = 0; 352 u32 attrs = 0;
284 353
285 /* enet DMA only understands physical addresses */ 354 /* write the tx/rx base registers */
286 gfar_write(&regs->tbase0, priv->tx_bd_dma_base); 355 gfar_init_tx_rx_base(priv);
287 gfar_write(&regs->rbase0, priv->tx_bd_dma_base +
288 sizeof(*priv->tx_bd_base) *
289 priv->tx_ring_size);
290 356
291 /* Configure the coalescing support */ 357 /* Configure the coalescing support */
292 gfar_write(&regs->txic, 0); 358 gfar_configure_coalescing(priv, 0xFF, 0xFF);
293 if (priv->txcoalescing)
294 gfar_write(&regs->txic, priv->txic);
295 359
296 gfar_write(&regs->rxic, 0); 360 if (priv->rx_filer_enable)
297 if (priv->rxcoalescing) 361 rctrl |= RCTRL_FILREN;
298 gfar_write(&regs->rxic, priv->rxic);
299 362
300 if (priv->rx_csum_enable) 363 if (priv->rx_csum_enable)
301 rctrl |= RCTRL_CHECKSUMMING; 364 rctrl |= RCTRL_CHECKSUMMING;
@@ -324,6 +387,8 @@ static void gfar_init_mac(struct net_device *ndev)
324 if (ndev->features & NETIF_F_IP_CSUM) 387 if (ndev->features & NETIF_F_IP_CSUM)
325 tctrl |= TCTRL_INIT_CSUM; 388 tctrl |= TCTRL_INIT_CSUM;
326 389
390 tctrl |= TCTRL_TXSCHED_PRIO;
391
327 gfar_write(&regs->tctrl, tctrl); 392 gfar_write(&regs->tctrl, tctrl);
328 393
329 /* Set the extraction length and index */ 394 /* Set the extraction length and index */
@@ -357,6 +422,7 @@ static const struct net_device_ops gfar_netdev_ops = {
357 .ndo_set_multicast_list = gfar_set_multi, 422 .ndo_set_multicast_list = gfar_set_multi,
358 .ndo_tx_timeout = gfar_timeout, 423 .ndo_tx_timeout = gfar_timeout,
359 .ndo_do_ioctl = gfar_ioctl, 424 .ndo_do_ioctl = gfar_ioctl,
425 .ndo_select_queue = gfar_select_queue,
360 .ndo_vlan_rx_register = gfar_vlan_rx_register, 426 .ndo_vlan_rx_register = gfar_vlan_rx_register,
361 .ndo_set_mac_address = eth_mac_addr, 427 .ndo_set_mac_address = eth_mac_addr,
362 .ndo_validate_addr = eth_validate_addr, 428 .ndo_validate_addr = eth_validate_addr,
@@ -365,56 +431,252 @@ static const struct net_device_ops gfar_netdev_ops = {
365#endif 431#endif
366}; 432};
367 433
434unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
435unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
436
437void lock_rx_qs(struct gfar_private *priv)
438{
439 int i = 0x0;
440
441 for (i = 0; i < priv->num_rx_queues; i++)
442 spin_lock(&priv->rx_queue[i]->rxlock);
443}
444
445void lock_tx_qs(struct gfar_private *priv)
446{
447 int i = 0x0;
448
449 for (i = 0; i < priv->num_tx_queues; i++)
450 spin_lock(&priv->tx_queue[i]->txlock);
451}
452
453void unlock_rx_qs(struct gfar_private *priv)
454{
455 int i = 0x0;
456
457 for (i = 0; i < priv->num_rx_queues; i++)
458 spin_unlock(&priv->rx_queue[i]->rxlock);
459}
460
461void unlock_tx_qs(struct gfar_private *priv)
462{
463 int i = 0x0;
464
465 for (i = 0; i < priv->num_tx_queues; i++)
466 spin_unlock(&priv->tx_queue[i]->txlock);
467}
468
368/* Returns 1 if incoming frames use an FCB */ 469/* Returns 1 if incoming frames use an FCB */
369static inline int gfar_uses_fcb(struct gfar_private *priv) 470static inline int gfar_uses_fcb(struct gfar_private *priv)
370{ 471{
371 return priv->vlgrp || priv->rx_csum_enable; 472 return priv->vlgrp || priv->rx_csum_enable;
372} 473}
373 474
374static int gfar_of_init(struct net_device *dev) 475u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
476{
477 return skb_get_queue_mapping(skb);
478}
479static void free_tx_pointers(struct gfar_private *priv)
480{
481 int i = 0;
482
483 for (i = 0; i < priv->num_tx_queues; i++)
484 kfree(priv->tx_queue[i]);
485}
486
487static void free_rx_pointers(struct gfar_private *priv)
488{
489 int i = 0;
490
491 for (i = 0; i < priv->num_rx_queues; i++)
492 kfree(priv->rx_queue[i]);
493}
494
495static void unmap_group_regs(struct gfar_private *priv)
496{
497 int i = 0;
498
499 for (i = 0; i < MAXGROUPS; i++)
500 if (priv->gfargrp[i].regs)
501 iounmap(priv->gfargrp[i].regs);
502}
503
504static void disable_napi(struct gfar_private *priv)
505{
506 int i = 0;
507
508 for (i = 0; i < priv->num_grps; i++)
509 napi_disable(&priv->gfargrp[i].napi);
510}
511
512static void enable_napi(struct gfar_private *priv)
513{
514 int i = 0;
515
516 for (i = 0; i < priv->num_grps; i++)
517 napi_enable(&priv->gfargrp[i].napi);
518}
519
520static int gfar_parse_group(struct device_node *np,
521 struct gfar_private *priv, const char *model)
522{
523 u32 *queue_mask;
524 u64 addr, size;
525
526 addr = of_translate_address(np,
527 of_get_address(np, 0, &size, NULL));
528 priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
529
530 if (!priv->gfargrp[priv->num_grps].regs)
531 return -ENOMEM;
532
533 priv->gfargrp[priv->num_grps].interruptTransmit =
534 irq_of_parse_and_map(np, 0);
535
536 /* If we aren't the FEC we have multiple interrupts */
537 if (model && strcasecmp(model, "FEC")) {
538 priv->gfargrp[priv->num_grps].interruptReceive =
539 irq_of_parse_and_map(np, 1);
540 priv->gfargrp[priv->num_grps].interruptError =
541 irq_of_parse_and_map(np,2);
542 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
543 priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
544 priv->gfargrp[priv->num_grps].interruptError < 0) {
545 return -EINVAL;
546 }
547 }
548
549 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
550 priv->gfargrp[priv->num_grps].priv = priv;
551 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
552 if(priv->mode == MQ_MG_MODE) {
553 queue_mask = (u32 *)of_get_property(np,
554 "fsl,rx-bit-map", NULL);
555 priv->gfargrp[priv->num_grps].rx_bit_map =
556 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
557 queue_mask = (u32 *)of_get_property(np,
558 "fsl,tx-bit-map", NULL);
559 priv->gfargrp[priv->num_grps].tx_bit_map =
560 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
561 } else {
562 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
563 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
564 }
565 priv->num_grps++;
566
567 return 0;
568}
569
570static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
375{ 571{
376 const char *model; 572 const char *model;
377 const char *ctype; 573 const char *ctype;
378 const void *mac_addr; 574 const void *mac_addr;
379 u64 addr, size; 575 int err = 0, i;
380 int err = 0; 576 struct net_device *dev = NULL;
381 struct gfar_private *priv = netdev_priv(dev); 577 struct gfar_private *priv = NULL;
382 struct device_node *np = priv->node; 578 struct device_node *np = ofdev->node;
579 struct device_node *child = NULL;
383 const u32 *stash; 580 const u32 *stash;
384 const u32 *stash_len; 581 const u32 *stash_len;
385 const u32 *stash_idx; 582 const u32 *stash_idx;
583 unsigned int num_tx_qs, num_rx_qs;
584 u32 *tx_queues, *rx_queues;
386 585
387 if (!np || !of_device_is_available(np)) 586 if (!np || !of_device_is_available(np))
388 return -ENODEV; 587 return -ENODEV;
389 588
390 /* get a pointer to the register memory */ 589 /* parse the num of tx and rx queues */
391 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 590 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
392 priv->regs = ioremap(addr, size); 591 num_tx_qs = tx_queues ? *tx_queues : 1;
592
593 if (num_tx_qs > MAX_TX_QS) {
594 printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
595 num_tx_qs, MAX_TX_QS);
596 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
597 return -EINVAL;
598 }
599
600 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
601 num_rx_qs = rx_queues ? *rx_queues : 1;
602
603 if (num_rx_qs > MAX_RX_QS) {
604 printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
605 num_tx_qs, MAX_TX_QS);
606 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
607 return -EINVAL;
608 }
393 609
394 if (priv->regs == NULL) 610 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
611 dev = *pdev;
612 if (NULL == dev)
395 return -ENOMEM; 613 return -ENOMEM;
396 614
397 priv->interruptTransmit = irq_of_parse_and_map(np, 0); 615 priv = netdev_priv(dev);
616 priv->node = ofdev->node;
617 priv->ndev = dev;
618
619 dev->num_tx_queues = num_tx_qs;
620 dev->real_num_tx_queues = num_tx_qs;
621 priv->num_tx_queues = num_tx_qs;
622 priv->num_rx_queues = num_rx_qs;
623 priv->num_grps = 0x0;
398 624
399 model = of_get_property(np, "model", NULL); 625 model = of_get_property(np, "model", NULL);
400 626
401 /* If we aren't the FEC we have multiple interrupts */ 627 for (i = 0; i < MAXGROUPS; i++)
402 if (model && strcasecmp(model, "FEC")) { 628 priv->gfargrp[i].regs = NULL;
403 priv->interruptReceive = irq_of_parse_and_map(np, 1);
404 629
405 priv->interruptError = irq_of_parse_and_map(np, 2); 630 /* Parse and initialize group specific information */
631 if (of_device_is_compatible(np, "fsl,etsec2")) {
632 priv->mode = MQ_MG_MODE;
633 for_each_child_of_node(np, child) {
634 err = gfar_parse_group(child, priv, model);
635 if (err)
636 goto err_grp_init;
637 }
638 } else {
639 priv->mode = SQ_SG_MODE;
640 err = gfar_parse_group(np, priv, model);
641 if(err)
642 goto err_grp_init;
643 }
406 644
407 if (priv->interruptTransmit < 0 || 645 for (i = 0; i < priv->num_tx_queues; i++)
408 priv->interruptReceive < 0 || 646 priv->tx_queue[i] = NULL;
409 priv->interruptError < 0) { 647 for (i = 0; i < priv->num_rx_queues; i++)
410 err = -EINVAL; 648 priv->rx_queue[i] = NULL;
411 goto err_out; 649
650 for (i = 0; i < priv->num_tx_queues; i++) {
651 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
652 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
653 if (!priv->tx_queue[i]) {
654 err = -ENOMEM;
655 goto tx_alloc_failed;
412 } 656 }
657 priv->tx_queue[i]->tx_skbuff = NULL;
658 priv->tx_queue[i]->qindex = i;
659 priv->tx_queue[i]->dev = dev;
660 spin_lock_init(&(priv->tx_queue[i]->txlock));
413 } 661 }
414 662
663 for (i = 0; i < priv->num_rx_queues; i++) {
664 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
665 sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
666 if (!priv->rx_queue[i]) {
667 err = -ENOMEM;
668 goto rx_alloc_failed;
669 }
670 priv->rx_queue[i]->rx_skbuff = NULL;
671 priv->rx_queue[i]->qindex = i;
672 priv->rx_queue[i]->dev = dev;
673 spin_lock_init(&(priv->rx_queue[i]->rxlock));
674 }
675
676
415 stash = of_get_property(np, "bd-stash", NULL); 677 stash = of_get_property(np, "bd-stash", NULL);
416 678
417 if(stash) { 679 if (stash) {
418 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 680 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
419 priv->bd_stash_en = 1; 681 priv->bd_stash_en = 1;
420 } 682 }
@@ -472,8 +734,13 @@ static int gfar_of_init(struct net_device *dev)
472 734
473 return 0; 735 return 0;
474 736
475err_out: 737rx_alloc_failed:
476 iounmap(priv->regs); 738 free_rx_pointers(priv);
739tx_alloc_failed:
740 free_tx_pointers(priv);
741err_grp_init:
742 unmap_group_regs(priv);
743 free_netdev(dev);
477 return err; 744 return err;
478} 745}
479 746
@@ -491,6 +758,84 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
491 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 758 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
492} 759}
493 760
761static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
762{
763 unsigned int new_bit_map = 0x0;
764 int mask = 0x1 << (max_qs - 1), i;
765 for (i = 0; i < max_qs; i++) {
766 if (bit_map & mask)
767 new_bit_map = new_bit_map + (1 << i);
768 mask = mask >> 0x1;
769 }
770 return new_bit_map;
771}
772
773u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, u32 class)
774{
775 u32 rqfpr = FPR_FILER_MASK;
776 u32 rqfcr = 0x0;
777
778 rqfar--;
779 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
780 ftp_rqfpr[rqfar] = rqfpr;
781 ftp_rqfcr[rqfar] = rqfcr;
782 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
783
784 rqfar--;
785 rqfcr = RQFCR_CMP_NOMATCH;
786 ftp_rqfpr[rqfar] = rqfpr;
787 ftp_rqfcr[rqfar] = rqfcr;
788 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
789
790 rqfar--;
791 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
792 rqfpr = class;
793 ftp_rqfcr[rqfar] = rqfcr;
794 ftp_rqfpr[rqfar] = rqfpr;
795 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
796
797 rqfar--;
798 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
799 rqfpr = class;
800 ftp_rqfcr[rqfar] = rqfcr;
801 ftp_rqfpr[rqfar] = rqfpr;
802 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
803
804 return rqfar;
805}
806
807static void gfar_init_filer_table(struct gfar_private *priv)
808{
809 int i = 0x0;
810 u32 rqfar = MAX_FILER_IDX;
811 u32 rqfcr = 0x0;
812 u32 rqfpr = FPR_FILER_MASK;
813
814 /* Default rule */
815 rqfcr = RQFCR_CMP_MATCH;
816 ftp_rqfcr[rqfar] = rqfcr;
817 ftp_rqfpr[rqfar] = rqfpr;
818 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
819
820 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
821 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
822 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
823 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
824 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
825 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
826
827 /* cur_filer_idx indicated the fisrt non-masked rule */
828 priv->cur_filer_idx = rqfar;
829
830 /* Rest are masked rules */
831 rqfcr = RQFCR_CMP_NOMATCH;
832 for (i = 0; i < rqfar; i++) {
833 ftp_rqfcr[i] = rqfcr;
834 ftp_rqfpr[i] = rqfpr;
835 gfar_write_filer(priv, i, rqfcr, rqfpr);
836 }
837}
838
494/* Set up the ethernet device structure, private data, 839/* Set up the ethernet device structure, private data,
495 * and anything else we need before we start */ 840 * and anything else we need before we start */
496static int gfar_probe(struct of_device *ofdev, 841static int gfar_probe(struct of_device *ofdev,
@@ -499,14 +844,17 @@ static int gfar_probe(struct of_device *ofdev,
499 u32 tempval; 844 u32 tempval;
500 struct net_device *dev = NULL; 845 struct net_device *dev = NULL;
501 struct gfar_private *priv = NULL; 846 struct gfar_private *priv = NULL;
502 int err = 0; 847 struct gfar __iomem *regs = NULL;
848 int err = 0, i, grp_idx = 0;
503 int len_devname; 849 int len_devname;
850 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
851 u32 isrg = 0;
852 u32 *baddr;
504 853
505 /* Create an ethernet device instance */ 854 err = gfar_of_init(ofdev, &dev);
506 dev = alloc_etherdev(sizeof (*priv));
507 855
508 if (NULL == dev) 856 if (err)
509 return -ENOMEM; 857 return err;
510 858
511 priv = netdev_priv(dev); 859 priv = netdev_priv(dev);
512 priv->ndev = dev; 860 priv->ndev = dev;
@@ -514,50 +862,46 @@ static int gfar_probe(struct of_device *ofdev,
514 priv->node = ofdev->node; 862 priv->node = ofdev->node;
515 SET_NETDEV_DEV(dev, &ofdev->dev); 863 SET_NETDEV_DEV(dev, &ofdev->dev);
516 864
517 err = gfar_of_init(dev);
518
519 if (err)
520 goto regs_fail;
521
522 spin_lock_init(&priv->txlock);
523 spin_lock_init(&priv->rxlock);
524 spin_lock_init(&priv->bflock); 865 spin_lock_init(&priv->bflock);
525 INIT_WORK(&priv->reset_task, gfar_reset_task); 866 INIT_WORK(&priv->reset_task, gfar_reset_task);
526 867
527 dev_set_drvdata(&ofdev->dev, priv); 868 dev_set_drvdata(&ofdev->dev, priv);
869 regs = priv->gfargrp[0].regs;
528 870
529 /* Stop the DMA engine now, in case it was running before */ 871 /* Stop the DMA engine now, in case it was running before */
530 /* (The firmware could have used it, and left it running). */ 872 /* (The firmware could have used it, and left it running). */
531 gfar_halt(dev); 873 gfar_halt(dev);
532 874
533 /* Reset MAC layer */ 875 /* Reset MAC layer */
534 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); 876 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
535 877
536 /* We need to delay at least 3 TX clocks */ 878 /* We need to delay at least 3 TX clocks */
537 udelay(2); 879 udelay(2);
538 880
539 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 881 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
540 gfar_write(&priv->regs->maccfg1, tempval); 882 gfar_write(&regs->maccfg1, tempval);
541 883
542 /* Initialize MACCFG2. */ 884 /* Initialize MACCFG2. */
543 gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); 885 gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
544 886
545 /* Initialize ECNTRL */ 887 /* Initialize ECNTRL */
546 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); 888 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
547 889
548 /* Set the dev->base_addr to the gfar reg region */ 890 /* Set the dev->base_addr to the gfar reg region */
549 dev->base_addr = (unsigned long) (priv->regs); 891 dev->base_addr = (unsigned long) regs;
550 892
551 SET_NETDEV_DEV(dev, &ofdev->dev); 893 SET_NETDEV_DEV(dev, &ofdev->dev);
552 894
553 /* Fill in the dev structure */ 895 /* Fill in the dev structure */
554 dev->watchdog_timeo = TX_TIMEOUT; 896 dev->watchdog_timeo = TX_TIMEOUT;
555 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
556 dev->mtu = 1500; 897 dev->mtu = 1500;
557
558 dev->netdev_ops = &gfar_netdev_ops; 898 dev->netdev_ops = &gfar_netdev_ops;
559 dev->ethtool_ops = &gfar_ethtool_ops; 899 dev->ethtool_ops = &gfar_ethtool_ops;
560 900
901 /* Register for napi ...We are registering NAPI for each grp */
902 for (i = 0; i < priv->num_grps; i++)
903 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
904
561 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 905 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
562 priv->rx_csum_enable = 1; 906 priv->rx_csum_enable = 1;
563 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 907 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -573,35 +917,35 @@ static int gfar_probe(struct of_device *ofdev,
573 priv->extended_hash = 1; 917 priv->extended_hash = 1;
574 priv->hash_width = 9; 918 priv->hash_width = 9;
575 919
576 priv->hash_regs[0] = &priv->regs->igaddr0; 920 priv->hash_regs[0] = &regs->igaddr0;
577 priv->hash_regs[1] = &priv->regs->igaddr1; 921 priv->hash_regs[1] = &regs->igaddr1;
578 priv->hash_regs[2] = &priv->regs->igaddr2; 922 priv->hash_regs[2] = &regs->igaddr2;
579 priv->hash_regs[3] = &priv->regs->igaddr3; 923 priv->hash_regs[3] = &regs->igaddr3;
580 priv->hash_regs[4] = &priv->regs->igaddr4; 924 priv->hash_regs[4] = &regs->igaddr4;
581 priv->hash_regs[5] = &priv->regs->igaddr5; 925 priv->hash_regs[5] = &regs->igaddr5;
582 priv->hash_regs[6] = &priv->regs->igaddr6; 926 priv->hash_regs[6] = &regs->igaddr6;
583 priv->hash_regs[7] = &priv->regs->igaddr7; 927 priv->hash_regs[7] = &regs->igaddr7;
584 priv->hash_regs[8] = &priv->regs->gaddr0; 928 priv->hash_regs[8] = &regs->gaddr0;
585 priv->hash_regs[9] = &priv->regs->gaddr1; 929 priv->hash_regs[9] = &regs->gaddr1;
586 priv->hash_regs[10] = &priv->regs->gaddr2; 930 priv->hash_regs[10] = &regs->gaddr2;
587 priv->hash_regs[11] = &priv->regs->gaddr3; 931 priv->hash_regs[11] = &regs->gaddr3;
588 priv->hash_regs[12] = &priv->regs->gaddr4; 932 priv->hash_regs[12] = &regs->gaddr4;
589 priv->hash_regs[13] = &priv->regs->gaddr5; 933 priv->hash_regs[13] = &regs->gaddr5;
590 priv->hash_regs[14] = &priv->regs->gaddr6; 934 priv->hash_regs[14] = &regs->gaddr6;
591 priv->hash_regs[15] = &priv->regs->gaddr7; 935 priv->hash_regs[15] = &regs->gaddr7;
592 936
593 } else { 937 } else {
594 priv->extended_hash = 0; 938 priv->extended_hash = 0;
595 priv->hash_width = 8; 939 priv->hash_width = 8;
596 940
597 priv->hash_regs[0] = &priv->regs->gaddr0; 941 priv->hash_regs[0] = &regs->gaddr0;
598 priv->hash_regs[1] = &priv->regs->gaddr1; 942 priv->hash_regs[1] = &regs->gaddr1;
599 priv->hash_regs[2] = &priv->regs->gaddr2; 943 priv->hash_regs[2] = &regs->gaddr2;
600 priv->hash_regs[3] = &priv->regs->gaddr3; 944 priv->hash_regs[3] = &regs->gaddr3;
601 priv->hash_regs[4] = &priv->regs->gaddr4; 945 priv->hash_regs[4] = &regs->gaddr4;
602 priv->hash_regs[5] = &priv->regs->gaddr5; 946 priv->hash_regs[5] = &regs->gaddr5;
603 priv->hash_regs[6] = &priv->regs->gaddr6; 947 priv->hash_regs[6] = &regs->gaddr6;
604 priv->hash_regs[7] = &priv->regs->gaddr7; 948 priv->hash_regs[7] = &regs->gaddr7;
605 } 949 }
606 950
607 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 951 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
@@ -612,15 +956,70 @@ static int gfar_probe(struct of_device *ofdev,
612 if (dev->features & NETIF_F_IP_CSUM) 956 if (dev->features & NETIF_F_IP_CSUM)
613 dev->hard_header_len += GMAC_FCB_LEN; 957 dev->hard_header_len += GMAC_FCB_LEN;
614 958
959 /* Program the isrg regs only if number of grps > 1 */
960 if (priv->num_grps > 1) {
961 baddr = &regs->isrg0;
962 for (i = 0; i < priv->num_grps; i++) {
963 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
964 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
965 gfar_write(baddr, isrg);
966 baddr++;
967 isrg = 0x0;
968 }
969 }
970
971 /* Need to reverse the bit maps as bit_map's MSB is q0
972 * but, for_each_bit parses from right to left, which
973 * basically reverses the queue numbers */
974 for (i = 0; i< priv->num_grps; i++) {
975 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
976 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
977 priv->gfargrp[i].rx_bit_map = reverse_bitmap(
978 priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
979 }
980
981 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
982 * also assign queues to groups */
983 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
984 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
985 for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
986 priv->num_rx_queues) {
987 priv->gfargrp[grp_idx].num_rx_queues++;
988 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
989 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
990 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
991 }
992 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
993 for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
994 priv->num_tx_queues) {
995 priv->gfargrp[grp_idx].num_tx_queues++;
996 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
997 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
998 tqueue = tqueue | (TQUEUE_EN0 >> i);
999 }
1000 priv->gfargrp[grp_idx].rstat = rstat;
1001 priv->gfargrp[grp_idx].tstat = tstat;
1002 rstat = tstat =0;
1003 }
1004
1005 gfar_write(&regs->rqueue, rqueue);
1006 gfar_write(&regs->tqueue, tqueue);
1007
615 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1008 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
616 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
617 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
618 priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
619 1009
620 priv->txcoalescing = DEFAULT_TX_COALESCE; 1010 /* Initializing some of the rx/tx queue level parameters */
621 priv->txic = DEFAULT_TXIC; 1011 for (i = 0; i < priv->num_tx_queues; i++) {
622 priv->rxcoalescing = DEFAULT_RX_COALESCE; 1012 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
623 priv->rxic = DEFAULT_RXIC; 1013 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1014 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1015 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1016 }
1017
1018 for (i = 0; i < priv->num_rx_queues; i++) {
1019 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1020 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1021 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1022 }
624 1023
625 /* Enable most messages by default */ 1024 /* Enable most messages by default */
626 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1025 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -641,20 +1040,43 @@ static int gfar_probe(struct of_device *ofdev,
641 1040
642 /* fill out IRQ number and name fields */ 1041 /* fill out IRQ number and name fields */
643 len_devname = strlen(dev->name); 1042 len_devname = strlen(dev->name);
644 strncpy(&priv->int_name_tx[0], dev->name, len_devname); 1043 for (i = 0; i < priv->num_grps; i++) {
645 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1044 strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
646 strncpy(&priv->int_name_tx[len_devname], 1045 len_devname);
647 "_tx", sizeof("_tx") + 1); 1046 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
648 1047 strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
649 strncpy(&priv->int_name_rx[0], dev->name, len_devname); 1048 "_g", sizeof("_g"));
650 strncpy(&priv->int_name_rx[len_devname], 1049 priv->gfargrp[i].int_name_tx[
651 "_rx", sizeof("_rx") + 1); 1050 strlen(priv->gfargrp[i].int_name_tx)] = i+48;
1051 strncpy(&priv->gfargrp[i].int_name_tx[strlen(
1052 priv->gfargrp[i].int_name_tx)],
1053 "_tx", sizeof("_tx") + 1);
1054
1055 strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
1056 len_devname);
1057 strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
1058 "_g", sizeof("_g"));
1059 priv->gfargrp[i].int_name_rx[
1060 strlen(priv->gfargrp[i].int_name_rx)] = i+48;
1061 strncpy(&priv->gfargrp[i].int_name_rx[strlen(
1062 priv->gfargrp[i].int_name_rx)],
1063 "_rx", sizeof("_rx") + 1);
1064
1065 strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
1066 len_devname);
1067 strncpy(&priv->gfargrp[i].int_name_er[len_devname],
1068 "_g", sizeof("_g"));
1069 priv->gfargrp[i].int_name_er[strlen(
1070 priv->gfargrp[i].int_name_er)] = i+48;
1071 strncpy(&priv->gfargrp[i].int_name_er[strlen(\
1072 priv->gfargrp[i].int_name_er)],
1073 "_er", sizeof("_er") + 1);
1074 } else
1075 priv->gfargrp[i].int_name_tx[len_devname] = '\0';
1076 }
652 1077
653 strncpy(&priv->int_name_er[0], dev->name, len_devname); 1078 /* Initialize the filer table */
654 strncpy(&priv->int_name_er[len_devname], 1079 gfar_init_filer_table(priv);
655 "_er", sizeof("_er") + 1);
656 } else
657 priv->int_name_tx[len_devname] = '\0';
658 1080
659 /* Create all the sysfs files */ 1081 /* Create all the sysfs files */
660 gfar_init_sysfs(dev); 1082 gfar_init_sysfs(dev);
@@ -665,14 +1087,19 @@ static int gfar_probe(struct of_device *ofdev,
665 /* Even more device info helps when determining which kernel */ 1087 /* Even more device info helps when determining which kernel */
666 /* provided which set of benchmarks. */ 1088 /* provided which set of benchmarks. */
667 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 1089 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
668 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 1090 for (i = 0; i < priv->num_rx_queues; i++)
669 dev->name, priv->rx_ring_size, priv->tx_ring_size); 1091 printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
1092 dev->name, i, priv->rx_queue[i]->rx_ring_size);
1093 for(i = 0; i < priv->num_tx_queues; i++)
1094 printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
1095 dev->name, i, priv->tx_queue[i]->tx_ring_size);
670 1096
671 return 0; 1097 return 0;
672 1098
673register_fail: 1099register_fail:
674 iounmap(priv->regs); 1100 unmap_group_regs(priv);
675regs_fail: 1101 free_tx_pointers(priv);
1102 free_rx_pointers(priv);
676 if (priv->phy_node) 1103 if (priv->phy_node)
677 of_node_put(priv->phy_node); 1104 of_node_put(priv->phy_node);
678 if (priv->tbi_node) 1105 if (priv->tbi_node)
@@ -693,7 +1120,7 @@ static int gfar_remove(struct of_device *ofdev)
693 dev_set_drvdata(&ofdev->dev, NULL); 1120 dev_set_drvdata(&ofdev->dev, NULL);
694 1121
695 unregister_netdev(priv->ndev); 1122 unregister_netdev(priv->ndev);
696 iounmap(priv->regs); 1123 unmap_group_regs(priv);
697 free_netdev(priv->ndev); 1124 free_netdev(priv->ndev);
698 1125
699 return 0; 1126 return 0;
@@ -705,6 +1132,7 @@ static int gfar_suspend(struct device *dev)
705{ 1132{
706 struct gfar_private *priv = dev_get_drvdata(dev); 1133 struct gfar_private *priv = dev_get_drvdata(dev);
707 struct net_device *ndev = priv->ndev; 1134 struct net_device *ndev = priv->ndev;
1135 struct gfar __iomem *regs = priv->gfargrp[0].regs;
708 unsigned long flags; 1136 unsigned long flags;
709 u32 tempval; 1137 u32 tempval;
710 1138
@@ -714,34 +1142,37 @@ static int gfar_suspend(struct device *dev)
714 netif_device_detach(ndev); 1142 netif_device_detach(ndev);
715 1143
716 if (netif_running(ndev)) { 1144 if (netif_running(ndev)) {
717 spin_lock_irqsave(&priv->txlock, flags); 1145
718 spin_lock(&priv->rxlock); 1146 local_irq_save(flags);
1147 lock_tx_qs(priv);
1148 lock_rx_qs(priv);
719 1149
720 gfar_halt_nodisable(ndev); 1150 gfar_halt_nodisable(ndev);
721 1151
722 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1152 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
723 tempval = gfar_read(&priv->regs->maccfg1); 1153 tempval = gfar_read(&regs->maccfg1);
724 1154
725 tempval &= ~MACCFG1_TX_EN; 1155 tempval &= ~MACCFG1_TX_EN;
726 1156
727 if (!magic_packet) 1157 if (!magic_packet)
728 tempval &= ~MACCFG1_RX_EN; 1158 tempval &= ~MACCFG1_RX_EN;
729 1159
730 gfar_write(&priv->regs->maccfg1, tempval); 1160 gfar_write(&regs->maccfg1, tempval);
731 1161
732 spin_unlock(&priv->rxlock); 1162 unlock_rx_qs(priv);
733 spin_unlock_irqrestore(&priv->txlock, flags); 1163 unlock_tx_qs(priv);
1164 local_irq_restore(flags);
734 1165
735 napi_disable(&priv->napi); 1166 disable_napi(priv);
736 1167
737 if (magic_packet) { 1168 if (magic_packet) {
738 /* Enable interrupt on Magic Packet */ 1169 /* Enable interrupt on Magic Packet */
739 gfar_write(&priv->regs->imask, IMASK_MAG); 1170 gfar_write(&regs->imask, IMASK_MAG);
740 1171
741 /* Enable Magic Packet mode */ 1172 /* Enable Magic Packet mode */
742 tempval = gfar_read(&priv->regs->maccfg2); 1173 tempval = gfar_read(&regs->maccfg2);
743 tempval |= MACCFG2_MPEN; 1174 tempval |= MACCFG2_MPEN;
744 gfar_write(&priv->regs->maccfg2, tempval); 1175 gfar_write(&regs->maccfg2, tempval);
745 } else { 1176 } else {
746 phy_stop(priv->phydev); 1177 phy_stop(priv->phydev);
747 } 1178 }
@@ -754,6 +1185,7 @@ static int gfar_resume(struct device *dev)
754{ 1185{
755 struct gfar_private *priv = dev_get_drvdata(dev); 1186 struct gfar_private *priv = dev_get_drvdata(dev);
756 struct net_device *ndev = priv->ndev; 1187 struct net_device *ndev = priv->ndev;
1188 struct gfar __iomem *regs = priv->gfargrp[0].regs;
757 unsigned long flags; 1189 unsigned long flags;
758 u32 tempval; 1190 u32 tempval;
759 int magic_packet = priv->wol_en && 1191 int magic_packet = priv->wol_en &&
@@ -770,22 +1202,23 @@ static int gfar_resume(struct device *dev)
770 /* Disable Magic Packet mode, in case something 1202 /* Disable Magic Packet mode, in case something
771 * else woke us up. 1203 * else woke us up.
772 */ 1204 */
1205 local_irq_save(flags);
1206 lock_tx_qs(priv);
1207 lock_rx_qs(priv);
773 1208
774 spin_lock_irqsave(&priv->txlock, flags); 1209 tempval = gfar_read(&regs->maccfg2);
775 spin_lock(&priv->rxlock);
776
777 tempval = gfar_read(&priv->regs->maccfg2);
778 tempval &= ~MACCFG2_MPEN; 1210 tempval &= ~MACCFG2_MPEN;
779 gfar_write(&priv->regs->maccfg2, tempval); 1211 gfar_write(&regs->maccfg2, tempval);
780 1212
781 gfar_start(ndev); 1213 gfar_start(ndev);
782 1214
783 spin_unlock(&priv->rxlock); 1215 unlock_rx_qs(priv);
784 spin_unlock_irqrestore(&priv->txlock, flags); 1216 unlock_tx_qs(priv);
1217 local_irq_restore(flags);
785 1218
786 netif_device_attach(ndev); 1219 netif_device_attach(ndev);
787 1220
788 napi_enable(&priv->napi); 1221 enable_napi(priv);
789 1222
790 return 0; 1223 return 0;
791} 1224}
@@ -812,7 +1245,7 @@ static int gfar_restore(struct device *dev)
812 phy_start(priv->phydev); 1245 phy_start(priv->phydev);
813 1246
814 netif_device_attach(ndev); 1247 netif_device_attach(ndev);
815 napi_enable(&priv->napi); 1248 napi_enable(&priv->gfargrp.napi);
816 1249
817 return 0; 1250 return 0;
818} 1251}
@@ -851,7 +1284,10 @@ static int gfar_legacy_resume(struct of_device *ofdev)
851static phy_interface_t gfar_get_interface(struct net_device *dev) 1284static phy_interface_t gfar_get_interface(struct net_device *dev)
852{ 1285{
853 struct gfar_private *priv = netdev_priv(dev); 1286 struct gfar_private *priv = netdev_priv(dev);
854 u32 ecntrl = gfar_read(&priv->regs->ecntrl); 1287 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1288 u32 ecntrl;
1289
1290 ecntrl = gfar_read(&regs->ecntrl);
855 1291
856 if (ecntrl & ECNTRL_SGMII_MODE) 1292 if (ecntrl & ECNTRL_SGMII_MODE)
857 return PHY_INTERFACE_MODE_SGMII; 1293 return PHY_INTERFACE_MODE_SGMII;
@@ -973,46 +1409,52 @@ static void gfar_configure_serdes(struct net_device *dev)
973static void init_registers(struct net_device *dev) 1409static void init_registers(struct net_device *dev)
974{ 1410{
975 struct gfar_private *priv = netdev_priv(dev); 1411 struct gfar_private *priv = netdev_priv(dev);
1412 struct gfar __iomem *regs = NULL;
1413 int i = 0;
976 1414
977 /* Clear IEVENT */ 1415 for (i = 0; i < priv->num_grps; i++) {
978 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); 1416 regs = priv->gfargrp[i].regs;
1417 /* Clear IEVENT */
1418 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
979 1419
980 /* Initialize IMASK */ 1420 /* Initialize IMASK */
981 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); 1421 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1422 }
982 1423
1424 regs = priv->gfargrp[0].regs;
983 /* Init hash registers to zero */ 1425 /* Init hash registers to zero */
984 gfar_write(&priv->regs->igaddr0, 0); 1426 gfar_write(&regs->igaddr0, 0);
985 gfar_write(&priv->regs->igaddr1, 0); 1427 gfar_write(&regs->igaddr1, 0);
986 gfar_write(&priv->regs->igaddr2, 0); 1428 gfar_write(&regs->igaddr2, 0);
987 gfar_write(&priv->regs->igaddr3, 0); 1429 gfar_write(&regs->igaddr3, 0);
988 gfar_write(&priv->regs->igaddr4, 0); 1430 gfar_write(&regs->igaddr4, 0);
989 gfar_write(&priv->regs->igaddr5, 0); 1431 gfar_write(&regs->igaddr5, 0);
990 gfar_write(&priv->regs->igaddr6, 0); 1432 gfar_write(&regs->igaddr6, 0);
991 gfar_write(&priv->regs->igaddr7, 0); 1433 gfar_write(&regs->igaddr7, 0);
992 1434
993 gfar_write(&priv->regs->gaddr0, 0); 1435 gfar_write(&regs->gaddr0, 0);
994 gfar_write(&priv->regs->gaddr1, 0); 1436 gfar_write(&regs->gaddr1, 0);
995 gfar_write(&priv->regs->gaddr2, 0); 1437 gfar_write(&regs->gaddr2, 0);
996 gfar_write(&priv->regs->gaddr3, 0); 1438 gfar_write(&regs->gaddr3, 0);
997 gfar_write(&priv->regs->gaddr4, 0); 1439 gfar_write(&regs->gaddr4, 0);
998 gfar_write(&priv->regs->gaddr5, 0); 1440 gfar_write(&regs->gaddr5, 0);
999 gfar_write(&priv->regs->gaddr6, 0); 1441 gfar_write(&regs->gaddr6, 0);
1000 gfar_write(&priv->regs->gaddr7, 0); 1442 gfar_write(&regs->gaddr7, 0);
1001 1443
1002 /* Zero out the rmon mib registers if it has them */ 1444 /* Zero out the rmon mib registers if it has them */
1003 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1445 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1004 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); 1446 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
1005 1447
1006 /* Mask off the CAM interrupts */ 1448 /* Mask off the CAM interrupts */
1007 gfar_write(&priv->regs->rmon.cam1, 0xffffffff); 1449 gfar_write(&regs->rmon.cam1, 0xffffffff);
1008 gfar_write(&priv->regs->rmon.cam2, 0xffffffff); 1450 gfar_write(&regs->rmon.cam2, 0xffffffff);
1009 } 1451 }
1010 1452
1011 /* Initialize the max receive buffer length */ 1453 /* Initialize the max receive buffer length */
1012 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 1454 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1013 1455
1014 /* Initialize the Minimum Frame Length Register */ 1456 /* Initialize the Minimum Frame Length Register */
1015 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 1457 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1016} 1458}
1017 1459
1018 1460
@@ -1020,23 +1462,28 @@ static void init_registers(struct net_device *dev)
1020static void gfar_halt_nodisable(struct net_device *dev) 1462static void gfar_halt_nodisable(struct net_device *dev)
1021{ 1463{
1022 struct gfar_private *priv = netdev_priv(dev); 1464 struct gfar_private *priv = netdev_priv(dev);
1023 struct gfar __iomem *regs = priv->regs; 1465 struct gfar __iomem *regs = NULL;
1024 u32 tempval; 1466 u32 tempval;
1467 int i = 0;
1025 1468
1026 /* Mask all interrupts */ 1469 for (i = 0; i < priv->num_grps; i++) {
1027 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1470 regs = priv->gfargrp[i].regs;
1471 /* Mask all interrupts */
1472 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1028 1473
1029 /* Clear all interrupts */ 1474 /* Clear all interrupts */
1030 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1475 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1476 }
1031 1477
1478 regs = priv->gfargrp[0].regs;
1032 /* Stop the DMA, and wait for it to stop */ 1479 /* Stop the DMA, and wait for it to stop */
1033 tempval = gfar_read(&priv->regs->dmactrl); 1480 tempval = gfar_read(&regs->dmactrl);
1034 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1481 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1035 != (DMACTRL_GRS | DMACTRL_GTS)) { 1482 != (DMACTRL_GRS | DMACTRL_GTS)) {
1036 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1483 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1037 gfar_write(&priv->regs->dmactrl, tempval); 1484 gfar_write(&regs->dmactrl, tempval);
1038 1485
1039 while (!(gfar_read(&priv->regs->ievent) & 1486 while (!(gfar_read(&regs->ievent) &
1040 (IEVENT_GRSC | IEVENT_GTSC))) 1487 (IEVENT_GRSC | IEVENT_GTSC)))
1041 cpu_relax(); 1488 cpu_relax();
1042 } 1489 }
@@ -1046,7 +1493,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
1046void gfar_halt(struct net_device *dev) 1493void gfar_halt(struct net_device *dev)
1047{ 1494{
1048 struct gfar_private *priv = netdev_priv(dev); 1495 struct gfar_private *priv = netdev_priv(dev);
1049 struct gfar __iomem *regs = priv->regs; 1496 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1050 u32 tempval; 1497 u32 tempval;
1051 1498
1052 gfar_halt_nodisable(dev); 1499 gfar_halt_nodisable(dev);
@@ -1057,101 +1504,131 @@ void gfar_halt(struct net_device *dev)
1057 gfar_write(&regs->maccfg1, tempval); 1504 gfar_write(&regs->maccfg1, tempval);
1058} 1505}
1059 1506
1507static void free_grp_irqs(struct gfar_priv_grp *grp)
1508{
1509 free_irq(grp->interruptError, grp);
1510 free_irq(grp->interruptTransmit, grp);
1511 free_irq(grp->interruptReceive, grp);
1512}
1513
1060void stop_gfar(struct net_device *dev) 1514void stop_gfar(struct net_device *dev)
1061{ 1515{
1062 struct gfar_private *priv = netdev_priv(dev); 1516 struct gfar_private *priv = netdev_priv(dev);
1063 unsigned long flags; 1517 unsigned long flags;
1518 int i;
1064 1519
1065 phy_stop(priv->phydev); 1520 phy_stop(priv->phydev);
1066 1521
1522
1067 /* Lock it down */ 1523 /* Lock it down */
1068 spin_lock_irqsave(&priv->txlock, flags); 1524 local_irq_save(flags);
1069 spin_lock(&priv->rxlock); 1525 lock_tx_qs(priv);
1526 lock_rx_qs(priv);
1070 1527
1071 gfar_halt(dev); 1528 gfar_halt(dev);
1072 1529
1073 spin_unlock(&priv->rxlock); 1530 unlock_rx_qs(priv);
1074 spin_unlock_irqrestore(&priv->txlock, flags); 1531 unlock_tx_qs(priv);
1532 local_irq_restore(flags);
1075 1533
1076 /* Free the IRQs */ 1534 /* Free the IRQs */
1077 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1535 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1078 free_irq(priv->interruptError, dev); 1536 for (i = 0; i < priv->num_grps; i++)
1079 free_irq(priv->interruptTransmit, dev); 1537 free_grp_irqs(&priv->gfargrp[i]);
1080 free_irq(priv->interruptReceive, dev);
1081 } else { 1538 } else {
1082 free_irq(priv->interruptTransmit, dev); 1539 for (i = 0; i < priv->num_grps; i++)
1540 free_irq(priv->gfargrp[i].interruptTransmit,
1541 &priv->gfargrp[i]);
1083 } 1542 }
1084 1543
1085 free_skb_resources(priv); 1544 free_skb_resources(priv);
1086} 1545}
1087 1546
1088/* If there are any tx skbs or rx skbs still around, free them. 1547static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1089 * Then free tx_skbuff and rx_skbuff */
1090static void free_skb_resources(struct gfar_private *priv)
1091{ 1548{
1092 struct device *dev = &priv->ofdev->dev;
1093 struct rxbd8 *rxbdp;
1094 struct txbd8 *txbdp; 1549 struct txbd8 *txbdp;
1550 struct gfar_private *priv = netdev_priv(tx_queue->dev);
1095 int i, j; 1551 int i, j;
1096 1552
1097 /* Go through all the buffer descriptors and free their data buffers */ 1553 txbdp = tx_queue->tx_bd_base;
1098 txbdp = priv->tx_bd_base;
1099
1100 if (!priv->tx_skbuff)
1101 goto skip_tx_skbuff;
1102 1554
1103 for (i = 0; i < priv->tx_ring_size; i++) { 1555 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1104 if (!priv->tx_skbuff[i]) 1556 if (!tx_queue->tx_skbuff[i])
1105 continue; 1557 continue;
1106 1558
1107 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1559 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
1108 txbdp->length, DMA_TO_DEVICE); 1560 txbdp->length, DMA_TO_DEVICE);
1109 txbdp->lstatus = 0; 1561 txbdp->lstatus = 0;
1110 for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) { 1562 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1563 j++) {
1111 txbdp++; 1564 txbdp++;
1112 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1565 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
1113 txbdp->length, DMA_TO_DEVICE); 1566 txbdp->length, DMA_TO_DEVICE);
1114 } 1567 }
1115 txbdp++; 1568 txbdp++;
1116 dev_kfree_skb_any(priv->tx_skbuff[i]); 1569 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1117 priv->tx_skbuff[i] = NULL; 1570 tx_queue->tx_skbuff[i] = NULL;
1118 } 1571 }
1572 kfree(tx_queue->tx_skbuff);
1573}
1119 1574
1120 kfree(priv->tx_skbuff); 1575static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1121skip_tx_skbuff: 1576{
1122 1577 struct rxbd8 *rxbdp;
1123 rxbdp = priv->rx_bd_base; 1578 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1579 int i;
1124 1580
1125 if (!priv->rx_skbuff) 1581 rxbdp = rx_queue->rx_bd_base;
1126 goto skip_rx_skbuff;
1127 1582
1128 for (i = 0; i < priv->rx_ring_size; i++) { 1583 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1129 if (priv->rx_skbuff[i]) { 1584 if (rx_queue->rx_skbuff[i]) {
1130 dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr, 1585 dma_unmap_single(&priv->ofdev->dev,
1131 priv->rx_buffer_size, 1586 rxbdp->bufPtr, priv->rx_buffer_size,
1132 DMA_FROM_DEVICE); 1587 DMA_FROM_DEVICE);
1133 dev_kfree_skb_any(priv->rx_skbuff[i]); 1588 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1134 priv->rx_skbuff[i] = NULL; 1589 rx_queue->rx_skbuff[i] = NULL;
1135 } 1590 }
1136
1137 rxbdp->lstatus = 0; 1591 rxbdp->lstatus = 0;
1138 rxbdp->bufPtr = 0; 1592 rxbdp->bufPtr = 0;
1139 rxbdp++; 1593 rxbdp++;
1140 } 1594 }
1595 kfree(rx_queue->rx_skbuff);
1596}
1597
1598/* If there are any tx skbs or rx skbs still around, free them.
1599 * Then free tx_skbuff and rx_skbuff */
1600static void free_skb_resources(struct gfar_private *priv)
1601{
1602 struct gfar_priv_tx_q *tx_queue = NULL;
1603 struct gfar_priv_rx_q *rx_queue = NULL;
1604 int i;
1605
1606 /* Go through all the buffer descriptors and free their data buffers */
1607 for (i = 0; i < priv->num_tx_queues; i++) {
1608 tx_queue = priv->tx_queue[i];
1609 if(!tx_queue->tx_skbuff)
1610 free_skb_tx_queue(tx_queue);
1611 }
1141 1612
1142 kfree(priv->rx_skbuff); 1613 for (i = 0; i < priv->num_rx_queues; i++) {
1143skip_rx_skbuff: 1614 rx_queue = priv->rx_queue[i];
1615 if(!rx_queue->rx_skbuff)
1616 free_skb_rx_queue(rx_queue);
1617 }
1144 1618
1145 dma_free_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size + 1619 dma_free_coherent(&priv->ofdev->dev,
1146 sizeof(*rxbdp) * priv->rx_ring_size, 1620 sizeof(struct txbd8) * priv->total_tx_ring_size +
1147 priv->tx_bd_base, priv->tx_bd_dma_base); 1621 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1622 priv->tx_queue[0]->tx_bd_base,
1623 priv->tx_queue[0]->tx_bd_dma_base);
1148} 1624}
1149 1625
1150void gfar_start(struct net_device *dev) 1626void gfar_start(struct net_device *dev)
1151{ 1627{
1152 struct gfar_private *priv = netdev_priv(dev); 1628 struct gfar_private *priv = netdev_priv(dev);
1153 struct gfar __iomem *regs = priv->regs; 1629 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1154 u32 tempval; 1630 u32 tempval;
1631 int i = 0;
1155 1632
1156 /* Enable Rx and Tx in MACCFG1 */ 1633 /* Enable Rx and Tx in MACCFG1 */
1157 tempval = gfar_read(&regs->maccfg1); 1634 tempval = gfar_read(&regs->maccfg1);
@@ -1159,94 +1636,158 @@ void gfar_start(struct net_device *dev)
1159 gfar_write(&regs->maccfg1, tempval); 1636 gfar_write(&regs->maccfg1, tempval);
1160 1637
1161 /* Initialize DMACTRL to have WWR and WOP */ 1638 /* Initialize DMACTRL to have WWR and WOP */
1162 tempval = gfar_read(&priv->regs->dmactrl); 1639 tempval = gfar_read(&regs->dmactrl);
1163 tempval |= DMACTRL_INIT_SETTINGS; 1640 tempval |= DMACTRL_INIT_SETTINGS;
1164 gfar_write(&priv->regs->dmactrl, tempval); 1641 gfar_write(&regs->dmactrl, tempval);
1165 1642
1166 /* Make sure we aren't stopped */ 1643 /* Make sure we aren't stopped */
1167 tempval = gfar_read(&priv->regs->dmactrl); 1644 tempval = gfar_read(&regs->dmactrl);
1168 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1645 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1169 gfar_write(&priv->regs->dmactrl, tempval); 1646 gfar_write(&regs->dmactrl, tempval);
1170 1647
1171 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1648 for (i = 0; i < priv->num_grps; i++) {
1172 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 1649 regs = priv->gfargrp[i].regs;
1173 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); 1650 /* Clear THLT/RHLT, so that the DMA starts polling now */
1174 1651 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1175 /* Unmask the interrupts we look for */ 1652 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1176 gfar_write(&regs->imask, IMASK_DEFAULT); 1653 /* Unmask the interrupts we look for */
1654 gfar_write(&regs->imask, IMASK_DEFAULT);
1655 }
1177 1656
1178 dev->trans_start = jiffies; 1657 dev->trans_start = jiffies;
1179} 1658}
1180 1659
1181/* Bring the controller up and running */ 1660void gfar_configure_coalescing(struct gfar_private *priv,
1182int startup_gfar(struct net_device *ndev) 1661 unsigned int tx_mask, unsigned int rx_mask)
1183{ 1662{
1184 struct gfar_private *priv = netdev_priv(ndev); 1663 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1185 struct gfar __iomem *regs = priv->regs; 1664 u32 *baddr;
1186 int err; 1665 int i = 0;
1187 1666
1188 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1667 /* Backward compatible case ---- even if we enable
1668 * multiple queues, there's only single reg to program
1669 */
1670 gfar_write(&regs->txic, 0);
1671 if(likely(priv->tx_queue[0]->txcoalescing))
1672 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1189 1673
1190 err = gfar_alloc_skb_resources(ndev); 1674 gfar_write(&regs->rxic, 0);
1191 if (err) 1675 if(unlikely(priv->rx_queue[0]->rxcoalescing))
1192 return err; 1676 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1677
1678 if (priv->mode == MQ_MG_MODE) {
1679 baddr = &regs->txic0;
1680 for_each_bit (i, &tx_mask, priv->num_tx_queues) {
1681 if (likely(priv->tx_queue[i]->txcoalescing)) {
1682 gfar_write(baddr + i, 0);
1683 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1684 }
1685 }
1193 1686
1194 gfar_init_mac(ndev); 1687 baddr = &regs->rxic0;
1688 for_each_bit (i, &rx_mask, priv->num_rx_queues) {
1689 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1690 gfar_write(baddr + i, 0);
1691 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1692 }
1693 }
1694 }
1695}
1696
1697static int register_grp_irqs(struct gfar_priv_grp *grp)
1698{
1699 struct gfar_private *priv = grp->priv;
1700 struct net_device *dev = priv->ndev;
1701 int err;
1195 1702
1196 /* If the device has multiple interrupts, register for 1703 /* If the device has multiple interrupts, register for
1197 * them. Otherwise, only register for the one */ 1704 * them. Otherwise, only register for the one */
1198 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1705 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1199 /* Install our interrupt handlers for Error, 1706 /* Install our interrupt handlers for Error,
1200 * Transmit, and Receive */ 1707 * Transmit, and Receive */
1201 err = request_irq(priv->interruptError, gfar_error, 0, 1708 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1202 priv->int_name_er, ndev); 1709 grp->int_name_er,grp)) < 0) {
1203 if (err) {
1204 if (netif_msg_intr(priv)) 1710 if (netif_msg_intr(priv))
1205 pr_err("%s: Can't get IRQ %d\n", ndev->name, 1711 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1206 priv->interruptError); 1712 dev->name, grp->interruptError);
1207 goto err_irq_fail; 1713
1714 goto err_irq_fail;
1208 } 1715 }
1209 1716
1210 err = request_irq(priv->interruptTransmit, gfar_transmit, 0, 1717 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1211 priv->int_name_tx, ndev); 1718 0, grp->int_name_tx, grp)) < 0) {
1212 if (err) {
1213 if (netif_msg_intr(priv)) 1719 if (netif_msg_intr(priv))
1214 pr_err("%s: Can't get IRQ %d\n", ndev->name, 1720 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1215 priv->interruptTransmit); 1721 dev->name, grp->interruptTransmit);
1216 goto tx_irq_fail; 1722 goto tx_irq_fail;
1217 } 1723 }
1218 1724
1219 err = request_irq(priv->interruptReceive, gfar_receive, 0, 1725 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1220 priv->int_name_rx, ndev); 1726 grp->int_name_rx, grp)) < 0) {
1221 if (err) {
1222 if (netif_msg_intr(priv)) 1727 if (netif_msg_intr(priv))
1223 pr_err("%s: Can't get IRQ %d (receive0)\n", 1728 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1224 ndev->name, priv->interruptReceive); 1729 dev->name, grp->interruptReceive);
1225 goto rx_irq_fail; 1730 goto rx_irq_fail;
1226 } 1731 }
1227 } else { 1732 } else {
1228 err = request_irq(priv->interruptTransmit, gfar_interrupt, 1733 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1229 0, priv->int_name_tx, ndev); 1734 grp->int_name_tx, grp)) < 0) {
1230 if (err) {
1231 if (netif_msg_intr(priv)) 1735 if (netif_msg_intr(priv))
1232 pr_err("%s: Can't get IRQ %d\n", ndev->name, 1736 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1233 priv->interruptTransmit); 1737 dev->name, grp->interruptTransmit);
1234 goto err_irq_fail; 1738 goto err_irq_fail;
1235 } 1739 }
1236 } 1740 }
1237 1741
1742 return 0;
1743
1744rx_irq_fail:
1745 free_irq(grp->interruptTransmit, grp);
1746tx_irq_fail:
1747 free_irq(grp->interruptError, grp);
1748err_irq_fail:
1749 return err;
1750
1751}
1752
1753/* Bring the controller up and running */
1754int startup_gfar(struct net_device *ndev)
1755{
1756 struct gfar_private *priv = netdev_priv(ndev);
1757 struct gfar __iomem *regs = NULL;
1758 int err, i, j;
1759
1760 for (i = 0; i < priv->num_grps; i++) {
1761 regs= priv->gfargrp[i].regs;
1762 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1763 }
1764
1765 regs= priv->gfargrp[0].regs;
1766 err = gfar_alloc_skb_resources(ndev);
1767 if (err)
1768 return err;
1769
1770 gfar_init_mac(ndev);
1771
1772 for (i = 0; i < priv->num_grps; i++) {
1773 err = register_grp_irqs(&priv->gfargrp[i]);
1774 if (err) {
1775 for (j = 0; j < i; j++)
1776 free_grp_irqs(&priv->gfargrp[j]);
1777 goto irq_fail;
1778 }
1779 }
1780
1238 /* Start the controller */ 1781 /* Start the controller */
1239 gfar_start(ndev); 1782 gfar_start(ndev);
1240 1783
1241 phy_start(priv->phydev); 1784 phy_start(priv->phydev);
1242 1785
1786 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1787
1243 return 0; 1788 return 0;
1244 1789
1245rx_irq_fail: 1790irq_fail:
1246 free_irq(priv->interruptTransmit, ndev);
1247tx_irq_fail:
1248 free_irq(priv->interruptError, ndev);
1249err_irq_fail:
1250 free_skb_resources(priv); 1791 free_skb_resources(priv);
1251 return err; 1792 return err;
1252} 1793}
@@ -1258,7 +1799,7 @@ static int gfar_enet_open(struct net_device *dev)
1258 struct gfar_private *priv = netdev_priv(dev); 1799 struct gfar_private *priv = netdev_priv(dev);
1259 int err; 1800 int err;
1260 1801
1261 napi_enable(&priv->napi); 1802 enable_napi(priv);
1262 1803
1263 skb_queue_head_init(&priv->rx_recycle); 1804 skb_queue_head_init(&priv->rx_recycle);
1264 1805
@@ -1269,18 +1810,18 @@ static int gfar_enet_open(struct net_device *dev)
1269 1810
1270 err = init_phy(dev); 1811 err = init_phy(dev);
1271 1812
1272 if(err) { 1813 if (err) {
1273 napi_disable(&priv->napi); 1814 disable_napi(priv);
1274 return err; 1815 return err;
1275 } 1816 }
1276 1817
1277 err = startup_gfar(dev); 1818 err = startup_gfar(dev);
1278 if (err) { 1819 if (err) {
1279 napi_disable(&priv->napi); 1820 disable_napi(priv);
1280 return err; 1821 return err;
1281 } 1822 }
1282 1823
1283 netif_start_queue(dev); 1824 netif_tx_start_all_queues(dev);
1284 1825
1285 device_set_wakeup_enable(&dev->dev, priv->wol_en); 1826 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1286 1827
@@ -1349,15 +1890,23 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1349static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 1890static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1350{ 1891{
1351 struct gfar_private *priv = netdev_priv(dev); 1892 struct gfar_private *priv = netdev_priv(dev);
1893 struct gfar_priv_tx_q *tx_queue = NULL;
1894 struct netdev_queue *txq;
1895 struct gfar __iomem *regs = NULL;
1352 struct txfcb *fcb = NULL; 1896 struct txfcb *fcb = NULL;
1353 struct txbd8 *txbdp, *txbdp_start, *base; 1897 struct txbd8 *txbdp, *txbdp_start, *base;
1354 u32 lstatus; 1898 u32 lstatus;
1355 int i; 1899 int i, rq = 0;
1356 u32 bufaddr; 1900 u32 bufaddr;
1357 unsigned long flags; 1901 unsigned long flags;
1358 unsigned int nr_frags, length; 1902 unsigned int nr_frags, length;
1359 1903
1360 base = priv->tx_bd_base; 1904
1905 rq = skb->queue_mapping;
1906 tx_queue = priv->tx_queue[rq];
1907 txq = netdev_get_tx_queue(dev, rq);
1908 base = tx_queue->tx_bd_base;
1909 regs = tx_queue->grp->regs;
1361 1910
1362 /* make space for additional header when fcb is needed */ 1911 /* make space for additional header when fcb is needed */
1363 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 1912 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1378,21 +1927,21 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1378 /* total number of fragments in the SKB */ 1927 /* total number of fragments in the SKB */
1379 nr_frags = skb_shinfo(skb)->nr_frags; 1928 nr_frags = skb_shinfo(skb)->nr_frags;
1380 1929
1381 spin_lock_irqsave(&priv->txlock, flags); 1930 spin_lock_irqsave(&tx_queue->txlock, flags);
1382 1931
1383 /* check if there is space to queue this packet */ 1932 /* check if there is space to queue this packet */
1384 if ((nr_frags+1) > priv->num_txbdfree) { 1933 if ((nr_frags+1) > tx_queue->num_txbdfree) {
1385 /* no space, stop the queue */ 1934 /* no space, stop the queue */
1386 netif_stop_queue(dev); 1935 netif_tx_stop_queue(txq);
1387 dev->stats.tx_fifo_errors++; 1936 dev->stats.tx_fifo_errors++;
1388 spin_unlock_irqrestore(&priv->txlock, flags); 1937 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1389 return NETDEV_TX_BUSY; 1938 return NETDEV_TX_BUSY;
1390 } 1939 }
1391 1940
1392 /* Update transmit stats */ 1941 /* Update transmit stats */
1393 dev->stats.tx_bytes += skb->len; 1942 dev->stats.tx_bytes += skb->len;
1394 1943
1395 txbdp = txbdp_start = priv->cur_tx; 1944 txbdp = txbdp_start = tx_queue->cur_tx;
1396 1945
1397 if (nr_frags == 0) { 1946 if (nr_frags == 0) {
1398 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1947 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1400,7 +1949,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1400 /* Place the fragment addresses and lengths into the TxBDs */ 1949 /* Place the fragment addresses and lengths into the TxBDs */
1401 for (i = 0; i < nr_frags; i++) { 1950 for (i = 0; i < nr_frags; i++) {
1402 /* Point at the next BD, wrapping as needed */ 1951 /* Point at the next BD, wrapping as needed */
1403 txbdp = next_txbd(txbdp, base, priv->tx_ring_size); 1952 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1404 1953
1405 length = skb_shinfo(skb)->frags[i].size; 1954 length = skb_shinfo(skb)->frags[i].size;
1406 1955
@@ -1442,7 +1991,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1442 } 1991 }
1443 1992
1444 /* setup the TxBD length and buffer pointer for the first BD */ 1993 /* setup the TxBD length and buffer pointer for the first BD */
1445 priv->tx_skbuff[priv->skb_curtx] = skb; 1994 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1446 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 1995 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1447 skb_headlen(skb), DMA_TO_DEVICE); 1996 skb_headlen(skb), DMA_TO_DEVICE);
1448 1997
@@ -1462,29 +2011,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1462 2011
1463 /* Update the current skb pointer to the next entry we will use 2012 /* Update the current skb pointer to the next entry we will use
1464 * (wrapping if necessary) */ 2013 * (wrapping if necessary) */
1465 priv->skb_curtx = (priv->skb_curtx + 1) & 2014 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1466 TX_RING_MOD_MASK(priv->tx_ring_size); 2015 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1467 2016
1468 priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size); 2017 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1469 2018
1470 /* reduce TxBD free count */ 2019 /* reduce TxBD free count */
1471 priv->num_txbdfree -= (nr_frags + 1); 2020 tx_queue->num_txbdfree -= (nr_frags + 1);
1472 2021
1473 dev->trans_start = jiffies; 2022 dev->trans_start = jiffies;
1474 2023
1475 /* If the next BD still needs to be cleaned up, then the bds 2024 /* If the next BD still needs to be cleaned up, then the bds
1476 are full. We need to tell the kernel to stop sending us stuff. */ 2025 are full. We need to tell the kernel to stop sending us stuff. */
1477 if (!priv->num_txbdfree) { 2026 if (!tx_queue->num_txbdfree) {
1478 netif_stop_queue(dev); 2027 netif_tx_stop_queue(txq);
1479 2028
1480 dev->stats.tx_fifo_errors++; 2029 dev->stats.tx_fifo_errors++;
1481 } 2030 }
1482 2031
1483 /* Tell the DMA to go go go */ 2032 /* Tell the DMA to go go go */
1484 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2033 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1485 2034
1486 /* Unlock priv */ 2035 /* Unlock priv */
1487 spin_unlock_irqrestore(&priv->txlock, flags); 2036 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1488 2037
1489 return NETDEV_TX_OK; 2038 return NETDEV_TX_OK;
1490} 2039}
@@ -1494,7 +2043,7 @@ static int gfar_close(struct net_device *dev)
1494{ 2043{
1495 struct gfar_private *priv = netdev_priv(dev); 2044 struct gfar_private *priv = netdev_priv(dev);
1496 2045
1497 napi_disable(&priv->napi); 2046 disable_napi(priv);
1498 2047
1499 skb_queue_purge(&priv->rx_recycle); 2048 skb_queue_purge(&priv->rx_recycle);
1500 cancel_work_sync(&priv->reset_task); 2049 cancel_work_sync(&priv->reset_task);
@@ -1504,7 +2053,7 @@ static int gfar_close(struct net_device *dev)
1504 phy_disconnect(priv->phydev); 2053 phy_disconnect(priv->phydev);
1505 priv->phydev = NULL; 2054 priv->phydev = NULL;
1506 2055
1507 netif_stop_queue(dev); 2056 netif_tx_stop_all_queues(dev);
1508 2057
1509 return 0; 2058 return 0;
1510} 2059}
@@ -1523,50 +2072,55 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1523 struct vlan_group *grp) 2072 struct vlan_group *grp)
1524{ 2073{
1525 struct gfar_private *priv = netdev_priv(dev); 2074 struct gfar_private *priv = netdev_priv(dev);
2075 struct gfar __iomem *regs = NULL;
1526 unsigned long flags; 2076 unsigned long flags;
1527 u32 tempval; 2077 u32 tempval;
1528 2078
1529 spin_lock_irqsave(&priv->rxlock, flags); 2079 regs = priv->gfargrp[0].regs;
2080 local_irq_save(flags);
2081 lock_rx_qs(priv);
1530 2082
1531 priv->vlgrp = grp; 2083 priv->vlgrp = grp;
1532 2084
1533 if (grp) { 2085 if (grp) {
1534 /* Enable VLAN tag insertion */ 2086 /* Enable VLAN tag insertion */
1535 tempval = gfar_read(&priv->regs->tctrl); 2087 tempval = gfar_read(&regs->tctrl);
1536 tempval |= TCTRL_VLINS; 2088 tempval |= TCTRL_VLINS;
1537 2089
1538 gfar_write(&priv->regs->tctrl, tempval); 2090 gfar_write(&regs->tctrl, tempval);
1539 2091
1540 /* Enable VLAN tag extraction */ 2092 /* Enable VLAN tag extraction */
1541 tempval = gfar_read(&priv->regs->rctrl); 2093 tempval = gfar_read(&regs->rctrl);
1542 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2094 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
1543 gfar_write(&priv->regs->rctrl, tempval); 2095 gfar_write(&regs->rctrl, tempval);
1544 } else { 2096 } else {
1545 /* Disable VLAN tag insertion */ 2097 /* Disable VLAN tag insertion */
1546 tempval = gfar_read(&priv->regs->tctrl); 2098 tempval = gfar_read(&regs->tctrl);
1547 tempval &= ~TCTRL_VLINS; 2099 tempval &= ~TCTRL_VLINS;
1548 gfar_write(&priv->regs->tctrl, tempval); 2100 gfar_write(&regs->tctrl, tempval);
1549 2101
1550 /* Disable VLAN tag extraction */ 2102 /* Disable VLAN tag extraction */
1551 tempval = gfar_read(&priv->regs->rctrl); 2103 tempval = gfar_read(&regs->rctrl);
1552 tempval &= ~RCTRL_VLEX; 2104 tempval &= ~RCTRL_VLEX;
1553 /* If parse is no longer required, then disable parser */ 2105 /* If parse is no longer required, then disable parser */
1554 if (tempval & RCTRL_REQ_PARSER) 2106 if (tempval & RCTRL_REQ_PARSER)
1555 tempval |= RCTRL_PRSDEP_INIT; 2107 tempval |= RCTRL_PRSDEP_INIT;
1556 else 2108 else
1557 tempval &= ~RCTRL_PRSDEP_INIT; 2109 tempval &= ~RCTRL_PRSDEP_INIT;
1558 gfar_write(&priv->regs->rctrl, tempval); 2110 gfar_write(&regs->rctrl, tempval);
1559 } 2111 }
1560 2112
1561 gfar_change_mtu(dev, dev->mtu); 2113 gfar_change_mtu(dev, dev->mtu);
1562 2114
1563 spin_unlock_irqrestore(&priv->rxlock, flags); 2115 unlock_rx_qs(priv);
2116 local_irq_restore(flags);
1564} 2117}
1565 2118
1566static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2119static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1567{ 2120{
1568 int tempsize, tempval; 2121 int tempsize, tempval;
1569 struct gfar_private *priv = netdev_priv(dev); 2122 struct gfar_private *priv = netdev_priv(dev);
2123 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1570 int oldsize = priv->rx_buffer_size; 2124 int oldsize = priv->rx_buffer_size;
1571 int frame_size = new_mtu + ETH_HLEN; 2125 int frame_size = new_mtu + ETH_HLEN;
1572 2126
@@ -1598,20 +2152,20 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1598 2152
1599 dev->mtu = new_mtu; 2153 dev->mtu = new_mtu;
1600 2154
1601 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 2155 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1602 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size); 2156 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1603 2157
1604 /* If the mtu is larger than the max size for standard 2158 /* If the mtu is larger than the max size for standard
1605 * ethernet frames (ie, a jumbo frame), then set maccfg2 2159 * ethernet frames (ie, a jumbo frame), then set maccfg2
1606 * to allow huge frames, and to check the length */ 2160 * to allow huge frames, and to check the length */
1607 tempval = gfar_read(&priv->regs->maccfg2); 2161 tempval = gfar_read(&regs->maccfg2);
1608 2162
1609 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) 2163 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1610 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2164 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1611 else 2165 else
1612 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2166 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1613 2167
1614 gfar_write(&priv->regs->maccfg2, tempval); 2168 gfar_write(&regs->maccfg2, tempval);
1615 2169
1616 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2170 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1617 startup_gfar(dev); 2171 startup_gfar(dev);
@@ -1631,10 +2185,10 @@ static void gfar_reset_task(struct work_struct *work)
1631 struct net_device *dev = priv->ndev; 2185 struct net_device *dev = priv->ndev;
1632 2186
1633 if (dev->flags & IFF_UP) { 2187 if (dev->flags & IFF_UP) {
1634 netif_stop_queue(dev); 2188 netif_tx_stop_all_queues(dev);
1635 stop_gfar(dev); 2189 stop_gfar(dev);
1636 startup_gfar(dev); 2190 startup_gfar(dev);
1637 netif_start_queue(dev); 2191 netif_tx_start_all_queues(dev);
1638 } 2192 }
1639 2193
1640 netif_tx_schedule_all(dev); 2194 netif_tx_schedule_all(dev);
@@ -1649,24 +2203,27 @@ static void gfar_timeout(struct net_device *dev)
1649} 2203}
1650 2204
1651/* Interrupt Handler for Transmit complete */ 2205/* Interrupt Handler for Transmit complete */
1652static int gfar_clean_tx_ring(struct net_device *dev) 2206static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1653{ 2207{
2208 struct net_device *dev = tx_queue->dev;
1654 struct gfar_private *priv = netdev_priv(dev); 2209 struct gfar_private *priv = netdev_priv(dev);
2210 struct gfar_priv_rx_q *rx_queue = NULL;
1655 struct txbd8 *bdp; 2211 struct txbd8 *bdp;
1656 struct txbd8 *lbdp = NULL; 2212 struct txbd8 *lbdp = NULL;
1657 struct txbd8 *base = priv->tx_bd_base; 2213 struct txbd8 *base = tx_queue->tx_bd_base;
1658 struct sk_buff *skb; 2214 struct sk_buff *skb;
1659 int skb_dirtytx; 2215 int skb_dirtytx;
1660 int tx_ring_size = priv->tx_ring_size; 2216 int tx_ring_size = tx_queue->tx_ring_size;
1661 int frags = 0; 2217 int frags = 0;
1662 int i; 2218 int i;
1663 int howmany = 0; 2219 int howmany = 0;
1664 u32 lstatus; 2220 u32 lstatus;
1665 2221
1666 bdp = priv->dirty_tx; 2222 rx_queue = priv->rx_queue[tx_queue->qindex];
1667 skb_dirtytx = priv->skb_dirtytx; 2223 bdp = tx_queue->dirty_tx;
2224 skb_dirtytx = tx_queue->skb_dirtytx;
1668 2225
1669 while ((skb = priv->tx_skbuff[skb_dirtytx])) { 2226 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
1670 frags = skb_shinfo(skb)->nr_frags; 2227 frags = skb_shinfo(skb)->nr_frags;
1671 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2228 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1672 2229
@@ -1698,74 +2255,71 @@ static int gfar_clean_tx_ring(struct net_device *dev)
1698 * If there's room in the queue (limit it to rx_buffer_size) 2255 * If there's room in the queue (limit it to rx_buffer_size)
1699 * we add this skb back into the pool, if it's the right size 2256 * we add this skb back into the pool, if it's the right size
1700 */ 2257 */
1701 if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size && 2258 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
1702 skb_recycle_check(skb, priv->rx_buffer_size + 2259 skb_recycle_check(skb, priv->rx_buffer_size +
1703 RXBUF_ALIGNMENT)) 2260 RXBUF_ALIGNMENT))
1704 __skb_queue_head(&priv->rx_recycle, skb); 2261 __skb_queue_head(&priv->rx_recycle, skb);
1705 else 2262 else
1706 dev_kfree_skb_any(skb); 2263 dev_kfree_skb_any(skb);
1707 2264
1708 priv->tx_skbuff[skb_dirtytx] = NULL; 2265 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
1709 2266
1710 skb_dirtytx = (skb_dirtytx + 1) & 2267 skb_dirtytx = (skb_dirtytx + 1) &
1711 TX_RING_MOD_MASK(tx_ring_size); 2268 TX_RING_MOD_MASK(tx_ring_size);
1712 2269
1713 howmany++; 2270 howmany++;
1714 priv->num_txbdfree += frags + 1; 2271 tx_queue->num_txbdfree += frags + 1;
1715 } 2272 }
1716 2273
1717 /* If we freed a buffer, we can restart transmission, if necessary */ 2274 /* If we freed a buffer, we can restart transmission, if necessary */
1718 if (netif_queue_stopped(dev) && priv->num_txbdfree) 2275 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
1719 netif_wake_queue(dev); 2276 netif_wake_subqueue(dev, tx_queue->qindex);
1720 2277
1721 /* Update dirty indicators */ 2278 /* Update dirty indicators */
1722 priv->skb_dirtytx = skb_dirtytx; 2279 tx_queue->skb_dirtytx = skb_dirtytx;
1723 priv->dirty_tx = bdp; 2280 tx_queue->dirty_tx = bdp;
1724 2281
1725 dev->stats.tx_packets += howmany; 2282 dev->stats.tx_packets += howmany;
1726 2283
1727 return howmany; 2284 return howmany;
1728} 2285}
1729 2286
1730static void gfar_schedule_cleanup(struct net_device *dev) 2287static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
1731{ 2288{
1732 struct gfar_private *priv = netdev_priv(dev);
1733 unsigned long flags; 2289 unsigned long flags;
1734 2290
1735 spin_lock_irqsave(&priv->txlock, flags); 2291 spin_lock_irqsave(&gfargrp->grplock, flags);
1736 spin_lock(&priv->rxlock); 2292 if (napi_schedule_prep(&gfargrp->napi)) {
1737 2293 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
1738 if (napi_schedule_prep(&priv->napi)) { 2294 __napi_schedule(&gfargrp->napi);
1739 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1740 __napi_schedule(&priv->napi);
1741 } else { 2295 } else {
1742 /* 2296 /*
1743 * Clear IEVENT, so interrupts aren't called again 2297 * Clear IEVENT, so interrupts aren't called again
1744 * because of the packets that have already arrived. 2298 * because of the packets that have already arrived.
1745 */ 2299 */
1746 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2300 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
1747 } 2301 }
2302 spin_unlock_irqrestore(&gfargrp->grplock, flags);
1748 2303
1749 spin_unlock(&priv->rxlock);
1750 spin_unlock_irqrestore(&priv->txlock, flags);
1751} 2304}
1752 2305
1753/* Interrupt Handler for Transmit complete */ 2306/* Interrupt Handler for Transmit complete */
1754static irqreturn_t gfar_transmit(int irq, void *dev_id) 2307static irqreturn_t gfar_transmit(int irq, void *grp_id)
1755{ 2308{
1756 gfar_schedule_cleanup((struct net_device *)dev_id); 2309 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1757 return IRQ_HANDLED; 2310 return IRQ_HANDLED;
1758} 2311}
1759 2312
1760static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 2313static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
1761 struct sk_buff *skb) 2314 struct sk_buff *skb)
1762{ 2315{
2316 struct net_device *dev = rx_queue->dev;
1763 struct gfar_private *priv = netdev_priv(dev); 2317 struct gfar_private *priv = netdev_priv(dev);
1764 dma_addr_t buf; 2318 dma_addr_t buf;
1765 2319
1766 buf = dma_map_single(&priv->ofdev->dev, skb->data, 2320 buf = dma_map_single(&priv->ofdev->dev, skb->data,
1767 priv->rx_buffer_size, DMA_FROM_DEVICE); 2321 priv->rx_buffer_size, DMA_FROM_DEVICE);
1768 gfar_init_rxbdp(dev, bdp, buf); 2322 gfar_init_rxbdp(rx_queue, bdp, buf);
1769} 2323}
1770 2324
1771 2325
@@ -1832,9 +2386,9 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
1832 } 2386 }
1833} 2387}
1834 2388
1835irqreturn_t gfar_receive(int irq, void *dev_id) 2389irqreturn_t gfar_receive(int irq, void *grp_id)
1836{ 2390{
1837 gfar_schedule_cleanup((struct net_device *)dev_id); 2391 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1838 return IRQ_HANDLED; 2392 return IRQ_HANDLED;
1839} 2393}
1840 2394
@@ -1864,6 +2418,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1864 fcb = (struct rxfcb *)skb->data; 2418 fcb = (struct rxfcb *)skb->data;
1865 2419
1866 /* Remove the FCB from the skb */ 2420 /* Remove the FCB from the skb */
2421 skb_set_queue_mapping(skb, fcb->rq);
1867 /* Remove the padded bytes, if there are any */ 2422 /* Remove the padded bytes, if there are any */
1868 if (amount_pull) 2423 if (amount_pull)
1869 skb_pull(skb, amount_pull); 2424 skb_pull(skb, amount_pull);
@@ -1890,8 +2445,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1890 * until the budget/quota has been reached. Returns the number 2445 * until the budget/quota has been reached. Returns the number
1891 * of frames handled 2446 * of frames handled
1892 */ 2447 */
1893int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) 2448int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
1894{ 2449{
2450 struct net_device *dev = rx_queue->dev;
1895 struct rxbd8 *bdp, *base; 2451 struct rxbd8 *bdp, *base;
1896 struct sk_buff *skb; 2452 struct sk_buff *skb;
1897 int pkt_len; 2453 int pkt_len;
@@ -1900,8 +2456,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1900 struct gfar_private *priv = netdev_priv(dev); 2456 struct gfar_private *priv = netdev_priv(dev);
1901 2457
1902 /* Get the first full descriptor */ 2458 /* Get the first full descriptor */
1903 bdp = priv->cur_rx; 2459 bdp = rx_queue->cur_rx;
1904 base = priv->rx_bd_base; 2460 base = rx_queue->rx_bd_base;
1905 2461
1906 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + 2462 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
1907 priv->padding; 2463 priv->padding;
@@ -1913,7 +2469,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1913 /* Add another skb for the future */ 2469 /* Add another skb for the future */
1914 newskb = gfar_new_skb(dev); 2470 newskb = gfar_new_skb(dev);
1915 2471
1916 skb = priv->rx_skbuff[priv->skb_currx]; 2472 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
1917 2473
1918 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2474 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
1919 priv->rx_buffer_size, DMA_FROM_DEVICE); 2475 priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1961,45 +2517,76 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1961 2517
1962 } 2518 }
1963 2519
1964 priv->rx_skbuff[priv->skb_currx] = newskb; 2520 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
1965 2521
1966 /* Setup the new bdp */ 2522 /* Setup the new bdp */
1967 gfar_new_rxbdp(dev, bdp, newskb); 2523 gfar_new_rxbdp(rx_queue, bdp, newskb);
1968 2524
1969 /* Update to the next pointer */ 2525 /* Update to the next pointer */
1970 bdp = next_bd(bdp, base, priv->rx_ring_size); 2526 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
1971 2527
1972 /* update to point at the next skb */ 2528 /* update to point at the next skb */
1973 priv->skb_currx = 2529 rx_queue->skb_currx =
1974 (priv->skb_currx + 1) & 2530 (rx_queue->skb_currx + 1) &
1975 RX_RING_MOD_MASK(priv->rx_ring_size); 2531 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
1976 } 2532 }
1977 2533
1978 /* Update the current rxbd pointer to be the next one */ 2534 /* Update the current rxbd pointer to be the next one */
1979 priv->cur_rx = bdp; 2535 rx_queue->cur_rx = bdp;
1980 2536
1981 return howmany; 2537 return howmany;
1982} 2538}
1983 2539
1984static int gfar_poll(struct napi_struct *napi, int budget) 2540static int gfar_poll(struct napi_struct *napi, int budget)
1985{ 2541{
1986 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 2542 struct gfar_priv_grp *gfargrp = container_of(napi,
1987 struct net_device *dev = priv->ndev; 2543 struct gfar_priv_grp, napi);
1988 int tx_cleaned = 0; 2544 struct gfar_private *priv = gfargrp->priv;
1989 int rx_cleaned = 0; 2545 struct gfar __iomem *regs = gfargrp->regs;
2546 struct gfar_priv_tx_q *tx_queue = NULL;
2547 struct gfar_priv_rx_q *rx_queue = NULL;
2548 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2549 int tx_cleaned = 0, i, left_over_budget = budget, serviced_queues = 0;
2550 int num_queues = 0;
1990 unsigned long flags; 2551 unsigned long flags;
1991 2552
2553 num_queues = gfargrp->num_rx_queues;
2554 budget_per_queue = budget/num_queues;
2555
1992 /* Clear IEVENT, so interrupts aren't called again 2556 /* Clear IEVENT, so interrupts aren't called again
1993 * because of the packets that have already arrived */ 2557 * because of the packets that have already arrived */
1994 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2558 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
1995 2559
1996 /* If we fail to get the lock, don't bother with the TX BDs */ 2560 while (num_queues && left_over_budget) {
1997 if (spin_trylock_irqsave(&priv->txlock, flags)) {
1998 tx_cleaned = gfar_clean_tx_ring(dev);
1999 spin_unlock_irqrestore(&priv->txlock, flags);
2000 }
2001 2561
2002 rx_cleaned = gfar_clean_rx_ring(dev, budget); 2562 budget_per_queue = left_over_budget/num_queues;
2563 left_over_budget = 0;
2564
2565 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2566 if (test_bit(i, &serviced_queues))
2567 continue;
2568 rx_queue = priv->rx_queue[i];
2569 tx_queue = priv->tx_queue[rx_queue->qindex];
2570
2571 /* If we fail to get the lock,
2572 * don't bother with the TX BDs */
2573 if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
2574 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2575 spin_unlock_irqrestore(&tx_queue->txlock,
2576 flags);
2577 }
2578
2579 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2580 budget_per_queue);
2581 rx_cleaned += rx_cleaned_per_queue;
2582 if(rx_cleaned_per_queue < budget_per_queue) {
2583 left_over_budget = left_over_budget +
2584 (budget_per_queue - rx_cleaned_per_queue);
2585 set_bit(i, &serviced_queues);
2586 num_queues--;
2587 }
2588 }
2589 }
2003 2590
2004 if (tx_cleaned) 2591 if (tx_cleaned)
2005 return budget; 2592 return budget;
@@ -2008,20 +2595,14 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2008 napi_complete(napi); 2595 napi_complete(napi);
2009 2596
2010 /* Clear the halt bit in RSTAT */ 2597 /* Clear the halt bit in RSTAT */
2011 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 2598 gfar_write(&regs->rstat, gfargrp->rstat);
2012 2599
2013 gfar_write(&priv->regs->imask, IMASK_DEFAULT); 2600 gfar_write(&regs->imask, IMASK_DEFAULT);
2014 2601
2015 /* If we are coalescing interrupts, update the timer */ 2602 /* If we are coalescing interrupts, update the timer */
2016 /* Otherwise, clear it */ 2603 /* Otherwise, clear it */
2017 if (likely(priv->rxcoalescing)) { 2604 gfar_configure_coalescing(priv,
2018 gfar_write(&priv->regs->rxic, 0); 2605 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
2019 gfar_write(&priv->regs->rxic, priv->rxic);
2020 }
2021 if (likely(priv->txcoalescing)) {
2022 gfar_write(&priv->regs->txic, 0);
2023 gfar_write(&priv->regs->txic, priv->txic);
2024 }
2025 } 2606 }
2026 2607
2027 return rx_cleaned; 2608 return rx_cleaned;
@@ -2036,44 +2617,49 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2036static void gfar_netpoll(struct net_device *dev) 2617static void gfar_netpoll(struct net_device *dev)
2037{ 2618{
2038 struct gfar_private *priv = netdev_priv(dev); 2619 struct gfar_private *priv = netdev_priv(dev);
2620 int i = 0;
2039 2621
2040 /* If the device has multiple interrupts, run tx/rx */ 2622 /* If the device has multiple interrupts, run tx/rx */
2041 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2623 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2042 disable_irq(priv->interruptTransmit); 2624 for (i = 0; i < priv->num_grps; i++) {
2043 disable_irq(priv->interruptReceive); 2625 disable_irq(priv->gfargrp[i].interruptTransmit);
2044 disable_irq(priv->interruptError); 2626 disable_irq(priv->gfargrp[i].interruptReceive);
2045 gfar_interrupt(priv->interruptTransmit, dev); 2627 disable_irq(priv->gfargrp[i].interruptError);
2046 enable_irq(priv->interruptError); 2628 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2047 enable_irq(priv->interruptReceive); 2629 &priv->gfargrp[i]);
2048 enable_irq(priv->interruptTransmit); 2630 enable_irq(priv->gfargrp[i].interruptError);
2631 enable_irq(priv->gfargrp[i].interruptReceive);
2632 enable_irq(priv->gfargrp[i].interruptTransmit);
2633 }
2049 } else { 2634 } else {
2050 disable_irq(priv->interruptTransmit); 2635 for (i = 0; i < priv->num_grps; i++) {
2051 gfar_interrupt(priv->interruptTransmit, dev); 2636 disable_irq(priv->gfargrp[i].interruptTransmit);
2052 enable_irq(priv->interruptTransmit); 2637 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2638 &priv->gfargrp[i]);
2639 enable_irq(priv->gfargrp[i].interruptTransmit);
2053 } 2640 }
2054} 2641}
2055#endif 2642#endif
2056 2643
2057/* The interrupt handler for devices with one interrupt */ 2644/* The interrupt handler for devices with one interrupt */
2058static irqreturn_t gfar_interrupt(int irq, void *dev_id) 2645static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2059{ 2646{
2060 struct net_device *dev = dev_id; 2647 struct gfar_priv_grp *gfargrp = grp_id;
2061 struct gfar_private *priv = netdev_priv(dev);
2062 2648
2063 /* Save ievent for future reference */ 2649 /* Save ievent for future reference */
2064 u32 events = gfar_read(&priv->regs->ievent); 2650 u32 events = gfar_read(&gfargrp->regs->ievent);
2065 2651
2066 /* Check for reception */ 2652 /* Check for reception */
2067 if (events & IEVENT_RX_MASK) 2653 if (events & IEVENT_RX_MASK)
2068 gfar_receive(irq, dev_id); 2654 gfar_receive(irq, grp_id);
2069 2655
2070 /* Check for transmit completion */ 2656 /* Check for transmit completion */
2071 if (events & IEVENT_TX_MASK) 2657 if (events & IEVENT_TX_MASK)
2072 gfar_transmit(irq, dev_id); 2658 gfar_transmit(irq, grp_id);
2073 2659
2074 /* Check for errors */ 2660 /* Check for errors */
2075 if (events & IEVENT_ERR_MASK) 2661 if (events & IEVENT_ERR_MASK)
2076 gfar_error(irq, dev_id); 2662 gfar_error(irq, grp_id);
2077 2663
2078 return IRQ_HANDLED; 2664 return IRQ_HANDLED;
2079} 2665}
@@ -2087,12 +2673,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
2087static void adjust_link(struct net_device *dev) 2673static void adjust_link(struct net_device *dev)
2088{ 2674{
2089 struct gfar_private *priv = netdev_priv(dev); 2675 struct gfar_private *priv = netdev_priv(dev);
2090 struct gfar __iomem *regs = priv->regs; 2676 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2091 unsigned long flags; 2677 unsigned long flags;
2092 struct phy_device *phydev = priv->phydev; 2678 struct phy_device *phydev = priv->phydev;
2093 int new_state = 0; 2679 int new_state = 0;
2094 2680
2095 spin_lock_irqsave(&priv->txlock, flags); 2681 local_irq_save(flags);
2682 lock_tx_qs(priv);
2683
2096 if (phydev->link) { 2684 if (phydev->link) {
2097 u32 tempval = gfar_read(&regs->maccfg2); 2685 u32 tempval = gfar_read(&regs->maccfg2);
2098 u32 ecntrl = gfar_read(&regs->ecntrl); 2686 u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2157,8 +2745,8 @@ static void adjust_link(struct net_device *dev)
2157 2745
2158 if (new_state && netif_msg_link(priv)) 2746 if (new_state && netif_msg_link(priv))
2159 phy_print_status(phydev); 2747 phy_print_status(phydev);
2160 2748 unlock_tx_qs(priv);
2161 spin_unlock_irqrestore(&priv->txlock, flags); 2749 local_irq_restore(flags);
2162} 2750}
2163 2751
2164/* Update the hash table based on the current list of multicast 2752/* Update the hash table based on the current list of multicast
@@ -2169,10 +2757,10 @@ static void gfar_set_multi(struct net_device *dev)
2169{ 2757{
2170 struct dev_mc_list *mc_ptr; 2758 struct dev_mc_list *mc_ptr;
2171 struct gfar_private *priv = netdev_priv(dev); 2759 struct gfar_private *priv = netdev_priv(dev);
2172 struct gfar __iomem *regs = priv->regs; 2760 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2173 u32 tempval; 2761 u32 tempval;
2174 2762
2175 if(dev->flags & IFF_PROMISC) { 2763 if (dev->flags & IFF_PROMISC) {
2176 /* Set RCTRL to PROM */ 2764 /* Set RCTRL to PROM */
2177 tempval = gfar_read(&regs->rctrl); 2765 tempval = gfar_read(&regs->rctrl);
2178 tempval |= RCTRL_PROM; 2766 tempval |= RCTRL_PROM;
@@ -2184,7 +2772,7 @@ static void gfar_set_multi(struct net_device *dev)
2184 gfar_write(&regs->rctrl, tempval); 2772 gfar_write(&regs->rctrl, tempval);
2185 } 2773 }
2186 2774
2187 if(dev->flags & IFF_ALLMULTI) { 2775 if (dev->flags & IFF_ALLMULTI) {
2188 /* Set the hash to rx all multicast frames */ 2776 /* Set the hash to rx all multicast frames */
2189 gfar_write(&regs->igaddr0, 0xffffffff); 2777 gfar_write(&regs->igaddr0, 0xffffffff);
2190 gfar_write(&regs->igaddr1, 0xffffffff); 2778 gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2236,7 +2824,7 @@ static void gfar_set_multi(struct net_device *dev)
2236 em_num = 0; 2824 em_num = 0;
2237 } 2825 }
2238 2826
2239 if(dev->mc_count == 0) 2827 if (dev->mc_count == 0)
2240 return; 2828 return;
2241 2829
2242 /* Parse the list, and set the appropriate bits */ 2830 /* Parse the list, and set the appropriate bits */
@@ -2302,10 +2890,11 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2302static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) 2890static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2303{ 2891{
2304 struct gfar_private *priv = netdev_priv(dev); 2892 struct gfar_private *priv = netdev_priv(dev);
2893 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2305 int idx; 2894 int idx;
2306 char tmpbuf[MAC_ADDR_LEN]; 2895 char tmpbuf[MAC_ADDR_LEN];
2307 u32 tempval; 2896 u32 tempval;
2308 u32 __iomem *macptr = &priv->regs->macstnaddr1; 2897 u32 __iomem *macptr = &regs->macstnaddr1;
2309 2898
2310 macptr += num*2; 2899 macptr += num*2;
2311 2900
@@ -2322,16 +2911,18 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2322} 2911}
2323 2912
2324/* GFAR error interrupt handler */ 2913/* GFAR error interrupt handler */
2325static irqreturn_t gfar_error(int irq, void *dev_id) 2914static irqreturn_t gfar_error(int irq, void *grp_id)
2326{ 2915{
2327 struct net_device *dev = dev_id; 2916 struct gfar_priv_grp *gfargrp = grp_id;
2328 struct gfar_private *priv = netdev_priv(dev); 2917 struct gfar __iomem *regs = gfargrp->regs;
2918 struct gfar_private *priv= gfargrp->priv;
2919 struct net_device *dev = priv->ndev;
2329 2920
2330 /* Save ievent for future reference */ 2921 /* Save ievent for future reference */
2331 u32 events = gfar_read(&priv->regs->ievent); 2922 u32 events = gfar_read(&regs->ievent);
2332 2923
2333 /* Clear IEVENT */ 2924 /* Clear IEVENT */
2334 gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK); 2925 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2335 2926
2336 /* Magic Packet is not an error. */ 2927 /* Magic Packet is not an error. */
2337 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 2928 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
@@ -2341,7 +2932,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2341 /* Hmm... */ 2932 /* Hmm... */
2342 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 2933 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2343 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", 2934 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2344 dev->name, events, gfar_read(&priv->regs->imask)); 2935 dev->name, events, gfar_read(&regs->imask));
2345 2936
2346 /* Update the error counters */ 2937 /* Update the error counters */
2347 if (events & IEVENT_TXE) { 2938 if (events & IEVENT_TXE) {
@@ -2359,7 +2950,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2359 priv->extra_stats.tx_underrun++; 2950 priv->extra_stats.tx_underrun++;
2360 2951
2361 /* Reactivate the Tx Queues */ 2952 /* Reactivate the Tx Queues */
2362 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2953 gfar_write(&regs->tstat, gfargrp->tstat);
2363 } 2954 }
2364 if (netif_msg_tx_err(priv)) 2955 if (netif_msg_tx_err(priv))
2365 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 2956 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2368,11 +2959,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2368 dev->stats.rx_errors++; 2959 dev->stats.rx_errors++;
2369 priv->extra_stats.rx_bsy++; 2960 priv->extra_stats.rx_bsy++;
2370 2961
2371 gfar_receive(irq, dev_id); 2962 gfar_receive(irq, grp_id);
2372 2963
2373 if (netif_msg_rx_err(priv)) 2964 if (netif_msg_rx_err(priv))
2374 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", 2965 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2375 dev->name, gfar_read(&priv->regs->rstat)); 2966 dev->name, gfar_read(&regs->rstat));
2376 } 2967 }
2377 if (events & IEVENT_BABR) { 2968 if (events & IEVENT_BABR) {
2378 dev->stats.rx_errors++; 2969 dev->stats.rx_errors++;
@@ -2397,17 +2988,18 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2397 return IRQ_HANDLED; 2988 return IRQ_HANDLED;
2398} 2989}
2399 2990
2400/* work with hotplug and coldplug */
2401MODULE_ALIAS("platform:fsl-gianfar");
2402
2403static struct of_device_id gfar_match[] = 2991static struct of_device_id gfar_match[] =
2404{ 2992{
2405 { 2993 {
2406 .type = "network", 2994 .type = "network",
2407 .compatible = "gianfar", 2995 .compatible = "gianfar",
2408 }, 2996 },
2997 {
2998 .compatible = "fsl,etsec2",
2999 },
2409 {}, 3000 {},
2410}; 3001};
3002MODULE_DEVICE_TABLE(of, gfar_match);
2411 3003
2412/* Structure for a device driver */ 3004/* Structure for a device driver */
2413static struct of_platform_driver gfar_driver = { 3005static struct of_platform_driver gfar_driver = {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 05732faa2f90..44b63daa7ff3 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -7,8 +7,9 @@
7 * 7 *
8 * Author: Andy Fleming 8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
10 * 11 *
11 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. 12 * Copyright 2002-2009 Freescale Semiconductor, Inc.
12 * 13 *
13 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
@@ -74,6 +75,13 @@
74extern const char gfar_driver_name[]; 75extern const char gfar_driver_name[];
75extern const char gfar_driver_version[]; 76extern const char gfar_driver_version[];
76 77
78/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
79#define MAX_TX_QS 0x8
80#define MAX_RX_QS 0x8
81
82/* MAXIMUM NUMBER OF GROUPS SUPPORTED */
83#define MAXGROUPS 0x2
84
77/* These need to be powers of 2 for this driver */ 85/* These need to be powers of 2 for this driver */
78#define DEFAULT_TX_RING_SIZE 256 86#define DEFAULT_TX_RING_SIZE 256
79#define DEFAULT_RX_RING_SIZE 256 87#define DEFAULT_RX_RING_SIZE 256
@@ -171,12 +179,63 @@ extern const char gfar_driver_version[];
171 179
172#define MINFLR_INIT_SETTINGS 0x00000040 180#define MINFLR_INIT_SETTINGS 0x00000040
173 181
182/* Tqueue control */
183#define TQUEUE_EN0 0x00008000
184#define TQUEUE_EN1 0x00004000
185#define TQUEUE_EN2 0x00002000
186#define TQUEUE_EN3 0x00001000
187#define TQUEUE_EN4 0x00000800
188#define TQUEUE_EN5 0x00000400
189#define TQUEUE_EN6 0x00000200
190#define TQUEUE_EN7 0x00000100
191#define TQUEUE_EN_ALL 0x0000FF00
192
193#define TR03WT_WT0_MASK 0xFF000000
194#define TR03WT_WT1_MASK 0x00FF0000
195#define TR03WT_WT2_MASK 0x0000FF00
196#define TR03WT_WT3_MASK 0x000000FF
197
198#define TR47WT_WT4_MASK 0xFF000000
199#define TR47WT_WT5_MASK 0x00FF0000
200#define TR47WT_WT6_MASK 0x0000FF00
201#define TR47WT_WT7_MASK 0x000000FF
202
203/* Rqueue control */
204#define RQUEUE_EX0 0x00800000
205#define RQUEUE_EX1 0x00400000
206#define RQUEUE_EX2 0x00200000
207#define RQUEUE_EX3 0x00100000
208#define RQUEUE_EX4 0x00080000
209#define RQUEUE_EX5 0x00040000
210#define RQUEUE_EX6 0x00020000
211#define RQUEUE_EX7 0x00010000
212#define RQUEUE_EX_ALL 0x00FF0000
213
214#define RQUEUE_EN0 0x00000080
215#define RQUEUE_EN1 0x00000040
216#define RQUEUE_EN2 0x00000020
217#define RQUEUE_EN3 0x00000010
218#define RQUEUE_EN4 0x00000008
219#define RQUEUE_EN5 0x00000004
220#define RQUEUE_EN6 0x00000002
221#define RQUEUE_EN7 0x00000001
222#define RQUEUE_EN_ALL 0x000000FF
223
174/* Init to do tx snooping for buffers and descriptors */ 224/* Init to do tx snooping for buffers and descriptors */
175#define DMACTRL_INIT_SETTINGS 0x000000c3 225#define DMACTRL_INIT_SETTINGS 0x000000c3
176#define DMACTRL_GRS 0x00000010 226#define DMACTRL_GRS 0x00000010
177#define DMACTRL_GTS 0x00000008 227#define DMACTRL_GTS 0x00000008
178 228
179#define TSTAT_CLEAR_THALT 0x80000000 229#define TSTAT_CLEAR_THALT_ALL 0xFF000000
230#define TSTAT_CLEAR_THALT 0x80000000
231#define TSTAT_CLEAR_THALT0 0x80000000
232#define TSTAT_CLEAR_THALT1 0x40000000
233#define TSTAT_CLEAR_THALT2 0x20000000
234#define TSTAT_CLEAR_THALT3 0x10000000
235#define TSTAT_CLEAR_THALT4 0x08000000
236#define TSTAT_CLEAR_THALT5 0x04000000
237#define TSTAT_CLEAR_THALT6 0x02000000
238#define TSTAT_CLEAR_THALT7 0x01000000
180 239
181/* Interrupt coalescing macros */ 240/* Interrupt coalescing macros */
182#define IC_ICEN 0x80000000 241#define IC_ICEN 0x80000000
@@ -227,6 +286,13 @@ extern const char gfar_driver_version[];
227#define TCTRL_IPCSEN 0x00004000 286#define TCTRL_IPCSEN 0x00004000
228#define TCTRL_TUCSEN 0x00002000 287#define TCTRL_TUCSEN 0x00002000
229#define TCTRL_VLINS 0x00001000 288#define TCTRL_VLINS 0x00001000
289#define TCTRL_THDF 0x00000800
290#define TCTRL_RFCPAUSE 0x00000010
291#define TCTRL_TFCPAUSE 0x00000008
292#define TCTRL_TXSCHED_MASK 0x00000006
293#define TCTRL_TXSCHED_INIT 0x00000000
294#define TCTRL_TXSCHED_PRIO 0x00000002
295#define TCTRL_TXSCHED_WRRS 0x00000004
230#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN) 296#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
231 297
232#define IEVENT_INIT_CLEAR 0xffffffff 298#define IEVENT_INIT_CLEAR 0xffffffff
@@ -315,6 +381,84 @@ extern const char gfar_driver_version[];
315#define BD_LFLAG(flags) ((flags) << 16) 381#define BD_LFLAG(flags) ((flags) << 16)
316#define BD_LENGTH_MASK 0x0000ffff 382#define BD_LENGTH_MASK 0x0000ffff
317 383
384#define CLASS_CODE_UNRECOG 0x00
385#define CLASS_CODE_DUMMY1 0x01
386#define CLASS_CODE_ETHERTYPE1 0x02
387#define CLASS_CODE_ETHERTYPE2 0x03
388#define CLASS_CODE_USER_PROG1 0x04
389#define CLASS_CODE_USER_PROG2 0x05
390#define CLASS_CODE_USER_PROG3 0x06
391#define CLASS_CODE_USER_PROG4 0x07
392#define CLASS_CODE_TCP_IPV4 0x08
393#define CLASS_CODE_UDP_IPV4 0x09
394#define CLASS_CODE_AH_ESP_IPV4 0x0a
395#define CLASS_CODE_SCTP_IPV4 0x0b
396#define CLASS_CODE_TCP_IPV6 0x0c
397#define CLASS_CODE_UDP_IPV6 0x0d
398#define CLASS_CODE_AH_ESP_IPV6 0x0e
399#define CLASS_CODE_SCTP_IPV6 0x0f
400
401#define FPR_FILER_MASK 0xFFFFFFFF
402#define MAX_FILER_IDX 0xFF
403
404/* RQFCR register bits */
405#define RQFCR_GPI 0x80000000
406#define RQFCR_HASHTBL_Q 0x00000000
407#define RQFCR_HASHTBL_0 0x00020000
408#define RQFCR_HASHTBL_1 0x00040000
409#define RQFCR_HASHTBL_2 0x00060000
410#define RQFCR_HASHTBL_3 0x00080000
411#define RQFCR_HASH 0x00010000
412#define RQFCR_CLE 0x00000200
413#define RQFCR_RJE 0x00000100
414#define RQFCR_AND 0x00000080
415#define RQFCR_CMP_EXACT 0x00000000
416#define RQFCR_CMP_MATCH 0x00000020
417#define RQFCR_CMP_NOEXACT 0x00000040
418#define RQFCR_CMP_NOMATCH 0x00000060
419
420/* RQFCR PID values */
421#define RQFCR_PID_MASK 0x00000000
422#define RQFCR_PID_PARSE 0x00000001
423#define RQFCR_PID_ARB 0x00000002
424#define RQFCR_PID_DAH 0x00000003
425#define RQFCR_PID_DAL 0x00000004
426#define RQFCR_PID_SAH 0x00000005
427#define RQFCR_PID_SAL 0x00000006
428#define RQFCR_PID_ETY 0x00000007
429#define RQFCR_PID_VID 0x00000008
430#define RQFCR_PID_PRI 0x00000009
431#define RQFCR_PID_TOS 0x0000000A
432#define RQFCR_PID_L4P 0x0000000B
433#define RQFCR_PID_DIA 0x0000000C
434#define RQFCR_PID_SIA 0x0000000D
435#define RQFCR_PID_DPT 0x0000000E
436#define RQFCR_PID_SPT 0x0000000F
437
438/* RQFPR when PID is 0x0001 */
439#define RQFPR_HDR_GE_512 0x00200000
440#define RQFPR_LERR 0x00100000
441#define RQFPR_RAR 0x00080000
442#define RQFPR_RARQ 0x00040000
443#define RQFPR_AR 0x00020000
444#define RQFPR_ARQ 0x00010000
445#define RQFPR_EBC 0x00008000
446#define RQFPR_VLN 0x00004000
447#define RQFPR_CFI 0x00002000
448#define RQFPR_JUM 0x00001000
449#define RQFPR_IPF 0x00000800
450#define RQFPR_FIF 0x00000400
451#define RQFPR_IPV4 0x00000200
452#define RQFPR_IPV6 0x00000100
453#define RQFPR_ICC 0x00000080
454#define RQFPR_ICV 0x00000040
455#define RQFPR_TCP 0x00000020
456#define RQFPR_UDP 0x00000010
457#define RQFPR_TUC 0x00000008
458#define RQFPR_TUV 0x00000004
459#define RQFPR_PER 0x00000002
460#define RQFPR_EER 0x00000001
461
318/* TxBD status field bits */ 462/* TxBD status field bits */
319#define TXBD_READY 0x8000 463#define TXBD_READY 0x8000
320#define TXBD_PADCRC 0x4000 464#define TXBD_PADCRC 0x4000
@@ -503,25 +647,32 @@ struct gfar_stats {
503 647
504struct gfar { 648struct gfar {
505 u32 tsec_id; /* 0x.000 - Controller ID register */ 649 u32 tsec_id; /* 0x.000 - Controller ID register */
506 u8 res1[12]; 650 u32 tsec_id2; /* 0x.004 - Controller ID2 register */
651 u8 res1[8];
507 u32 ievent; /* 0x.010 - Interrupt Event Register */ 652 u32 ievent; /* 0x.010 - Interrupt Event Register */
508 u32 imask; /* 0x.014 - Interrupt Mask Register */ 653 u32 imask; /* 0x.014 - Interrupt Mask Register */
509 u32 edis; /* 0x.018 - Error Disabled Register */ 654 u32 edis; /* 0x.018 - Error Disabled Register */
510 u8 res2[4]; 655 u32 emapg; /* 0x.01c - Group Error mapping register */
511 u32 ecntrl; /* 0x.020 - Ethernet Control Register */ 656 u32 ecntrl; /* 0x.020 - Ethernet Control Register */
512 u32 minflr; /* 0x.024 - Minimum Frame Length Register */ 657 u32 minflr; /* 0x.024 - Minimum Frame Length Register */
513 u32 ptv; /* 0x.028 - Pause Time Value Register */ 658 u32 ptv; /* 0x.028 - Pause Time Value Register */
514 u32 dmactrl; /* 0x.02c - DMA Control Register */ 659 u32 dmactrl; /* 0x.02c - DMA Control Register */
515 u32 tbipa; /* 0x.030 - TBI PHY Address Register */ 660 u32 tbipa; /* 0x.030 - TBI PHY Address Register */
516 u8 res3[88]; 661 u8 res2[28];
662 u32 fifo_rx_pause; /* 0x.050 - FIFO receive pause start threshold
663 register */
664 u32 fifo_rx_pause_shutoff; /* x.054 - FIFO receive starve shutoff
665 register */
666 u32 fifo_rx_alarm; /* 0x.058 - FIFO receive alarm start threshold
667 register */
668 u32 fifo_rx_alarm_shutoff; /*0x.05c - FIFO receive alarm starve
669 shutoff register */
670 u8 res3[44];
517 u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */ 671 u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
518 u8 res4[8]; 672 u8 res4[8];
519 u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */ 673 u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
520 u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */ 674 u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
521 u8 res5[4]; 675 u8 res5[96];
522 u32 fifo_rx_pause; /* 0x.0a4 - FIFO receive pause threshold register */
523 u32 fifo_rx_alarm; /* 0x.0a8 - FIFO receive alarm threshold register */
524 u8 res6[84];
525 u32 tctrl; /* 0x.100 - Transmit Control Register */ 676 u32 tctrl; /* 0x.100 - Transmit Control Register */
526 u32 tstat; /* 0x.104 - Transmit Status Register */ 677 u32 tstat; /* 0x.104 - Transmit Status Register */
527 u32 dfvlan; /* 0x.108 - Default VLAN Control word */ 678 u32 dfvlan; /* 0x.108 - Default VLAN Control word */
@@ -572,7 +723,11 @@ struct gfar {
572 u8 res12[8]; 723 u8 res12[8];
573 u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */ 724 u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
574 u32 rqueue; /* 0x.314 - Receive queue control register */ 725 u32 rqueue; /* 0x.314 - Receive queue control register */
575 u8 res13[24]; 726 u32 rir0; /* 0x.318 - Ring mapping register 0 */
727 u32 rir1; /* 0x.31c - Ring mapping register 1 */
728 u32 rir2; /* 0x.320 - Ring mapping register 2 */
729 u32 rir3; /* 0x.324 - Ring mapping register 3 */
730 u8 res13[8];
576 u32 rbifx; /* 0x.330 - Receive bit field extract control register */ 731 u32 rbifx; /* 0x.330 - Receive bit field extract control register */
577 u32 rqfar; /* 0x.334 - Receive queue filing table address register */ 732 u32 rqfar; /* 0x.334 - Receive queue filing table address register */
578 u32 rqfcr; /* 0x.338 - Receive queue filing table control register */ 733 u32 rqfcr; /* 0x.338 - Receive queue filing table control register */
@@ -621,7 +776,7 @@ struct gfar {
621 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ 776 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
622 u8 res18[12]; 777 u8 res18[12];
623 u8 gfar_mii_regs[24]; /* See gianfar_phy.h */ 778 u8 gfar_mii_regs[24]; /* See gianfar_phy.h */
624 u8 res19[4]; 779 u32 ifctrl; /* 0x.538 - Interface control register */
625 u32 ifstat; /* 0x.53c - Interface Status Register */ 780 u32 ifstat; /* 0x.53c - Interface Status Register */
626 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ 781 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
627 u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */ 782 u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
@@ -682,8 +837,30 @@ struct gfar {
682 u8 res23c[248]; 837 u8 res23c[248];
683 u32 attr; /* 0x.bf8 - Attributes Register */ 838 u32 attr; /* 0x.bf8 - Attributes Register */
684 u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ 839 u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
685 u8 res24[1024]; 840 u8 res24[688];
686 841 u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */
842 u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */
843 u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */
844 u32 isrg3; /* 0x.ebc - Interrupt steering group 3 register */
845 u8 res25[16];
846 u32 rxic0; /* 0x.ed0 - Ring 0 Rx interrupt coalescing */
847 u32 rxic1; /* 0x.ed4 - Ring 1 Rx interrupt coalescing */
848 u32 rxic2; /* 0x.ed8 - Ring 2 Rx interrupt coalescing */
849 u32 rxic3; /* 0x.edc - Ring 3 Rx interrupt coalescing */
850 u32 rxic4; /* 0x.ee0 - Ring 4 Rx interrupt coalescing */
851 u32 rxic5; /* 0x.ee4 - Ring 5 Rx interrupt coalescing */
852 u32 rxic6; /* 0x.ee8 - Ring 6 Rx interrupt coalescing */
853 u32 rxic7; /* 0x.eec - Ring 7 Rx interrupt coalescing */
854 u8 res26[32];
855 u32 txic0; /* 0x.f10 - Ring 0 Tx interrupt coalescing */
856 u32 txic1; /* 0x.f14 - Ring 1 Tx interrupt coalescing */
857 u32 txic2; /* 0x.f18 - Ring 2 Tx interrupt coalescing */
858 u32 txic3; /* 0x.f1c - Ring 3 Tx interrupt coalescing */
859 u32 txic4; /* 0x.f20 - Ring 4 Tx interrupt coalescing */
860 u32 txic5; /* 0x.f24 - Ring 5 Tx interrupt coalescing */
861 u32 txic6; /* 0x.f28 - Ring 6 Tx interrupt coalescing */
862 u32 txic7; /* 0x.f2c - Ring 7 Tx interrupt coalescing */
863 u8 res27[208];
687}; 864};
688 865
689/* Flags related to gianfar device features */ 866/* Flags related to gianfar device features */
@@ -699,6 +876,133 @@ struct gfar {
699#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 876#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
700#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 877#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
701 878
879#if (MAXGROUPS == 2)
880#define DEFAULT_MAPPING 0xAA
881#else
882#define DEFAULT_MAPPING 0xFF
883#endif
884
885#define ISRG_SHIFT_TX 0x10
886#define ISRG_SHIFT_RX 0x18
887
888/* The same driver can operate in two modes */
889/* SQ_SG_MODE: Single Queue Single Group Mode
890 * (Backward compatible mode)
891 * MQ_MG_MODE: Multi Queue Multi Group mode
892 */
893enum {
894 SQ_SG_MODE = 0,
895 MQ_MG_MODE
896};
897
898/**
899 * struct gfar_priv_tx_q - per tx queue structure
900 * @txlock: per queue tx spin lock
901 * @tx_skbuff:skb pointers
902 * @skb_curtx: to be used skb pointer
903 * @skb_dirtytx:the last used skb pointer
904 * @qindex: index of this queue
905 * @dev: back pointer to the dev structure
906 * @grp: back pointer to the group to which this queue belongs
907 * @tx_bd_base: First tx buffer descriptor
908 * @cur_tx: Next free ring entry
909 * @dirty_tx: First buffer in line to be transmitted
910 * @tx_ring_size: Tx ring size
911 * @num_txbdfree: number of free TxBds
912 * @txcoalescing: enable/disable tx coalescing
913 * @txic: transmit interrupt coalescing value
914 * @txcount: coalescing value if based on tx frame count
915 * @txtime: coalescing value if based on time
916 */
917struct gfar_priv_tx_q {
918 spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
919 struct sk_buff ** tx_skbuff;
920 /* Buffer descriptor pointers */
921 dma_addr_t tx_bd_dma_base;
922 struct txbd8 *tx_bd_base;
923 struct txbd8 *cur_tx;
924 struct txbd8 *dirty_tx;
925 struct net_device *dev;
926 struct gfar_priv_grp *grp;
927 u16 skb_curtx;
928 u16 skb_dirtytx;
929 u16 qindex;
930 unsigned int tx_ring_size;
931 unsigned int num_txbdfree;
932 /* Configuration info for the coalescing features */
933 unsigned char txcoalescing;
934 unsigned long txic;
935 unsigned short txcount;
936 unsigned short txtime;
937};
938
939/**
940 * struct gfar_priv_rx_q - per rx queue structure
941 * @rxlock: per queue rx spin lock
942 * @rx_skbuff: skb pointers
943 * @skb_currx: currently use skb pointer
944 * @rx_bd_base: First rx buffer descriptor
945 * @cur_rx: Next free rx ring entry
946 * @qindex: index of this queue
947 * @dev: back pointer to the dev structure
948 * @rx_ring_size: Rx ring size
949 * @rxcoalescing: enable/disable rx-coalescing
950 * @rxic: receive interrupt coalescing vlaue
951 */
952
953struct gfar_priv_rx_q {
954 spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
955 struct sk_buff ** rx_skbuff;
956 dma_addr_t rx_bd_dma_base;
957 struct rxbd8 *rx_bd_base;
958 struct rxbd8 *cur_rx;
959 struct net_device *dev;
960 struct gfar_priv_grp *grp;
961 u16 skb_currx;
962 u16 qindex;
963 unsigned int rx_ring_size;
964 /* RX Coalescing values */
965 unsigned char rxcoalescing;
966 unsigned long rxic;
967};
968
969/**
970 * struct gfar_priv_grp - per group structure
971 * @napi: the napi poll function
972 * @priv: back pointer to the priv structure
973 * @regs: the ioremapped register space for this group
974 * @grp_id: group id for this group
975 * @interruptTransmit: The TX interrupt number for this group
976 * @interruptReceive: The RX interrupt number for this group
977 * @interruptError: The ERROR interrupt number for this group
978 * @int_name_tx: tx interrupt name for this group
979 * @int_name_rx: rx interrupt name for this group
980 * @int_name_er: er interrupt name for this group
981 */
982
983struct gfar_priv_grp {
984 spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
985 struct napi_struct napi;
986 struct gfar_private *priv;
987 struct gfar __iomem *regs;
988 unsigned int grp_id;
989 unsigned int rx_bit_map;
990 unsigned int tx_bit_map;
991 unsigned int num_tx_queues;
992 unsigned int num_rx_queues;
993 unsigned int rstat;
994 unsigned int tstat;
995 unsigned int imask;
996 unsigned int ievent;
997 unsigned int interruptTransmit;
998 unsigned int interruptReceive;
999 unsigned int interruptError;
1000
1001 char int_name_tx[GFAR_INT_NAME_MAX];
1002 char int_name_rx[GFAR_INT_NAME_MAX];
1003 char int_name_er[GFAR_INT_NAME_MAX];
1004};
1005
702/* Struct stolen almost completely (and shamelessly) from the FCC enet source 1006/* Struct stolen almost completely (and shamelessly) from the FCC enet source
703 * (Ok, that's not so true anymore, but there is a family resemblence) 1007 * (Ok, that's not so true anymore, but there is a family resemblence)
704 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base 1008 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -709,63 +1013,36 @@ struct gfar {
709 * the buffer descriptor determines the actual condition. 1013 * the buffer descriptor determines the actual condition.
710 */ 1014 */
711struct gfar_private { 1015struct gfar_private {
712 /* Fields controlled by TX lock */
713 spinlock_t txlock;
714
715 /* Pointer to the array of skbuffs */
716 struct sk_buff ** tx_skbuff;
717
718 /* next free skb in the array */
719 u16 skb_curtx;
720
721 /* First skb in line to be transmitted */
722 u16 skb_dirtytx;
723
724 /* Configuration info for the coalescing features */
725 unsigned char txcoalescing;
726 unsigned long txic;
727 1016
728 /* Buffer descriptor pointers */ 1017 /* Indicates how many tx, rx queues are enabled */
729 dma_addr_t tx_bd_dma_base; 1018 unsigned int num_tx_queues;
730 struct txbd8 *tx_bd_base; /* First tx buffer descriptor */ 1019 unsigned int num_rx_queues;
731 struct txbd8 *cur_tx; /* Next free ring entry */ 1020 unsigned int num_grps;
732 struct txbd8 *dirty_tx; /* First buffer in line 1021 unsigned int mode;
733 to be transmitted */
734 unsigned int tx_ring_size;
735 unsigned int num_txbdfree; /* number of TxBDs free */
736 1022
737 /* RX Locked fields */ 1023 /* The total tx and rx ring size for the enabled queues */
738 spinlock_t rxlock; 1024 unsigned int total_tx_ring_size;
1025 unsigned int total_rx_ring_size;
739 1026
740 struct device_node *node; 1027 struct device_node *node;
741 struct net_device *ndev; 1028 struct net_device *ndev;
742 struct of_device *ofdev; 1029 struct of_device *ofdev;
743 struct napi_struct napi;
744 1030
745 /* skb array and index */ 1031 struct gfar_priv_grp gfargrp[MAXGROUPS];
746 struct sk_buff ** rx_skbuff; 1032 struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
747 u16 skb_currx; 1033 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
748 1034
749 /* RX Coalescing values */ 1035 /* RX per device parameters */
750 unsigned char rxcoalescing;
751 unsigned long rxic;
752
753 struct rxbd8 *rx_bd_base; /* First Rx buffers */
754 struct rxbd8 *cur_rx; /* Next free rx ring entry */
755
756 /* RX parameters */
757 unsigned int rx_ring_size;
758 unsigned int rx_buffer_size; 1036 unsigned int rx_buffer_size;
759 unsigned int rx_stash_size; 1037 unsigned int rx_stash_size;
760 unsigned int rx_stash_index; 1038 unsigned int rx_stash_index;
761 1039
1040 u32 cur_filer_idx;
1041
762 struct sk_buff_head rx_recycle; 1042 struct sk_buff_head rx_recycle;
763 1043
764 struct vlan_group *vlgrp; 1044 struct vlan_group *vlgrp;
765 1045
766 /* Unprotected fields */
767 /* Pointer to the GFAR memory mapped Registers */
768 struct gfar __iomem *regs;
769 1046
770 /* Hash registers and their width */ 1047 /* Hash registers and their width */
771 u32 __iomem *hash_regs[16]; 1048 u32 __iomem *hash_regs[16];
@@ -786,13 +1063,10 @@ struct gfar_private {
786 unsigned char rx_csum_enable:1, 1063 unsigned char rx_csum_enable:1,
787 extended_hash:1, 1064 extended_hash:1,
788 bd_stash_en:1, 1065 bd_stash_en:1,
1066 rx_filer_enable:1,
789 wol_en:1; /* Wake-on-LAN enabled */ 1067 wol_en:1; /* Wake-on-LAN enabled */
790 unsigned short padding; 1068 unsigned short padding;
791 1069
792 unsigned int interruptTransmit;
793 unsigned int interruptReceive;
794 unsigned int interruptError;
795
796 /* PHY stuff */ 1070 /* PHY stuff */
797 struct phy_device *phydev; 1071 struct phy_device *phydev;
798 struct mii_bus *mii_bus; 1072 struct mii_bus *mii_bus;
@@ -804,14 +1078,13 @@ struct gfar_private {
804 1078
805 struct work_struct reset_task; 1079 struct work_struct reset_task;
806 1080
807 char int_name_tx[GFAR_INT_NAME_MAX];
808 char int_name_rx[GFAR_INT_NAME_MAX];
809 char int_name_er[GFAR_INT_NAME_MAX];
810
811 /* Network Statistics */ 1081 /* Network Statistics */
812 struct gfar_extra_stats extra_stats; 1082 struct gfar_extra_stats extra_stats;
813}; 1083};
814 1084
1085extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1086extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
1087
815static inline u32 gfar_read(volatile unsigned __iomem *addr) 1088static inline u32 gfar_read(volatile unsigned __iomem *addr)
816{ 1089{
817 u32 val; 1090 u32 val;
@@ -824,12 +1097,28 @@ static inline void gfar_write(volatile unsigned __iomem *addr, u32 val)
824 out_be32(addr, val); 1097 out_be32(addr, val);
825} 1098}
826 1099
1100static inline void gfar_write_filer(struct gfar_private *priv,
1101 unsigned int far, unsigned int fcr, unsigned int fpr)
1102{
1103 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1104
1105 gfar_write(&regs->rqfar, far);
1106 gfar_write(&regs->rqfcr, fcr);
1107 gfar_write(&regs->rqfpr, fpr);
1108}
1109
1110extern void lock_rx_qs(struct gfar_private *priv);
1111extern void lock_tx_qs(struct gfar_private *priv);
1112extern void unlock_rx_qs(struct gfar_private *priv);
1113extern void unlock_tx_qs(struct gfar_private *priv);
827extern irqreturn_t gfar_receive(int irq, void *dev_id); 1114extern irqreturn_t gfar_receive(int irq, void *dev_id);
828extern int startup_gfar(struct net_device *dev); 1115extern int startup_gfar(struct net_device *dev);
829extern void stop_gfar(struct net_device *dev); 1116extern void stop_gfar(struct net_device *dev);
830extern void gfar_halt(struct net_device *dev); 1117extern void gfar_halt(struct net_device *dev);
831extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 1118extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
832 int enable, u32 regnum, u32 read); 1119 int enable, u32 regnum, u32 read);
1120extern void gfar_configure_coalescing(struct gfar_private *priv,
1121 unsigned int tx_mask, unsigned int rx_mask);
833void gfar_init_sysfs(struct net_device *dev); 1122void gfar_init_sysfs(struct net_device *dev);
834 1123
835extern const struct ethtool_ops gfar_ethtool_ops; 1124extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6c144b525b47..1010367695e4 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -7,8 +7,9 @@
7 * 7 *
8 * Author: Andy Fleming 8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
10 * 11 *
11 * Copyright (c) 2003,2004 Freescale Semiconductor, Inc. 12 * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
12 * 13 *
13 * This software may be used and distributed according to 14 * This software may be used and distributed according to
14 * the terms of the GNU Public License, Version 2, incorporated herein 15 * the terms of the GNU Public License, Version 2, incorporated herein
@@ -41,7 +42,7 @@
41#include "gianfar.h" 42#include "gianfar.h"
42 43
43extern void gfar_start(struct net_device *dev); 44extern void gfar_start(struct net_device *dev);
44extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 45extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
45 46
46#define GFAR_MAX_COAL_USECS 0xffff 47#define GFAR_MAX_COAL_USECS 0xffff
47#define GFAR_MAX_COAL_FRAMES 0xff 48#define GFAR_MAX_COAL_FRAMES 0xff
@@ -136,10 +137,11 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
136{ 137{
137 int i; 138 int i;
138 struct gfar_private *priv = netdev_priv(dev); 139 struct gfar_private *priv = netdev_priv(dev);
140 struct gfar __iomem *regs = priv->gfargrp[0].regs;
139 u64 *extra = (u64 *) & priv->extra_stats; 141 u64 *extra = (u64 *) & priv->extra_stats;
140 142
141 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 143 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
142 u32 __iomem *rmon = (u32 __iomem *) & priv->regs->rmon; 144 u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
143 struct gfar_stats *stats = (struct gfar_stats *) buf; 145 struct gfar_stats *stats = (struct gfar_stats *) buf;
144 146
145 for (i = 0; i < GFAR_RMON_LEN; i++) 147 for (i = 0; i < GFAR_RMON_LEN; i++)
@@ -197,12 +199,18 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
197{ 199{
198 struct gfar_private *priv = netdev_priv(dev); 200 struct gfar_private *priv = netdev_priv(dev);
199 struct phy_device *phydev = priv->phydev; 201 struct phy_device *phydev = priv->phydev;
202 struct gfar_priv_rx_q *rx_queue = NULL;
203 struct gfar_priv_tx_q *tx_queue = NULL;
200 204
201 if (NULL == phydev) 205 if (NULL == phydev)
202 return -ENODEV; 206 return -ENODEV;
207 tx_queue = priv->tx_queue[0];
208 rx_queue = priv->rx_queue[0];
203 209
204 cmd->maxtxpkt = get_icft_value(priv->txic); 210 /* etsec-1.7 and older versions have only one txic
205 cmd->maxrxpkt = get_icft_value(priv->rxic); 211 * and rxic regs although they support multiple queues */
212 cmd->maxtxpkt = get_icft_value(tx_queue->txic);
213 cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
206 214
207 return phy_ethtool_gset(phydev, cmd); 215 return phy_ethtool_gset(phydev, cmd);
208} 216}
@@ -218,7 +226,7 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
218{ 226{
219 int i; 227 int i;
220 struct gfar_private *priv = netdev_priv(dev); 228 struct gfar_private *priv = netdev_priv(dev);
221 u32 __iomem *theregs = (u32 __iomem *) priv->regs; 229 u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
222 u32 *buf = (u32 *) regbuf; 230 u32 *buf = (u32 *) regbuf;
223 231
224 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++) 232 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
@@ -279,6 +287,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
279static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 287static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
280{ 288{
281 struct gfar_private *priv = netdev_priv(dev); 289 struct gfar_private *priv = netdev_priv(dev);
290 struct gfar_priv_rx_q *rx_queue = NULL;
291 struct gfar_priv_tx_q *tx_queue = NULL;
282 unsigned long rxtime; 292 unsigned long rxtime;
283 unsigned long rxcount; 293 unsigned long rxcount;
284 unsigned long txtime; 294 unsigned long txtime;
@@ -290,10 +300,13 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
290 if (NULL == priv->phydev) 300 if (NULL == priv->phydev)
291 return -ENODEV; 301 return -ENODEV;
292 302
293 rxtime = get_ictt_value(priv->rxic); 303 rx_queue = priv->rx_queue[0];
294 rxcount = get_icft_value(priv->rxic); 304 tx_queue = priv->tx_queue[0];
295 txtime = get_ictt_value(priv->txic); 305
296 txcount = get_icft_value(priv->txic); 306 rxtime = get_ictt_value(rx_queue->rxic);
307 rxcount = get_icft_value(rx_queue->rxic);
308 txtime = get_ictt_value(tx_queue->txic);
309 txcount = get_icft_value(tx_queue->txic);
297 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime); 310 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
298 cvals->rx_max_coalesced_frames = rxcount; 311 cvals->rx_max_coalesced_frames = rxcount;
299 312
@@ -339,16 +352,23 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
339static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 352static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
340{ 353{
341 struct gfar_private *priv = netdev_priv(dev); 354 struct gfar_private *priv = netdev_priv(dev);
355 int i = 0;
342 356
343 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) 357 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
344 return -EOPNOTSUPP; 358 return -EOPNOTSUPP;
345 359
346 /* Set up rx coalescing */ 360 /* Set up rx coalescing */
361 /* As of now, we will enable/disable coalescing for all
362 * queues together in case of eTSEC2, this will be modified
363 * along with the ethtool interface */
347 if ((cvals->rx_coalesce_usecs == 0) || 364 if ((cvals->rx_coalesce_usecs == 0) ||
348 (cvals->rx_max_coalesced_frames == 0)) 365 (cvals->rx_max_coalesced_frames == 0)) {
349 priv->rxcoalescing = 0; 366 for (i = 0; i < priv->num_rx_queues; i++)
350 else 367 priv->rx_queue[i]->rxcoalescing = 0;
351 priv->rxcoalescing = 1; 368 } else {
369 for (i = 0; i < priv->num_rx_queues; i++)
370 priv->rx_queue[i]->rxcoalescing = 1;
371 }
352 372
353 if (NULL == priv->phydev) 373 if (NULL == priv->phydev)
354 return -ENODEV; 374 return -ENODEV;
@@ -366,15 +386,21 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
366 return -EINVAL; 386 return -EINVAL;
367 } 387 }
368 388
369 priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames, 389 for (i = 0; i < priv->num_rx_queues; i++) {
370 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); 390 priv->rx_queue[i]->rxic = mk_ic_value(
391 cvals->rx_max_coalesced_frames,
392 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
393 }
371 394
372 /* Set up tx coalescing */ 395 /* Set up tx coalescing */
373 if ((cvals->tx_coalesce_usecs == 0) || 396 if ((cvals->tx_coalesce_usecs == 0) ||
374 (cvals->tx_max_coalesced_frames == 0)) 397 (cvals->tx_max_coalesced_frames == 0)) {
375 priv->txcoalescing = 0; 398 for (i = 0; i < priv->num_tx_queues; i++)
376 else 399 priv->tx_queue[i]->txcoalescing = 0;
377 priv->txcoalescing = 1; 400 } else {
401 for (i = 0; i < priv->num_tx_queues; i++)
402 priv->tx_queue[i]->txcoalescing = 1;
403 }
378 404
379 /* Check the bounds of the values */ 405 /* Check the bounds of the values */
380 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { 406 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +415,13 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
389 return -EINVAL; 415 return -EINVAL;
390 } 416 }
391 417
392 priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames, 418 for (i = 0; i < priv->num_tx_queues; i++) {
393 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); 419 priv->tx_queue[i]->txic = mk_ic_value(
394 420 cvals->tx_max_coalesced_frames,
395 gfar_write(&priv->regs->rxic, 0); 421 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
396 if (priv->rxcoalescing) 422 }
397 gfar_write(&priv->regs->rxic, priv->rxic);
398 423
399 gfar_write(&priv->regs->txic, 0); 424 gfar_configure_coalescing(priv, 0xFF, 0xFF);
400 if (priv->txcoalescing)
401 gfar_write(&priv->regs->txic, priv->txic);
402 425
403 return 0; 426 return 0;
404} 427}
@@ -409,6 +432,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
409static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 432static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
410{ 433{
411 struct gfar_private *priv = netdev_priv(dev); 434 struct gfar_private *priv = netdev_priv(dev);
435 struct gfar_priv_tx_q *tx_queue = NULL;
436 struct gfar_priv_rx_q *rx_queue = NULL;
437
438 tx_queue = priv->tx_queue[0];
439 rx_queue = priv->rx_queue[0];
412 440
413 rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; 441 rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
414 rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; 442 rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +446,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
418 /* Values changeable by the user. The valid values are 446 /* Values changeable by the user. The valid values are
419 * in the range 1 to the "*_max_pending" counterpart above. 447 * in the range 1 to the "*_max_pending" counterpart above.
420 */ 448 */
421 rvals->rx_pending = priv->rx_ring_size; 449 rvals->rx_pending = rx_queue->rx_ring_size;
422 rvals->rx_mini_pending = priv->rx_ring_size; 450 rvals->rx_mini_pending = rx_queue->rx_ring_size;
423 rvals->rx_jumbo_pending = priv->rx_ring_size; 451 rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
424 rvals->tx_pending = priv->tx_ring_size; 452 rvals->tx_pending = tx_queue->tx_ring_size;
425} 453}
426 454
427/* Change the current ring parameters, stopping the controller if 455/* Change the current ring parameters, stopping the controller if
@@ -431,7 +459,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
431static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 459static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
432{ 460{
433 struct gfar_private *priv = netdev_priv(dev); 461 struct gfar_private *priv = netdev_priv(dev);
434 int err = 0; 462 int err = 0, i = 0;
435 463
436 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) 464 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
437 return -EINVAL; 465 return -EINVAL;
@@ -451,34 +479,41 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
451 return -EINVAL; 479 return -EINVAL;
452 } 480 }
453 481
482
454 if (dev->flags & IFF_UP) { 483 if (dev->flags & IFF_UP) {
455 unsigned long flags; 484 unsigned long flags;
456 485
457 /* Halt TX and RX, and process the frames which 486 /* Halt TX and RX, and process the frames which
458 * have already been received */ 487 * have already been received */
459 spin_lock_irqsave(&priv->txlock, flags); 488 local_irq_save(flags);
460 spin_lock(&priv->rxlock); 489 lock_tx_qs(priv);
490 lock_rx_qs(priv);
461 491
462 gfar_halt(dev); 492 gfar_halt(dev);
463 493
464 spin_unlock(&priv->rxlock); 494 unlock_rx_qs(priv);
465 spin_unlock_irqrestore(&priv->txlock, flags); 495 unlock_tx_qs(priv);
496 local_irq_restore(flags);
466 497
467 gfar_clean_rx_ring(dev, priv->rx_ring_size); 498 for (i = 0; i < priv->num_rx_queues; i++)
499 gfar_clean_rx_ring(priv->rx_queue[i],
500 priv->rx_queue[i]->rx_ring_size);
468 501
469 /* Now we take down the rings to rebuild them */ 502 /* Now we take down the rings to rebuild them */
470 stop_gfar(dev); 503 stop_gfar(dev);
471 } 504 }
472 505
473 /* Change the size */ 506 /* Change the size */
474 priv->rx_ring_size = rvals->rx_pending; 507 for (i = 0; i < priv->num_rx_queues; i++) {
475 priv->tx_ring_size = rvals->tx_pending; 508 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
476 priv->num_txbdfree = priv->tx_ring_size; 509 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
510 priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
511 }
477 512
478 /* Rebuild the rings with the new size */ 513 /* Rebuild the rings with the new size */
479 if (dev->flags & IFF_UP) { 514 if (dev->flags & IFF_UP) {
480 err = startup_gfar(dev); 515 err = startup_gfar(dev);
481 netif_wake_queue(dev); 516 netif_tx_wake_all_queues(dev);
482 } 517 }
483 return err; 518 return err;
484} 519}
@@ -487,23 +522,28 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
487{ 522{
488 struct gfar_private *priv = netdev_priv(dev); 523 struct gfar_private *priv = netdev_priv(dev);
489 unsigned long flags; 524 unsigned long flags;
490 int err = 0; 525 int err = 0, i = 0;
491 526
492 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) 527 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
493 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
494 529
530
495 if (dev->flags & IFF_UP) { 531 if (dev->flags & IFF_UP) {
496 /* Halt TX and RX, and process the frames which 532 /* Halt TX and RX, and process the frames which
497 * have already been received */ 533 * have already been received */
498 spin_lock_irqsave(&priv->txlock, flags); 534 local_irq_save(flags);
499 spin_lock(&priv->rxlock); 535 lock_tx_qs(priv);
536 lock_rx_qs(priv);
500 537
501 gfar_halt(dev); 538 gfar_halt(dev);
502 539
503 spin_unlock(&priv->rxlock); 540 unlock_tx_qs(priv);
504 spin_unlock_irqrestore(&priv->txlock, flags); 541 unlock_rx_qs(priv);
542 local_irq_save(flags);
505 543
506 gfar_clean_rx_ring(dev, priv->rx_ring_size); 544 for (i = 0; i < priv->num_rx_queues; i++)
545 gfar_clean_rx_ring(priv->rx_queue[i],
546 priv->rx_queue[i]->rx_ring_size);
507 547
508 /* Now we take down the rings to rebuild them */ 548 /* Now we take down the rings to rebuild them */
509 stop_gfar(dev); 549 stop_gfar(dev);
@@ -515,7 +555,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
515 555
516 if (dev->flags & IFF_UP) { 556 if (dev->flags & IFF_UP) {
517 err = startup_gfar(dev); 557 err = startup_gfar(dev);
518 netif_wake_queue(dev); 558 netif_tx_wake_all_queues(dev);
519 } 559 }
520 return err; 560 return err;
521} 561}
@@ -605,6 +645,241 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
605} 645}
606#endif 646#endif
607 647
648static int gfar_ethflow_to_class(int flow_type, u64 *class)
649{
650 switch (flow_type) {
651 case TCP_V4_FLOW:
652 *class = CLASS_CODE_TCP_IPV4;
653 break;
654 case UDP_V4_FLOW:
655 *class = CLASS_CODE_UDP_IPV4;
656 break;
657 case AH_V4_FLOW:
658 case ESP_V4_FLOW:
659 *class = CLASS_CODE_AH_ESP_IPV4;
660 break;
661 case SCTP_V4_FLOW:
662 *class = CLASS_CODE_SCTP_IPV4;
663 break;
664 case TCP_V6_FLOW:
665 *class = CLASS_CODE_TCP_IPV6;
666 break;
667 case UDP_V6_FLOW:
668 *class = CLASS_CODE_UDP_IPV6;
669 break;
670 case AH_V6_FLOW:
671 case ESP_V6_FLOW:
672 *class = CLASS_CODE_AH_ESP_IPV6;
673 break;
674 case SCTP_V6_FLOW:
675 *class = CLASS_CODE_SCTP_IPV6;
676 break;
677 default:
678 return 0;
679 }
680
681 return 1;
682}
683
684static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
685{
686 u32 fcr = 0x0, fpr = FPR_FILER_MASK;
687
688 if (ethflow & RXH_L2DA) {
689 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
690 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
691 ftp_rqfpr[priv->cur_filer_idx] = fpr;
692 ftp_rqfcr[priv->cur_filer_idx] = fcr;
693 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
694 priv->cur_filer_idx = priv->cur_filer_idx - 1;
695
696 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
697 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
698 ftp_rqfpr[priv->cur_filer_idx] = fpr;
699 ftp_rqfcr[priv->cur_filer_idx] = fcr;
700 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
701 priv->cur_filer_idx = priv->cur_filer_idx - 1;
702 }
703
704 if (ethflow & RXH_VLAN) {
705 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
706 RQFCR_AND | RQFCR_HASHTBL_0;
707 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
708 ftp_rqfpr[priv->cur_filer_idx] = fpr;
709 ftp_rqfcr[priv->cur_filer_idx] = fcr;
710 priv->cur_filer_idx = priv->cur_filer_idx - 1;
711 }
712
713 if (ethflow & RXH_IP_SRC) {
714 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
715 RQFCR_AND | RQFCR_HASHTBL_0;
716 ftp_rqfpr[priv->cur_filer_idx] = fpr;
717 ftp_rqfcr[priv->cur_filer_idx] = fcr;
718 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
719 priv->cur_filer_idx = priv->cur_filer_idx - 1;
720 }
721
722 if (ethflow & (RXH_IP_DST)) {
723 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
724 RQFCR_AND | RQFCR_HASHTBL_0;
725 ftp_rqfpr[priv->cur_filer_idx] = fpr;
726 ftp_rqfcr[priv->cur_filer_idx] = fcr;
727 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
728 priv->cur_filer_idx = priv->cur_filer_idx - 1;
729 }
730
731 if (ethflow & RXH_L3_PROTO) {
732 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
733 RQFCR_AND | RQFCR_HASHTBL_0;
734 ftp_rqfpr[priv->cur_filer_idx] = fpr;
735 ftp_rqfcr[priv->cur_filer_idx] = fcr;
736 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
737 priv->cur_filer_idx = priv->cur_filer_idx - 1;
738 }
739
740 if (ethflow & RXH_L4_B_0_1) {
741 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
742 RQFCR_AND | RQFCR_HASHTBL_0;
743 ftp_rqfpr[priv->cur_filer_idx] = fpr;
744 ftp_rqfcr[priv->cur_filer_idx] = fcr;
745 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
746 priv->cur_filer_idx = priv->cur_filer_idx - 1;
747 }
748
749 if (ethflow & RXH_L4_B_2_3) {
750 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
751 RQFCR_AND | RQFCR_HASHTBL_0;
752 ftp_rqfpr[priv->cur_filer_idx] = fpr;
753 ftp_rqfcr[priv->cur_filer_idx] = fcr;
754 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
755 priv->cur_filer_idx = priv->cur_filer_idx - 1;
756 }
757}
758
759static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
760{
761 unsigned int last_rule_idx = priv->cur_filer_idx;
762 unsigned int cmp_rqfpr;
763 unsigned int local_rqfpr[MAX_FILER_IDX + 1];
764 unsigned int local_rqfcr[MAX_FILER_IDX + 1];
765 int i = 0x0, k = 0x0;
766 int j = MAX_FILER_IDX, l = 0x0;
767
768 switch (class) {
769 case TCP_V4_FLOW:
770 cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
771 break;
772 case UDP_V4_FLOW:
773 cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
774 break;
775 case TCP_V6_FLOW:
776 cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
777 break;
778 case UDP_V6_FLOW:
779 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
780 break;
781 case IPV4_FLOW:
782 cmp_rqfpr = RQFPR_IPV4;
783 case IPV6_FLOW:
784 cmp_rqfpr = RQFPR_IPV6;
785 break;
786 default:
787 printk(KERN_ERR "Right now this class is not supported\n");
788 return 0;
789 }
790
791 for (i = 0; i < MAX_FILER_IDX + 1; i++) {
792 local_rqfpr[j] = ftp_rqfpr[i];
793 local_rqfcr[j] = ftp_rqfcr[i];
794 j--;
795 if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
796 RQFCR_CLE |RQFCR_AND)) &&
797 (ftp_rqfpr[i] == cmp_rqfpr))
798 break;
799 }
800
801 if (i == MAX_FILER_IDX + 1) {
802 printk(KERN_ERR "No parse rule found, ");
803 printk(KERN_ERR "can't create hash rules\n");
804 return 0;
805 }
806
807 /* If a match was found, then it begins the starting of a cluster rule
808 * if it was already programmed, we need to overwrite these rules
809 */
810 for (l = i+1; l < MAX_FILER_IDX; l++) {
811 if ((ftp_rqfcr[l] & RQFCR_CLE) &&
812 !(ftp_rqfcr[l] & RQFCR_AND)) {
813 ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
814 RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
815 ftp_rqfpr[l] = FPR_FILER_MASK;
816 gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
817 break;
818 }
819
820 if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
821 continue;
822 else {
823 local_rqfpr[j] = ftp_rqfpr[l];
824 local_rqfcr[j] = ftp_rqfcr[l];
825 j--;
826 }
827 }
828
829 priv->cur_filer_idx = l - 1;
830 last_rule_idx = l;
831
832 /* hash rules */
833 ethflow_to_filer_rules(priv, ethflow);
834
835 /* Write back the popped out rules again */
836 for (k = j+1; k < MAX_FILER_IDX; k++) {
837 ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
838 ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
839 gfar_write_filer(priv, priv->cur_filer_idx,
840 local_rqfcr[k], local_rqfpr[k]);
841 if (!priv->cur_filer_idx)
842 break;
843 priv->cur_filer_idx = priv->cur_filer_idx - 1;
844 }
845
846 return 1;
847}
848
849static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
850{
851 u64 class;
852
853 if (!gfar_ethflow_to_class(cmd->flow_type, &class))
854 return -EINVAL;
855
856 if (class < CLASS_CODE_USER_PROG1 ||
857 class > CLASS_CODE_SCTP_IPV6)
858 return -EINVAL;
859
860 /* write the filer rules here */
861 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
862 return -1;
863
864 return 0;
865}
866
867static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
868{
869 struct gfar_private *priv = netdev_priv(dev);
870 int ret = 0;
871
872 switch(cmd->cmd) {
873 case ETHTOOL_SRXFH:
874 ret = gfar_set_hash_opts(priv, cmd);
875 break;
876 default:
877 ret = -EINVAL;
878 }
879
880 return ret;
881}
882
608const struct ethtool_ops gfar_ethtool_ops = { 883const struct ethtool_ops gfar_ethtool_ops = {
609 .get_settings = gfar_gsettings, 884 .get_settings = gfar_gsettings,
610 .set_settings = gfar_ssettings, 885 .set_settings = gfar_ssettings,
@@ -630,4 +905,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
630 .get_wol = gfar_get_wol, 905 .get_wol = gfar_get_wol,
631 .set_wol = gfar_set_wol, 906 .set_wol = gfar_set_wol,
632#endif 907#endif
908 .set_rxnfc = gfar_set_nfc,
633}; 909};
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index dd26da74f27a..3724835d2856 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -8,8 +8,9 @@
8 * 8 *
9 * Author: Andy Fleming 9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala (galak@kernel.crashing.org) 10 * Maintainer: Kumar Gala (galak@kernel.crashing.org)
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 12 *
12 * Copyright (c) 2002-2005 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009 Freescale Semiconductor, Inc.
13 * 14 *
14 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
@@ -49,6 +50,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
49 const char *buf, size_t count) 50 const char *buf, size_t count)
50{ 51{
51 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 52 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
53 struct gfar __iomem *regs = priv->gfargrp[0].regs;
52 int new_setting = 0; 54 int new_setting = 0;
53 u32 temp; 55 u32 temp;
54 unsigned long flags; 56 unsigned long flags;
@@ -56,6 +58,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
56 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING)) 58 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
57 return count; 59 return count;
58 60
61
59 /* Find out the new setting */ 62 /* Find out the new setting */
60 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) 63 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
61 new_setting = 1; 64 new_setting = 1;
@@ -65,21 +68,24 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
65 else 68 else
66 return count; 69 return count;
67 70
68 spin_lock_irqsave(&priv->rxlock, flags); 71
72 local_irq_save(flags);
73 lock_rx_qs(priv);
69 74
70 /* Set the new stashing value */ 75 /* Set the new stashing value */
71 priv->bd_stash_en = new_setting; 76 priv->bd_stash_en = new_setting;
72 77
73 temp = gfar_read(&priv->regs->attr); 78 temp = gfar_read(&regs->attr);
74 79
75 if (new_setting) 80 if (new_setting)
76 temp |= ATTR_BDSTASH; 81 temp |= ATTR_BDSTASH;
77 else 82 else
78 temp &= ~(ATTR_BDSTASH); 83 temp &= ~(ATTR_BDSTASH);
79 84
80 gfar_write(&priv->regs->attr, temp); 85 gfar_write(&regs->attr, temp);
81 86
82 spin_unlock_irqrestore(&priv->rxlock, flags); 87 unlock_rx_qs(priv);
88 local_irq_restore(flags);
83 89
84 return count; 90 return count;
85} 91}
@@ -99,6 +105,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
99 const char *buf, size_t count) 105 const char *buf, size_t count)
100{ 106{
101 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 107 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
108 struct gfar __iomem *regs = priv->gfargrp[0].regs;
102 unsigned int length = simple_strtoul(buf, NULL, 0); 109 unsigned int length = simple_strtoul(buf, NULL, 0);
103 u32 temp; 110 u32 temp;
104 unsigned long flags; 111 unsigned long flags;
@@ -106,7 +113,9 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
106 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) 113 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
107 return count; 114 return count;
108 115
109 spin_lock_irqsave(&priv->rxlock, flags); 116 local_irq_save(flags);
117 lock_rx_qs(priv);
118
110 if (length > priv->rx_buffer_size) 119 if (length > priv->rx_buffer_size)
111 goto out; 120 goto out;
112 121
@@ -115,23 +124,24 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
115 124
116 priv->rx_stash_size = length; 125 priv->rx_stash_size = length;
117 126
118 temp = gfar_read(&priv->regs->attreli); 127 temp = gfar_read(&regs->attreli);
119 temp &= ~ATTRELI_EL_MASK; 128 temp &= ~ATTRELI_EL_MASK;
120 temp |= ATTRELI_EL(length); 129 temp |= ATTRELI_EL(length);
121 gfar_write(&priv->regs->attreli, temp); 130 gfar_write(&regs->attreli, temp);
122 131
123 /* Turn stashing on/off as appropriate */ 132 /* Turn stashing on/off as appropriate */
124 temp = gfar_read(&priv->regs->attr); 133 temp = gfar_read(&regs->attr);
125 134
126 if (length) 135 if (length)
127 temp |= ATTR_BUFSTASH; 136 temp |= ATTR_BUFSTASH;
128 else 137 else
129 temp &= ~(ATTR_BUFSTASH); 138 temp &= ~(ATTR_BUFSTASH);
130 139
131 gfar_write(&priv->regs->attr, temp); 140 gfar_write(&regs->attr, temp);
132 141
133out: 142out:
134 spin_unlock_irqrestore(&priv->rxlock, flags); 143 unlock_rx_qs(priv);
144 local_irq_restore(flags);
135 145
136 return count; 146 return count;
137} 147}
@@ -154,6 +164,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
154 const char *buf, size_t count) 164 const char *buf, size_t count)
155{ 165{
156 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 166 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
167 struct gfar __iomem *regs = priv->gfargrp[0].regs;
157 unsigned short index = simple_strtoul(buf, NULL, 0); 168 unsigned short index = simple_strtoul(buf, NULL, 0);
158 u32 temp; 169 u32 temp;
159 unsigned long flags; 170 unsigned long flags;
@@ -161,7 +172,9 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
161 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) 172 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
162 return count; 173 return count;
163 174
164 spin_lock_irqsave(&priv->rxlock, flags); 175 local_irq_save(flags);
176 lock_rx_qs(priv);
177
165 if (index > priv->rx_stash_size) 178 if (index > priv->rx_stash_size)
166 goto out; 179 goto out;
167 180
@@ -170,13 +183,14 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
170 183
171 priv->rx_stash_index = index; 184 priv->rx_stash_index = index;
172 185
173 temp = gfar_read(&priv->regs->attreli); 186 temp = gfar_read(&regs->attreli);
174 temp &= ~ATTRELI_EI_MASK; 187 temp &= ~ATTRELI_EI_MASK;
175 temp |= ATTRELI_EI(index); 188 temp |= ATTRELI_EI(index);
176 gfar_write(&priv->regs->attreli, flags); 189 gfar_write(&regs->attreli, flags);
177 190
178out: 191out:
179 spin_unlock_irqrestore(&priv->rxlock, flags); 192 unlock_rx_qs(priv);
193 local_irq_restore(flags);
180 194
181 return count; 195 return count;
182} 196}
@@ -198,6 +212,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
198 const char *buf, size_t count) 212 const char *buf, size_t count)
199{ 213{
200 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 214 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
215 struct gfar __iomem *regs = priv->gfargrp[0].regs;
201 unsigned int length = simple_strtoul(buf, NULL, 0); 216 unsigned int length = simple_strtoul(buf, NULL, 0);
202 u32 temp; 217 u32 temp;
203 unsigned long flags; 218 unsigned long flags;
@@ -205,16 +220,18 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
205 if (length > GFAR_MAX_FIFO_THRESHOLD) 220 if (length > GFAR_MAX_FIFO_THRESHOLD)
206 return count; 221 return count;
207 222
208 spin_lock_irqsave(&priv->txlock, flags); 223 local_irq_save(flags);
224 lock_tx_qs(priv);
209 225
210 priv->fifo_threshold = length; 226 priv->fifo_threshold = length;
211 227
212 temp = gfar_read(&priv->regs->fifo_tx_thr); 228 temp = gfar_read(&regs->fifo_tx_thr);
213 temp &= ~FIFO_TX_THR_MASK; 229 temp &= ~FIFO_TX_THR_MASK;
214 temp |= length; 230 temp |= length;
215 gfar_write(&priv->regs->fifo_tx_thr, temp); 231 gfar_write(&regs->fifo_tx_thr, temp);
216 232
217 spin_unlock_irqrestore(&priv->txlock, flags); 233 unlock_tx_qs(priv);
234 local_irq_restore(flags);
218 235
219 return count; 236 return count;
220} 237}
@@ -235,6 +252,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
235 const char *buf, size_t count) 252 const char *buf, size_t count)
236{ 253{
237 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 254 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
255 struct gfar __iomem *regs = priv->gfargrp[0].regs;
238 unsigned int num = simple_strtoul(buf, NULL, 0); 256 unsigned int num = simple_strtoul(buf, NULL, 0);
239 u32 temp; 257 u32 temp;
240 unsigned long flags; 258 unsigned long flags;
@@ -242,16 +260,18 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
242 if (num > GFAR_MAX_FIFO_STARVE) 260 if (num > GFAR_MAX_FIFO_STARVE)
243 return count; 261 return count;
244 262
245 spin_lock_irqsave(&priv->txlock, flags); 263 local_irq_save(flags);
264 lock_tx_qs(priv);
246 265
247 priv->fifo_starve = num; 266 priv->fifo_starve = num;
248 267
249 temp = gfar_read(&priv->regs->fifo_tx_starve); 268 temp = gfar_read(&regs->fifo_tx_starve);
250 temp &= ~FIFO_TX_STARVE_MASK; 269 temp &= ~FIFO_TX_STARVE_MASK;
251 temp |= num; 270 temp |= num;
252 gfar_write(&priv->regs->fifo_tx_starve, temp); 271 gfar_write(&regs->fifo_tx_starve, temp);
253 272
254 spin_unlock_irqrestore(&priv->txlock, flags); 273 unlock_tx_qs(priv);
274 local_irq_restore(flags);
255 275
256 return count; 276 return count;
257} 277}
@@ -273,6 +293,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
273 const char *buf, size_t count) 293 const char *buf, size_t count)
274{ 294{
275 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 295 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
296 struct gfar __iomem *regs = priv->gfargrp[0].regs;
276 unsigned int num = simple_strtoul(buf, NULL, 0); 297 unsigned int num = simple_strtoul(buf, NULL, 0);
277 u32 temp; 298 u32 temp;
278 unsigned long flags; 299 unsigned long flags;
@@ -280,16 +301,18 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
280 if (num > GFAR_MAX_FIFO_STARVE_OFF) 301 if (num > GFAR_MAX_FIFO_STARVE_OFF)
281 return count; 302 return count;
282 303
283 spin_lock_irqsave(&priv->txlock, flags); 304 local_irq_save(flags);
305 lock_tx_qs(priv);
284 306
285 priv->fifo_starve_off = num; 307 priv->fifo_starve_off = num;
286 308
287 temp = gfar_read(&priv->regs->fifo_tx_starve_shutoff); 309 temp = gfar_read(&regs->fifo_tx_starve_shutoff);
288 temp &= ~FIFO_TX_STARVE_OFF_MASK; 310 temp &= ~FIFO_TX_STARVE_OFF_MASK;
289 temp |= num; 311 temp |= num;
290 gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp); 312 gfar_write(&regs->fifo_tx_starve_shutoff, temp);
291 313
292 spin_unlock_irqrestore(&priv->txlock, flags); 314 unlock_tx_qs(priv);
315 local_irq_restore(flags);
293 316
294 return count; 317 return count;
295} 318}
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index c5d92ec176d0..af117c626e73 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -24,6 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include <linux/module.h>
27#include <linux/sched.h> 28#include <linux/sched.h>
28#include <linux/string.h> 29#include <linux/string.h>
29#include <linux/errno.h> 30#include <linux/errno.h>
@@ -2990,6 +2991,7 @@ static struct of_device_id emac_match[] =
2990 }, 2991 },
2991 {}, 2992 {},
2992}; 2993};
2994MODULE_DEVICE_TABLE(of, emac_match);
2993 2995
2994static struct of_platform_driver emac_driver = { 2996static struct of_platform_driver emac_driver = {
2995 .name = "emac", 2997 .name = "emac",
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 801f088c134f..69c25668dd63 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -98,12 +98,15 @@ static void ri_tasklet(unsigned long dev)
98 stats->tx_packets++; 98 stats->tx_packets++;
99 stats->tx_bytes +=skb->len; 99 stats->tx_bytes +=skb->len;
100 100
101 skb->dev = __dev_get_by_index(&init_net, skb->iif); 101 rcu_read_lock();
102 skb->dev = dev_get_by_index_rcu(&init_net, skb->iif);
102 if (!skb->dev) { 103 if (!skb->dev) {
104 rcu_read_unlock();
103 dev_kfree_skb(skb); 105 dev_kfree_skb(skb);
104 stats->tx_dropped++; 106 stats->tx_dropped++;
105 break; 107 break;
106 } 108 }
109 rcu_read_unlock();
107 skb->iif = _dev->ifindex; 110 skb->iif = _dev->ifindex;
108 111
109 if (from & AT_EGRESS) { 112 if (from & AT_EGRESS) {
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 7be3a0b6a057..b3808ca49ef5 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -66,6 +66,8 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
66 E1000_EICR_RX_QUEUE3) 66 E1000_EICR_RX_QUEUE3)
67 67
68/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 68/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
69#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
70#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
69 71
70/* Receive Descriptor - Advanced */ 72/* Receive Descriptor - Advanced */
71union e1000_adv_rx_desc { 73union e1000_adv_rx_desc {
@@ -98,6 +100,7 @@ union e1000_adv_rx_desc {
98 100
99#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 101#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
100#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 102#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
103#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
101 104
102/* Transmit Descriptor - Advanced */ 105/* Transmit Descriptor - Advanced */
103union e1000_adv_tx_desc { 106union e1000_adv_tx_desc {
@@ -167,6 +170,17 @@ struct e1000_adv_tx_context_desc {
167#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ 170#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
168#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ 171#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
169 172
173/* ETQF register bit definitions */
174#define E1000_ETQF_FILTER_ENABLE (1 << 26)
175#define E1000_ETQF_1588 (1 << 30)
176
177/* FTQF register bit definitions */
178#define E1000_FTQF_VF_BP 0x00008000
179#define E1000_FTQF_1588_TIME_STAMP 0x08000000
180#define E1000_FTQF_MASK 0xF0000000
181#define E1000_FTQF_MASK_PROTO_BP 0x10000000
182#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
183
170#define E1000_NVM_APME_82575 0x0400 184#define E1000_NVM_APME_82575 0x0400
171#define MAX_NUM_VFS 8 185#define MAX_NUM_VFS 8
172 186
@@ -203,8 +217,19 @@ struct e1000_adv_tx_context_desc {
203#define E1000_IOVCTL 0x05BBC 217#define E1000_IOVCTL 0x05BBC
204#define E1000_IOVCTL_REUSE_VFQ 0x00000001 218#define E1000_IOVCTL_REUSE_VFQ 0x00000001
205 219
220#define E1000_RPLOLR_STRVLAN 0x40000000
221#define E1000_RPLOLR_STRCRC 0x80000000
222
223#define E1000_DTXCTL_8023LL 0x0004
224#define E1000_DTXCTL_VLAN_ADDED 0x0008
225#define E1000_DTXCTL_OOS_ENABLE 0x0010
226#define E1000_DTXCTL_MDP_EN 0x0020
227#define E1000_DTXCTL_SPOOF_INT 0x0040
228
206#define ALL_QUEUES 0xFFFF 229#define ALL_QUEUES 0xFFFF
207 230
231/* RX packet buffer size defines */
232#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
208void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); 233void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
209void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); 234void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
210 235
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index cb916833f303..48fcab03b752 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -435,6 +435,39 @@
435/* Flow Control */ 435/* Flow Control */
436#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 436#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
437 437
438#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
439#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */
440
441#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
442#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
443#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
444#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
445#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
446#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
447#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
448#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */
449
450#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
451#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
452#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
453#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
454#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
455#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
456
457#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
458#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
459#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
460#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
461#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
462#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
463#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
464#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
465#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
466#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
467#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
468
469#define E1000_TIMINCA_16NS_SHIFT 24
470
438/* PCI Express Control */ 471/* PCI Express Control */
439#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 472#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
440#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 473#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index ebc02ea3f198..bb112fb6c3a1 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -58,10 +58,12 @@
58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) 58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
59 59
60#define E1000_VF_RESET 0x01 /* VF requests reset */ 60#define E1000_VF_RESET 0x01 /* VF requests reset */
61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ 61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
63#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 63#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
64#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 64#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
65#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
66#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
65 67
66#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ 68#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
67 69
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 76c338929f68..934e03b053ac 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -76,59 +76,18 @@
76#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ 76#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
77 77
78/* IEEE 1588 TIMESYNCH */ 78/* IEEE 1588 TIMESYNCH */
79#define E1000_TSYNCTXCTL 0x0B614 79#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
80#define E1000_TSYNCTXCTL_VALID (1<<0) 80#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
81#define E1000_TSYNCTXCTL_ENABLED (1<<4) 81#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
82#define E1000_TSYNCRXCTL 0x0B620 82#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
83#define E1000_TSYNCRXCTL_VALID (1<<0) 83#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
84#define E1000_TSYNCRXCTL_ENABLED (1<<4) 84#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
85enum { 85#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
86 E1000_TSYNCRXCTL_TYPE_L2_V2 = 0, 86#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
87 E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1), 87#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
88 E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2), 88#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
89 E1000_TSYNCRXCTL_TYPE_ALL = (1<<3), 89#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
90 E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1), 90#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
91};
92#define E1000_TSYNCRXCFG 0x05F50
93enum {
94 E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0,
95 E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0,
96 E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0,
97 E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0,
98 E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0,
99
100 E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8,
101 E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8,
102 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8,
103 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8,
104 E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8,
105 E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8,
106 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8,
107 E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8,
108 E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8,
109 E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8,
110};
111#define E1000_SYSTIML 0x0B600
112#define E1000_SYSTIMH 0x0B604
113#define E1000_TIMINCA 0x0B608
114
115#define E1000_RXMTRL 0x0B634
116#define E1000_RXSTMPL 0x0B624
117#define E1000_RXSTMPH 0x0B628
118#define E1000_RXSATRL 0x0B62C
119#define E1000_RXSATRH 0x0B630
120
121#define E1000_TXSTMPL 0x0B618
122#define E1000_TXSTMPH 0x0B61C
123
124#define E1000_ETQF0 0x05CB0
125#define E1000_ETQF1 0x05CB4
126#define E1000_ETQF2 0x05CB8
127#define E1000_ETQF3 0x05CBC
128#define E1000_ETQF4 0x05CC0
129#define E1000_ETQF5 0x05CC4
130#define E1000_ETQF6 0x05CC8
131#define E1000_ETQF7 0x05CCC
132 91
133/* Filtering Registers */ 92/* Filtering Registers */
134#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) 93#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
@@ -143,7 +102,9 @@ enum {
143#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ 102#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
144 103
145#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) 104#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
105
146/* Split and Replication RX Control - RW */ 106/* Split and Replication RX Control - RW */
107#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
147/* 108/*
148 * Convenience macros 109 * Convenience macros
149 * 110 *
@@ -288,10 +249,17 @@ enum {
288#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 249#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
289#define E1000_RA 0x05400 /* Receive Address - RW Array */ 250#define E1000_RA 0x05400 /* Receive Address - RW Array */
290#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ 251#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
252#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
291#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 253#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
292 (0x054E0 + ((_i - 16) * 8))) 254 (0x054E0 + ((_i - 16) * 8)))
293#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ 255#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
294 (0x054E4 + ((_i - 16) * 8))) 256 (0x054E4 + ((_i - 16) * 8)))
257#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
258#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
259#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
260#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
261#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
262#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
295#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ 263#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
296#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ 264#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
297#define E1000_WUC 0x05800 /* Wakeup Control - RW */ 265#define E1000_WUC 0x05800 /* Wakeup Control - RW */
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index b805b1c63f80..3298f5a11dab 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -55,6 +55,8 @@ struct igb_adapter;
55#define IGB_DEFAULT_ITR 3 /* dynamic */ 55#define IGB_DEFAULT_ITR 3 /* dynamic */
56#define IGB_MAX_ITR_USECS 10000 56#define IGB_MAX_ITR_USECS 10000
57#define IGB_MIN_ITR_USECS 10 57#define IGB_MIN_ITR_USECS 10
58#define NON_Q_VECTORS 1
59#define MAX_Q_VECTORS 8
58 60
59/* Transmit and receive queues */ 61/* Transmit and receive queues */
60#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \ 62#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \
@@ -71,9 +73,14 @@ struct vf_data_storage {
71 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; 73 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
72 u16 num_vf_mc_hashes; 74 u16 num_vf_mc_hashes;
73 u16 vlans_enabled; 75 u16 vlans_enabled;
74 bool clear_to_send; 76 u32 flags;
77 unsigned long last_nack;
75}; 78};
76 79
80#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
81#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
82#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
83
77/* RX descriptor control thresholds. 84/* RX descriptor control thresholds.
78 * PTHRESH - MAC will consider prefetch if it has fewer than this number of 85 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
79 * descriptors available in its onboard memory. 86 * descriptors available in its onboard memory.
@@ -85,17 +92,19 @@ struct vf_data_storage {
85 * descriptors until either it has this many to write back, or the 92 * descriptors until either it has this many to write back, or the
86 * ITR timer expires. 93 * ITR timer expires.
87 */ 94 */
88#define IGB_RX_PTHRESH 16 95#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
89#define IGB_RX_HTHRESH 8 96#define IGB_RX_HTHRESH 8
90#define IGB_RX_WTHRESH 1 97#define IGB_RX_WTHRESH 1
98#define IGB_TX_PTHRESH 8
99#define IGB_TX_HTHRESH 1
100#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
101 adapter->msix_entries) ? 0 : 16)
91 102
92/* this is the size past which hardware will drop packets when setting LPE=0 */ 103/* this is the size past which hardware will drop packets when setting LPE=0 */
93#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 104#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
94 105
95/* Supported Rx Buffer Sizes */ 106/* Supported Rx Buffer Sizes */
96#define IGB_RXBUFFER_128 128 /* Used for packet split */ 107#define IGB_RXBUFFER_128 128 /* Used for packet split */
97#define IGB_RXBUFFER_256 256 /* Used for packet split */
98#define IGB_RXBUFFER_512 512
99#define IGB_RXBUFFER_1024 1024 108#define IGB_RXBUFFER_1024 1024
100#define IGB_RXBUFFER_2048 2048 109#define IGB_RXBUFFER_2048 2048
101#define IGB_RXBUFFER_16384 16384 110#define IGB_RXBUFFER_16384 16384
@@ -141,36 +150,55 @@ struct igb_buffer {
141struct igb_tx_queue_stats { 150struct igb_tx_queue_stats {
142 u64 packets; 151 u64 packets;
143 u64 bytes; 152 u64 bytes;
153 u64 restart_queue;
144}; 154};
145 155
146struct igb_rx_queue_stats { 156struct igb_rx_queue_stats {
147 u64 packets; 157 u64 packets;
148 u64 bytes; 158 u64 bytes;
149 u64 drops; 159 u64 drops;
160 u64 csum_err;
161 u64 alloc_failed;
150}; 162};
151 163
152struct igb_ring { 164struct igb_q_vector {
153 struct igb_adapter *adapter; /* backlink */ 165 struct igb_adapter *adapter; /* backlink */
154 void *desc; /* descriptor ring memory */ 166 struct igb_ring *rx_ring;
155 dma_addr_t dma; /* phys address of the ring */ 167 struct igb_ring *tx_ring;
156 unsigned int size; /* length of desc. ring in bytes */ 168 struct napi_struct napi;
157 unsigned int count; /* number of desc. in the ring */
158 u16 next_to_use;
159 u16 next_to_clean;
160 u16 head;
161 u16 tail;
162 struct igb_buffer *buffer_info; /* array of buffer info structs */
163 169
164 u32 eims_value; 170 u32 eims_value;
165 u32 itr_val;
166 u16 itr_register;
167 u16 cpu; 171 u16 cpu;
168 172
169 u16 queue_index; 173 u16 itr_val;
170 u16 reg_idx; 174 u8 set_itr;
175 u8 itr_shift;
176 void __iomem *itr_register;
177
178 char name[IFNAMSIZ + 9];
179};
180
181struct igb_ring {
182 struct igb_q_vector *q_vector; /* backlink to q_vector */
183 struct net_device *netdev; /* back pointer to net_device */
184 struct pci_dev *pdev; /* pci device for dma mapping */
185 dma_addr_t dma; /* phys address of the ring */
186 void *desc; /* descriptor ring memory */
187 unsigned int size; /* length of desc. ring in bytes */
188 u16 count; /* number of desc. in the ring */
189 u16 next_to_use;
190 u16 next_to_clean;
191 u8 queue_index;
192 u8 reg_idx;
193 void __iomem *head;
194 void __iomem *tail;
195 struct igb_buffer *buffer_info; /* array of buffer info structs */
196
171 unsigned int total_bytes; 197 unsigned int total_bytes;
172 unsigned int total_packets; 198 unsigned int total_packets;
173 199
200 u32 flags;
201
174 union { 202 union {
175 /* TX */ 203 /* TX */
176 struct { 204 struct {
@@ -180,16 +208,18 @@ struct igb_ring {
180 /* RX */ 208 /* RX */
181 struct { 209 struct {
182 struct igb_rx_queue_stats rx_stats; 210 struct igb_rx_queue_stats rx_stats;
183 u64 rx_queue_drops; 211 u32 rx_buffer_len;
184 struct napi_struct napi;
185 int set_itr;
186 struct igb_ring *buddy;
187 }; 212 };
188 }; 213 };
189
190 char name[IFNAMSIZ + 5];
191}; 214};
192 215
216#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
217#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
218
219#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */
220
221#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
222
193#define E1000_RX_DESC_ADV(R, i) \ 223#define E1000_RX_DESC_ADV(R, i) \
194 (&(((union e1000_adv_rx_desc *)((R).desc))[i])) 224 (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
195#define E1000_TX_DESC_ADV(R, i) \ 225#define E1000_TX_DESC_ADV(R, i) \
@@ -197,6 +227,15 @@ struct igb_ring {
197#define E1000_TX_CTXTDESC_ADV(R, i) \ 227#define E1000_TX_CTXTDESC_ADV(R, i) \
198 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) 228 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
199 229
230/* igb_desc_unused - calculate if we have unused descriptors */
231static inline int igb_desc_unused(struct igb_ring *ring)
232{
233 if (ring->next_to_clean > ring->next_to_use)
234 return ring->next_to_clean - ring->next_to_use - 1;
235
236 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
237}
238
200/* board specific private data structure */ 239/* board specific private data structure */
201 240
202struct igb_adapter { 241struct igb_adapter {
@@ -205,18 +244,18 @@ struct igb_adapter {
205 struct vlan_group *vlgrp; 244 struct vlan_group *vlgrp;
206 u16 mng_vlan_id; 245 u16 mng_vlan_id;
207 u32 bd_number; 246 u32 bd_number;
208 u32 rx_buffer_len;
209 u32 wol; 247 u32 wol;
210 u32 en_mng_pt; 248 u32 en_mng_pt;
211 u16 link_speed; 249 u16 link_speed;
212 u16 link_duplex; 250 u16 link_duplex;
251
213 unsigned int total_tx_bytes; 252 unsigned int total_tx_bytes;
214 unsigned int total_tx_packets; 253 unsigned int total_tx_packets;
215 unsigned int total_rx_bytes; 254 unsigned int total_rx_bytes;
216 unsigned int total_rx_packets; 255 unsigned int total_rx_packets;
217 /* Interrupt Throttle Rate */ 256 /* Interrupt Throttle Rate */
218 u32 itr; 257 u32 rx_itr_setting;
219 u32 itr_setting; 258 u32 tx_itr_setting;
220 u16 tx_itr; 259 u16 tx_itr;
221 u16 rx_itr; 260 u16 rx_itr;
222 261
@@ -229,13 +268,7 @@ struct igb_adapter {
229 268
230 /* TX */ 269 /* TX */
231 struct igb_ring *tx_ring; /* One per active queue */ 270 struct igb_ring *tx_ring; /* One per active queue */
232 unsigned int restart_queue;
233 unsigned long tx_queue_len; 271 unsigned long tx_queue_len;
234 u32 txd_cmd;
235 u32 gotc;
236 u64 gotc_old;
237 u64 tpt_old;
238 u64 colc_old;
239 u32 tx_timeout_count; 272 u32 tx_timeout_count;
240 273
241 /* RX */ 274 /* RX */
@@ -243,18 +276,11 @@ struct igb_adapter {
243 int num_tx_queues; 276 int num_tx_queues;
244 int num_rx_queues; 277 int num_rx_queues;
245 278
246 u64 hw_csum_err;
247 u64 hw_csum_good;
248 u32 alloc_rx_buff_failed;
249 u32 gorc;
250 u64 gorc_old;
251 u16 rx_ps_hdr_size;
252 u32 max_frame_size; 279 u32 max_frame_size;
253 u32 min_frame_size; 280 u32 min_frame_size;
254 281
255 /* OS defined structs */ 282 /* OS defined structs */
256 struct net_device *netdev; 283 struct net_device *netdev;
257 struct napi_struct napi;
258 struct pci_dev *pdev; 284 struct pci_dev *pdev;
259 struct cyclecounter cycles; 285 struct cyclecounter cycles;
260 struct timecounter clock; 286 struct timecounter clock;
@@ -272,6 +298,9 @@ struct igb_adapter {
272 struct igb_ring test_rx_ring; 298 struct igb_ring test_rx_ring;
273 299
274 int msg_enable; 300 int msg_enable;
301
302 unsigned int num_q_vectors;
303 struct igb_q_vector *q_vector[MAX_Q_VECTORS];
275 struct msix_entry *msix_entries; 304 struct msix_entry *msix_entries;
276 u32 eims_enable_mask; 305 u32 eims_enable_mask;
277 u32 eims_other; 306 u32 eims_other;
@@ -282,8 +311,8 @@ struct igb_adapter {
282 u32 eeprom_wol; 311 u32 eeprom_wol;
283 312
284 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES]; 313 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
285 unsigned int tx_ring_count; 314 u16 tx_ring_count;
286 unsigned int rx_ring_count; 315 u16 rx_ring_count;
287 unsigned int vfs_allocated_count; 316 unsigned int vfs_allocated_count;
288 struct vf_data_storage *vf_data; 317 struct vf_data_storage *vf_data;
289}; 318};
@@ -291,9 +320,9 @@ struct igb_adapter {
291#define IGB_FLAG_HAS_MSI (1 << 0) 320#define IGB_FLAG_HAS_MSI (1 << 0)
292#define IGB_FLAG_DCA_ENABLED (1 << 1) 321#define IGB_FLAG_DCA_ENABLED (1 << 1)
293#define IGB_FLAG_QUAD_PORT_A (1 << 2) 322#define IGB_FLAG_QUAD_PORT_A (1 << 2)
294#define IGB_FLAG_NEED_CTX_IDX (1 << 3) 323#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
295#define IGB_FLAG_RX_CSUM_DISABLED (1 << 4)
296 324
325#define IGB_82576_TSYNC_SHIFT 19
297enum e1000_state_t { 326enum e1000_state_t {
298 __IGB_TESTING, 327 __IGB_TESTING,
299 __IGB_RESETTING, 328 __IGB_RESETTING,
@@ -313,10 +342,18 @@ extern void igb_down(struct igb_adapter *);
313extern void igb_reinit_locked(struct igb_adapter *); 342extern void igb_reinit_locked(struct igb_adapter *);
314extern void igb_reset(struct igb_adapter *); 343extern void igb_reset(struct igb_adapter *);
315extern int igb_set_spd_dplx(struct igb_adapter *, u16); 344extern int igb_set_spd_dplx(struct igb_adapter *, u16);
316extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *); 345extern int igb_setup_tx_resources(struct igb_ring *);
317extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *); 346extern int igb_setup_rx_resources(struct igb_ring *);
318extern void igb_free_tx_resources(struct igb_ring *); 347extern void igb_free_tx_resources(struct igb_ring *);
319extern void igb_free_rx_resources(struct igb_ring *); 348extern void igb_free_rx_resources(struct igb_ring *);
349extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
350extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
351extern void igb_setup_tctl(struct igb_adapter *);
352extern void igb_setup_rctl(struct igb_adapter *);
353extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
354extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
355 struct igb_buffer *);
356extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
320extern void igb_update_stats(struct igb_adapter *); 357extern void igb_update_stats(struct igb_adapter *);
321extern void igb_set_ethtool_ops(struct net_device *); 358extern void igb_set_ethtool_ops(struct net_device *);
322 359
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index a6da32f25a83..90b89a81f669 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -84,7 +84,6 @@ static const struct igb_stats igb_gstrings_stats[] = {
84 { "tx_single_coll_ok", IGB_STAT(stats.scc) }, 84 { "tx_single_coll_ok", IGB_STAT(stats.scc) },
85 { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, 85 { "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
86 { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, 86 { "tx_timeout_count", IGB_STAT(tx_timeout_count) },
87 { "tx_restart_queue", IGB_STAT(restart_queue) },
88 { "rx_long_length_errors", IGB_STAT(stats.roc) }, 87 { "rx_long_length_errors", IGB_STAT(stats.roc) },
89 { "rx_short_length_errors", IGB_STAT(stats.ruc) }, 88 { "rx_short_length_errors", IGB_STAT(stats.ruc) },
90 { "rx_align_errors", IGB_STAT(stats.algnerrc) }, 89 { "rx_align_errors", IGB_STAT(stats.algnerrc) },
@@ -95,34 +94,32 @@ static const struct igb_stats igb_gstrings_stats[] = {
95 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, 94 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
96 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, 95 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
97 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 96 { "rx_long_byte_count", IGB_STAT(stats.gorc) },
98 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
99 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
100 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, 97 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
101 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
102 { "tx_smbus", IGB_STAT(stats.mgptc) }, 98 { "tx_smbus", IGB_STAT(stats.mgptc) },
103 { "rx_smbus", IGB_STAT(stats.mgprc) }, 99 { "rx_smbus", IGB_STAT(stats.mgprc) },
104 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 100 { "dropped_smbus", IGB_STAT(stats.mgpdc) },
105}; 101};
106 102
107#define IGB_QUEUE_STATS_LEN \ 103#define IGB_QUEUE_STATS_LEN \
108 (((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \ 104 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
109 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \ 105 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
110 ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \ 106 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
111 (sizeof(struct igb_tx_queue_stats) / sizeof(u64)))) 107 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
112#define IGB_GLOBAL_STATS_LEN \ 108#define IGB_GLOBAL_STATS_LEN \
113 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) 109 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
114#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) 110#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
115static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { 111static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
116 "Register test (offline)", "Eeprom test (offline)", 112 "Register test (offline)", "Eeprom test (offline)",
117 "Interrupt test (offline)", "Loopback test (offline)", 113 "Interrupt test (offline)", "Loopback test (offline)",
118 "Link test (on/offline)" 114 "Link test (on/offline)"
119}; 115};
120#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN 116#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
121 117
122static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 118static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
123{ 119{
124 struct igb_adapter *adapter = netdev_priv(netdev); 120 struct igb_adapter *adapter = netdev_priv(netdev);
125 struct e1000_hw *hw = &adapter->hw; 121 struct e1000_hw *hw = &adapter->hw;
122 u32 status;
126 123
127 if (hw->phy.media_type == e1000_media_type_copper) { 124 if (hw->phy.media_type == e1000_media_type_copper) {
128 125
@@ -157,17 +154,20 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
157 154
158 ecmd->transceiver = XCVR_INTERNAL; 155 ecmd->transceiver = XCVR_INTERNAL;
159 156
160 if (rd32(E1000_STATUS) & E1000_STATUS_LU) { 157 status = rd32(E1000_STATUS);
161 158
162 adapter->hw.mac.ops.get_speed_and_duplex(hw, 159 if (status & E1000_STATUS_LU) {
163 &adapter->link_speed,
164 &adapter->link_duplex);
165 ecmd->speed = adapter->link_speed;
166 160
167 /* unfortunately FULL_DUPLEX != DUPLEX_FULL 161 if ((status & E1000_STATUS_SPEED_1000) ||
168 * and HALF_DUPLEX != DUPLEX_HALF */ 162 hw->phy.media_type != e1000_media_type_copper)
163 ecmd->speed = SPEED_1000;
164 else if (status & E1000_STATUS_SPEED_100)
165 ecmd->speed = SPEED_100;
166 else
167 ecmd->speed = SPEED_10;
169 168
170 if (adapter->link_duplex == FULL_DUPLEX) 169 if ((status & E1000_STATUS_FD) ||
170 hw->phy.media_type != e1000_media_type_copper)
171 ecmd->duplex = DUPLEX_FULL; 171 ecmd->duplex = DUPLEX_FULL;
172 else 172 else
173 ecmd->duplex = DUPLEX_HALF; 173 ecmd->duplex = DUPLEX_HALF;
@@ -258,8 +258,9 @@ static int igb_set_pauseparam(struct net_device *netdev,
258 if (netif_running(adapter->netdev)) { 258 if (netif_running(adapter->netdev)) {
259 igb_down(adapter); 259 igb_down(adapter);
260 igb_up(adapter); 260 igb_up(adapter);
261 } else 261 } else {
262 igb_reset(adapter); 262 igb_reset(adapter);
263 }
263 } else { 264 } else {
264 if (pause->rx_pause && pause->tx_pause) 265 if (pause->rx_pause && pause->tx_pause)
265 hw->fc.requested_mode = e1000_fc_full; 266 hw->fc.requested_mode = e1000_fc_full;
@@ -283,17 +284,20 @@ static int igb_set_pauseparam(struct net_device *netdev,
283static u32 igb_get_rx_csum(struct net_device *netdev) 284static u32 igb_get_rx_csum(struct net_device *netdev)
284{ 285{
285 struct igb_adapter *adapter = netdev_priv(netdev); 286 struct igb_adapter *adapter = netdev_priv(netdev);
286 return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED); 287 return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
287} 288}
288 289
289static int igb_set_rx_csum(struct net_device *netdev, u32 data) 290static int igb_set_rx_csum(struct net_device *netdev, u32 data)
290{ 291{
291 struct igb_adapter *adapter = netdev_priv(netdev); 292 struct igb_adapter *adapter = netdev_priv(netdev);
293 int i;
292 294
293 if (data) 295 for (i = 0; i < adapter->num_rx_queues; i++) {
294 adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED; 296 if (data)
295 else 297 adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
296 adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED; 298 else
299 adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
300 }
297 301
298 return 0; 302 return 0;
299} 303}
@@ -309,7 +313,7 @@ static int igb_set_tx_csum(struct net_device *netdev, u32 data)
309 313
310 if (data) { 314 if (data) {
311 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 315 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
312 if (adapter->hw.mac.type == e1000_82576) 316 if (adapter->hw.mac.type >= e1000_82576)
313 netdev->features |= NETIF_F_SCTP_CSUM; 317 netdev->features |= NETIF_F_SCTP_CSUM;
314 } else { 318 } else {
315 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 319 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -503,19 +507,10 @@ static void igb_get_regs(struct net_device *netdev,
503 regs_buff[119] = adapter->stats.scvpc; 507 regs_buff[119] = adapter->stats.scvpc;
504 regs_buff[120] = adapter->stats.hrmpc; 508 regs_buff[120] = adapter->stats.hrmpc;
505 509
506 /* These should probably be added to e1000_regs.h instead */
507 #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
508 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
509 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
510 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
511 #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
512 #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
513 #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
514
515 for (i = 0; i < 4; i++) 510 for (i = 0; i < 4; i++)
516 regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); 511 regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
517 for (i = 0; i < 4; i++) 512 for (i = 0; i < 4; i++)
518 regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i)); 513 regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
519 for (i = 0; i < 4; i++) 514 for (i = 0; i < 4; i++)
520 regs_buff[129 + i] = rd32(E1000_RDBAL(i)); 515 regs_buff[129 + i] = rd32(E1000_RDBAL(i));
521 for (i = 0; i < 4; i++) 516 for (i = 0; i < 4; i++)
@@ -739,18 +734,18 @@ static int igb_set_ringparam(struct net_device *netdev,
739{ 734{
740 struct igb_adapter *adapter = netdev_priv(netdev); 735 struct igb_adapter *adapter = netdev_priv(netdev);
741 struct igb_ring *temp_ring; 736 struct igb_ring *temp_ring;
742 int i, err; 737 int i, err = 0;
743 u32 new_rx_count, new_tx_count; 738 u32 new_rx_count, new_tx_count;
744 739
745 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 740 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
746 return -EINVAL; 741 return -EINVAL;
747 742
748 new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD); 743 new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
749 new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD); 744 new_rx_count = max(new_rx_count, (u32)IGB_MIN_RXD);
750 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 745 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
751 746
752 new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD); 747 new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
753 new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD); 748 new_tx_count = max(new_tx_count, (u32)IGB_MIN_TXD);
754 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 749 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
755 750
756 if ((new_tx_count == adapter->tx_ring_count) && 751 if ((new_tx_count == adapter->tx_ring_count) &&
@@ -759,18 +754,30 @@ static int igb_set_ringparam(struct net_device *netdev,
759 return 0; 754 return 0;
760 } 755 }
761 756
757 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
758 msleep(1);
759
760 if (!netif_running(adapter->netdev)) {
761 for (i = 0; i < adapter->num_tx_queues; i++)
762 adapter->tx_ring[i].count = new_tx_count;
763 for (i = 0; i < adapter->num_rx_queues; i++)
764 adapter->rx_ring[i].count = new_rx_count;
765 adapter->tx_ring_count = new_tx_count;
766 adapter->rx_ring_count = new_rx_count;
767 goto clear_reset;
768 }
769
762 if (adapter->num_tx_queues > adapter->num_rx_queues) 770 if (adapter->num_tx_queues > adapter->num_rx_queues)
763 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); 771 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
764 else 772 else
765 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); 773 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
766 if (!temp_ring)
767 return -ENOMEM;
768 774
769 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 775 if (!temp_ring) {
770 msleep(1); 776 err = -ENOMEM;
777 goto clear_reset;
778 }
771 779
772 if (netif_running(adapter->netdev)) 780 igb_down(adapter);
773 igb_down(adapter);
774 781
775 /* 782 /*
776 * We can't just free everything and then setup again, 783 * We can't just free everything and then setup again,
@@ -783,7 +790,7 @@ static int igb_set_ringparam(struct net_device *netdev,
783 790
784 for (i = 0; i < adapter->num_tx_queues; i++) { 791 for (i = 0; i < adapter->num_tx_queues; i++) {
785 temp_ring[i].count = new_tx_count; 792 temp_ring[i].count = new_tx_count;
786 err = igb_setup_tx_resources(adapter, &temp_ring[i]); 793 err = igb_setup_tx_resources(&temp_ring[i]);
787 if (err) { 794 if (err) {
788 while (i) { 795 while (i) {
789 i--; 796 i--;
@@ -808,7 +815,7 @@ static int igb_set_ringparam(struct net_device *netdev,
808 815
809 for (i = 0; i < adapter->num_rx_queues; i++) { 816 for (i = 0; i < adapter->num_rx_queues; i++) {
810 temp_ring[i].count = new_rx_count; 817 temp_ring[i].count = new_rx_count;
811 err = igb_setup_rx_resources(adapter, &temp_ring[i]); 818 err = igb_setup_rx_resources(&temp_ring[i]);
812 if (err) { 819 if (err) {
813 while (i) { 820 while (i) {
814 i--; 821 i--;
@@ -827,14 +834,11 @@ static int igb_set_ringparam(struct net_device *netdev,
827 834
828 adapter->rx_ring_count = new_rx_count; 835 adapter->rx_ring_count = new_rx_count;
829 } 836 }
830
831 err = 0;
832err_setup: 837err_setup:
833 if (netif_running(adapter->netdev)) 838 igb_up(adapter);
834 igb_up(adapter);
835
836 clear_bit(__IGB_RESETTING, &adapter->state);
837 vfree(temp_ring); 839 vfree(temp_ring);
840clear_reset:
841 clear_bit(__IGB_RESETTING, &adapter->state);
838 return err; 842 return err;
839} 843}
840 844
@@ -942,7 +946,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
942{ 946{
943 struct e1000_hw *hw = &adapter->hw; 947 struct e1000_hw *hw = &adapter->hw;
944 u32 pat, val; 948 u32 pat, val;
945 u32 _test[] = 949 static const u32 _test[] =
946 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 950 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
947 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 951 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
948 wr32(reg, (_test[pat] & write)); 952 wr32(reg, (_test[pat] & write));
@@ -955,6 +959,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
955 return 1; 959 return 1;
956 } 960 }
957 } 961 }
962
958 return 0; 963 return 0;
959} 964}
960 965
@@ -972,6 +977,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
972 *data = reg; 977 *data = reg;
973 return 1; 978 return 1;
974 } 979 }
980
975 return 0; 981 return 0;
976} 982}
977 983
@@ -994,14 +1000,14 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
994 u32 value, before, after; 1000 u32 value, before, after;
995 u32 i, toggle; 1001 u32 i, toggle;
996 1002
997 toggle = 0x7FFFF3FF;
998
999 switch (adapter->hw.mac.type) { 1003 switch (adapter->hw.mac.type) {
1000 case e1000_82576: 1004 case e1000_82576:
1001 test = reg_test_82576; 1005 test = reg_test_82576;
1006 toggle = 0x7FFFF3FF;
1002 break; 1007 break;
1003 default: 1008 default:
1004 test = reg_test_82575; 1009 test = reg_test_82575;
1010 toggle = 0x7FFFF3FF;
1005 break; 1011 break;
1006 } 1012 }
1007 1013
@@ -1079,8 +1085,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1079 *data = 0; 1085 *data = 0;
1080 /* Read and add up the contents of the EEPROM */ 1086 /* Read and add up the contents of the EEPROM */
1081 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 1087 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1082 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) 1088 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
1083 < 0) {
1084 *data = 1; 1089 *data = 1;
1085 break; 1090 break;
1086 } 1091 }
@@ -1096,8 +1101,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1096 1101
1097static irqreturn_t igb_test_intr(int irq, void *data) 1102static irqreturn_t igb_test_intr(int irq, void *data)
1098{ 1103{
1099 struct net_device *netdev = (struct net_device *) data; 1104 struct igb_adapter *adapter = (struct igb_adapter *) data;
1100 struct igb_adapter *adapter = netdev_priv(netdev);
1101 struct e1000_hw *hw = &adapter->hw; 1105 struct e1000_hw *hw = &adapter->hw;
1102 1106
1103 adapter->test_icr |= rd32(E1000_ICR); 1107 adapter->test_icr |= rd32(E1000_ICR);
@@ -1115,32 +1119,36 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1115 *data = 0; 1119 *data = 0;
1116 1120
1117 /* Hook up test interrupt handler just for this test */ 1121 /* Hook up test interrupt handler just for this test */
1118 if (adapter->msix_entries) 1122 if (adapter->msix_entries) {
1119 /* NOTE: we don't test MSI-X interrupts here, yet */ 1123 if (request_irq(adapter->msix_entries[0].vector,
1120 return 0; 1124 &igb_test_intr, 0, netdev->name, adapter)) {
1121 1125 *data = 1;
1122 if (adapter->flags & IGB_FLAG_HAS_MSI) { 1126 return -1;
1127 }
1128 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
1123 shared_int = false; 1129 shared_int = false;
1124 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { 1130 if (request_irq(irq,
1131 &igb_test_intr, 0, netdev->name, adapter)) {
1125 *data = 1; 1132 *data = 1;
1126 return -1; 1133 return -1;
1127 } 1134 }
1128 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED, 1135 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
1129 netdev->name, netdev)) { 1136 netdev->name, adapter)) {
1130 shared_int = false; 1137 shared_int = false;
1131 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, 1138 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
1132 netdev->name, netdev)) { 1139 netdev->name, adapter)) {
1133 *data = 1; 1140 *data = 1;
1134 return -1; 1141 return -1;
1135 } 1142 }
1136 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", 1143 dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
1137 (shared_int ? "shared" : "unshared")); 1144 (shared_int ? "shared" : "unshared"));
1145
1138 /* Disable all the interrupts */ 1146 /* Disable all the interrupts */
1139 wr32(E1000_IMC, 0xFFFFFFFF); 1147 wr32(E1000_IMC, ~0);
1140 msleep(10); 1148 msleep(10);
1141 1149
1142 /* Define all writable bits for ICS */ 1150 /* Define all writable bits for ICS */
1143 switch(hw->mac.type) { 1151 switch (hw->mac.type) {
1144 case e1000_82575: 1152 case e1000_82575:
1145 ics_mask = 0x37F47EDD; 1153 ics_mask = 0x37F47EDD;
1146 break; 1154 break;
@@ -1230,190 +1238,61 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1230 msleep(10); 1238 msleep(10);
1231 1239
1232 /* Unhook test interrupt handler */ 1240 /* Unhook test interrupt handler */
1233 free_irq(irq, netdev); 1241 if (adapter->msix_entries)
1242 free_irq(adapter->msix_entries[0].vector, adapter);
1243 else
1244 free_irq(irq, adapter);
1234 1245
1235 return *data; 1246 return *data;
1236} 1247}
1237 1248
1238static void igb_free_desc_rings(struct igb_adapter *adapter) 1249static void igb_free_desc_rings(struct igb_adapter *adapter)
1239{ 1250{
1240 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1251 igb_free_tx_resources(&adapter->test_tx_ring);
1241 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1252 igb_free_rx_resources(&adapter->test_rx_ring);
1242 struct pci_dev *pdev = adapter->pdev;
1243 int i;
1244
1245 if (tx_ring->desc && tx_ring->buffer_info) {
1246 for (i = 0; i < tx_ring->count; i++) {
1247 struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
1248 if (buf->dma)
1249 pci_unmap_single(pdev, buf->dma, buf->length,
1250 PCI_DMA_TODEVICE);
1251 if (buf->skb)
1252 dev_kfree_skb(buf->skb);
1253 }
1254 }
1255
1256 if (rx_ring->desc && rx_ring->buffer_info) {
1257 for (i = 0; i < rx_ring->count; i++) {
1258 struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
1259 if (buf->dma)
1260 pci_unmap_single(pdev, buf->dma,
1261 IGB_RXBUFFER_2048,
1262 PCI_DMA_FROMDEVICE);
1263 if (buf->skb)
1264 dev_kfree_skb(buf->skb);
1265 }
1266 }
1267
1268 if (tx_ring->desc) {
1269 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
1270 tx_ring->dma);
1271 tx_ring->desc = NULL;
1272 }
1273 if (rx_ring->desc) {
1274 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
1275 rx_ring->dma);
1276 rx_ring->desc = NULL;
1277 }
1278
1279 kfree(tx_ring->buffer_info);
1280 tx_ring->buffer_info = NULL;
1281 kfree(rx_ring->buffer_info);
1282 rx_ring->buffer_info = NULL;
1283
1284 return;
1285} 1253}
1286 1254
1287static int igb_setup_desc_rings(struct igb_adapter *adapter) 1255static int igb_setup_desc_rings(struct igb_adapter *adapter)
1288{ 1256{
1289 struct e1000_hw *hw = &adapter->hw;
1290 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1257 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1291 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1258 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1292 struct pci_dev *pdev = adapter->pdev; 1259 struct e1000_hw *hw = &adapter->hw;
1293 struct igb_buffer *buffer_info; 1260 int ret_val;
1294 u32 rctl;
1295 int i, ret_val;
1296 1261
1297 /* Setup Tx descriptor ring and Tx buffers */ 1262 /* Setup Tx descriptor ring and Tx buffers */
1263 tx_ring->count = IGB_DEFAULT_TXD;
1264 tx_ring->pdev = adapter->pdev;
1265 tx_ring->netdev = adapter->netdev;
1266 tx_ring->reg_idx = adapter->vfs_allocated_count;
1298 1267
1299 if (!tx_ring->count) 1268 if (igb_setup_tx_resources(tx_ring)) {
1300 tx_ring->count = IGB_DEFAULT_TXD;
1301
1302 tx_ring->buffer_info = kcalloc(tx_ring->count,
1303 sizeof(struct igb_buffer),
1304 GFP_KERNEL);
1305 if (!tx_ring->buffer_info) {
1306 ret_val = 1; 1269 ret_val = 1;
1307 goto err_nomem; 1270 goto err_nomem;
1308 } 1271 }
1309 1272
1310 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1273 igb_setup_tctl(adapter);
1311 tx_ring->size = ALIGN(tx_ring->size, 4096); 1274 igb_configure_tx_ring(adapter, tx_ring);
1312 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1313 &tx_ring->dma);
1314 if (!tx_ring->desc) {
1315 ret_val = 2;
1316 goto err_nomem;
1317 }
1318 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1319
1320 wr32(E1000_TDBAL(0),
1321 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1322 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
1323 wr32(E1000_TDLEN(0),
1324 tx_ring->count * sizeof(union e1000_adv_tx_desc));
1325 wr32(E1000_TDH(0), 0);
1326 wr32(E1000_TDT(0), 0);
1327 wr32(E1000_TCTL,
1328 E1000_TCTL_PSP | E1000_TCTL_EN |
1329 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1330 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1331
1332 for (i = 0; i < tx_ring->count; i++) {
1333 union e1000_adv_tx_desc *tx_desc;
1334 struct sk_buff *skb;
1335 unsigned int size = 1024;
1336
1337 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
1338 skb = alloc_skb(size, GFP_KERNEL);
1339 if (!skb) {
1340 ret_val = 3;
1341 goto err_nomem;
1342 }
1343 skb_put(skb, size);
1344 buffer_info = &tx_ring->buffer_info[i];
1345 buffer_info->skb = skb;
1346 buffer_info->length = skb->len;
1347 buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
1348 PCI_DMA_TODEVICE);
1349 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
1350 tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
1351 E1000_ADVTXD_PAYLEN_SHIFT;
1352 tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
1353 tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
1354 E1000_TXD_CMD_IFCS |
1355 E1000_TXD_CMD_RS |
1356 E1000_ADVTXD_DTYP_DATA |
1357 E1000_ADVTXD_DCMD_DEXT);
1358 }
1359 1275
1360 /* Setup Rx descriptor ring and Rx buffers */ 1276 /* Setup Rx descriptor ring and Rx buffers */
1361 1277 rx_ring->count = IGB_DEFAULT_RXD;
1362 if (!rx_ring->count) 1278 rx_ring->pdev = adapter->pdev;
1363 rx_ring->count = IGB_DEFAULT_RXD; 1279 rx_ring->netdev = adapter->netdev;
1364 1280 rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
1365 rx_ring->buffer_info = kcalloc(rx_ring->count, 1281 rx_ring->reg_idx = adapter->vfs_allocated_count;
1366 sizeof(struct igb_buffer), 1282
1367 GFP_KERNEL); 1283 if (igb_setup_rx_resources(rx_ring)) {
1368 if (!rx_ring->buffer_info) { 1284 ret_val = 3;
1369 ret_val = 4;
1370 goto err_nomem; 1285 goto err_nomem;
1371 } 1286 }
1372 1287
1373 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1288 /* set the default queue to queue 0 of PF */
1374 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1289 wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
1375 &rx_ring->dma);
1376 if (!rx_ring->desc) {
1377 ret_val = 5;
1378 goto err_nomem;
1379 }
1380 rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1381 1290
1382 rctl = rd32(E1000_RCTL); 1291 /* enable receive ring */
1383 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); 1292 igb_setup_rctl(adapter);
1384 wr32(E1000_RDBAL(0), 1293 igb_configure_rx_ring(adapter, rx_ring);
1385 ((u64) rx_ring->dma & 0xFFFFFFFF)); 1294
1386 wr32(E1000_RDBAH(0), 1295 igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
1387 ((u64) rx_ring->dma >> 32));
1388 wr32(E1000_RDLEN(0), rx_ring->size);
1389 wr32(E1000_RDH(0), 0);
1390 wr32(E1000_RDT(0), 0);
1391 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1392 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
1393 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1394 wr32(E1000_RCTL, rctl);
1395 wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
1396
1397 for (i = 0; i < rx_ring->count; i++) {
1398 union e1000_adv_rx_desc *rx_desc;
1399 struct sk_buff *skb;
1400
1401 buffer_info = &rx_ring->buffer_info[i];
1402 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
1403 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
1404 GFP_KERNEL);
1405 if (!skb) {
1406 ret_val = 6;
1407 goto err_nomem;
1408 }
1409 skb_reserve(skb, NET_IP_ALIGN);
1410 buffer_info->skb = skb;
1411 buffer_info->dma = pci_map_single(pdev, skb->data,
1412 IGB_RXBUFFER_2048,
1413 PCI_DMA_FROMDEVICE);
1414 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
1415 memset(skb->data, 0x00, skb->len);
1416 }
1417 1296
1418 return 0; 1297 return 0;
1419 1298
@@ -1489,7 +1368,10 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1489 struct e1000_hw *hw = &adapter->hw; 1368 struct e1000_hw *hw = &adapter->hw;
1490 u32 reg; 1369 u32 reg;
1491 1370
1492 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1371 reg = rd32(E1000_CTRL_EXT);
1372
1373 /* use CTRL_EXT to identify link type as SGMII can appear as copper */
1374 if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
1493 reg = rd32(E1000_RCTL); 1375 reg = rd32(E1000_RCTL);
1494 reg |= E1000_RCTL_LBM_TCVR; 1376 reg |= E1000_RCTL_LBM_TCVR;
1495 wr32(E1000_RCTL, reg); 1377 wr32(E1000_RCTL, reg);
@@ -1520,11 +1402,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1520 wr32(E1000_PCS_LCTL, reg); 1402 wr32(E1000_PCS_LCTL, reg);
1521 1403
1522 return 0; 1404 return 0;
1523 } else if (hw->phy.media_type == e1000_media_type_copper) {
1524 return igb_set_phy_loopback(adapter);
1525 } 1405 }
1526 1406
1527 return 7; 1407 return igb_set_phy_loopback(adapter);
1528} 1408}
1529 1409
1530static void igb_loopback_cleanup(struct igb_adapter *adapter) 1410static void igb_loopback_cleanup(struct igb_adapter *adapter)
@@ -1550,35 +1430,99 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
1550 unsigned int frame_size) 1430 unsigned int frame_size)
1551{ 1431{
1552 memset(skb->data, 0xFF, frame_size); 1432 memset(skb->data, 0xFF, frame_size);
1553 frame_size &= ~1; 1433 frame_size /= 2;
1554 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1434 memset(&skb->data[frame_size], 0xAA, frame_size - 1);
1555 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1435 memset(&skb->data[frame_size + 10], 0xBE, 1);
1556 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1436 memset(&skb->data[frame_size + 12], 0xAF, 1);
1557} 1437}
1558 1438
1559static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1439static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1560{ 1440{
1561 frame_size &= ~1; 1441 frame_size /= 2;
1562 if (*(skb->data + 3) == 0xFF) 1442 if (*(skb->data + 3) == 0xFF) {
1563 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1443 if ((*(skb->data + frame_size + 10) == 0xBE) &&
1564 (*(skb->data + frame_size / 2 + 12) == 0xAF)) 1444 (*(skb->data + frame_size + 12) == 0xAF)) {
1565 return 0; 1445 return 0;
1446 }
1447 }
1566 return 13; 1448 return 13;
1567} 1449}
1568 1450
1451static int igb_clean_test_rings(struct igb_ring *rx_ring,
1452 struct igb_ring *tx_ring,
1453 unsigned int size)
1454{
1455 union e1000_adv_rx_desc *rx_desc;
1456 struct igb_buffer *buffer_info;
1457 int rx_ntc, tx_ntc, count = 0;
1458 u32 staterr;
1459
1460 /* initialize next to clean and descriptor values */
1461 rx_ntc = rx_ring->next_to_clean;
1462 tx_ntc = tx_ring->next_to_clean;
1463 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1464 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1465
1466 while (staterr & E1000_RXD_STAT_DD) {
1467 /* check rx buffer */
1468 buffer_info = &rx_ring->buffer_info[rx_ntc];
1469
1470 /* unmap rx buffer, will be remapped by alloc_rx_buffers */
1471 pci_unmap_single(rx_ring->pdev,
1472 buffer_info->dma,
1473 rx_ring->rx_buffer_len,
1474 PCI_DMA_FROMDEVICE);
1475 buffer_info->dma = 0;
1476
1477 /* verify contents of skb */
1478 if (!igb_check_lbtest_frame(buffer_info->skb, size))
1479 count++;
1480
1481 /* unmap buffer on tx side */
1482 buffer_info = &tx_ring->buffer_info[tx_ntc];
1483 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
1484
1485 /* increment rx/tx next to clean counters */
1486 rx_ntc++;
1487 if (rx_ntc == rx_ring->count)
1488 rx_ntc = 0;
1489 tx_ntc++;
1490 if (tx_ntc == tx_ring->count)
1491 tx_ntc = 0;
1492
1493 /* fetch next descriptor */
1494 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1495 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1496 }
1497
1498 /* re-map buffers to ring, store next to clean values */
1499 igb_alloc_rx_buffers_adv(rx_ring, count);
1500 rx_ring->next_to_clean = rx_ntc;
1501 tx_ring->next_to_clean = tx_ntc;
1502
1503 return count;
1504}
1505
1569static int igb_run_loopback_test(struct igb_adapter *adapter) 1506static int igb_run_loopback_test(struct igb_adapter *adapter)
1570{ 1507{
1571 struct e1000_hw *hw = &adapter->hw;
1572 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1508 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1573 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1509 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1574 struct pci_dev *pdev = adapter->pdev; 1510 int i, j, lc, good_cnt, ret_val = 0;
1575 int i, j, k, l, lc, good_cnt; 1511 unsigned int size = 1024;
1576 int ret_val = 0; 1512 netdev_tx_t tx_ret_val;
1577 unsigned long time; 1513 struct sk_buff *skb;
1578 1514
1579 wr32(E1000_RDT(0), rx_ring->count - 1); 1515 /* allocate test skb */
1516 skb = alloc_skb(size, GFP_KERNEL);
1517 if (!skb)
1518 return 11;
1580 1519
1581 /* Calculate the loop count based on the largest descriptor ring 1520 /* place data into test skb */
1521 igb_create_lbtest_frame(skb, size);
1522 skb_put(skb, size);
1523
1524 /*
1525 * Calculate the loop count based on the largest descriptor ring
1582 * The idea is to wrap the largest ring a number of times using 64 1526 * The idea is to wrap the largest ring a number of times using 64
1583 * send/receive pairs during each loop 1527 * send/receive pairs during each loop
1584 */ 1528 */
@@ -1588,50 +1532,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1588 else 1532 else
1589 lc = ((rx_ring->count / 64) * 2) + 1; 1533 lc = ((rx_ring->count / 64) * 2) + 1;
1590 1534
1591 k = l = 0;
1592 for (j = 0; j <= lc; j++) { /* loop count loop */ 1535 for (j = 0; j <= lc; j++) { /* loop count loop */
1593 for (i = 0; i < 64; i++) { /* send the packets */ 1536 /* reset count of good packets */
1594 igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1595 1024);
1596 pci_dma_sync_single_for_device(pdev,
1597 tx_ring->buffer_info[k].dma,
1598 tx_ring->buffer_info[k].length,
1599 PCI_DMA_TODEVICE);
1600 k++;
1601 if (k == tx_ring->count)
1602 k = 0;
1603 }
1604 wr32(E1000_TDT(0), k);
1605 msleep(200);
1606 time = jiffies; /* set the start time for the receive */
1607 good_cnt = 0; 1537 good_cnt = 0;
1608 do { /* receive the sent packets */ 1538
1609 pci_dma_sync_single_for_cpu(pdev, 1539 /* place 64 packets on the transmit queue*/
1610 rx_ring->buffer_info[l].dma, 1540 for (i = 0; i < 64; i++) {
1611 IGB_RXBUFFER_2048, 1541 skb_get(skb);
1612 PCI_DMA_FROMDEVICE); 1542 tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
1613 1543 if (tx_ret_val == NETDEV_TX_OK)
1614 ret_val = igb_check_lbtest_frame(
1615 rx_ring->buffer_info[l].skb, 1024);
1616 if (!ret_val)
1617 good_cnt++; 1544 good_cnt++;
1618 l++; 1545 }
1619 if (l == rx_ring->count) 1546
1620 l = 0;
1621 /* time + 20 msecs (200 msecs on 2.4) is more than
1622 * enough time to complete the receives, if it's
1623 * exceeded, break and error off
1624 */
1625 } while (good_cnt < 64 && jiffies < (time + 20));
1626 if (good_cnt != 64) { 1547 if (good_cnt != 64) {
1627 ret_val = 13; /* ret_val is the same as mis-compare */ 1548 ret_val = 12;
1628 break; 1549 break;
1629 } 1550 }
1630 if (jiffies >= (time + 20)) { 1551
1631 ret_val = 14; /* error code for time out error */ 1552 /* allow 200 milliseconds for packets to go from tx to rx */
1553 msleep(200);
1554
1555 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
1556 if (good_cnt != 64) {
1557 ret_val = 13;
1632 break; 1558 break;
1633 } 1559 }
1634 } /* end loop count loop */ 1560 } /* end loop count loop */
1561
1562 /* free the original skb */
1563 kfree_skb(skb);
1564
1635 return ret_val; 1565 return ret_val;
1636} 1566}
1637 1567
@@ -1684,8 +1614,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1684 if (hw->mac.autoneg) 1614 if (hw->mac.autoneg)
1685 msleep(4000); 1615 msleep(4000);
1686 1616
1687 if (!(rd32(E1000_STATUS) & 1617 if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
1688 E1000_STATUS_LU))
1689 *data = 1; 1618 *data = 1;
1690 } 1619 }
1691 return *data; 1620 return *data;
@@ -1867,7 +1796,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1867 adapter->wol |= E1000_WUFC_BC; 1796 adapter->wol |= E1000_WUFC_BC;
1868 if (wol->wolopts & WAKE_MAGIC) 1797 if (wol->wolopts & WAKE_MAGIC)
1869 adapter->wol |= E1000_WUFC_MAG; 1798 adapter->wol |= E1000_WUFC_MAG;
1870
1871 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1799 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1872 1800
1873 return 0; 1801 return 0;
@@ -1880,12 +1808,19 @@ static int igb_phys_id(struct net_device *netdev, u32 data)
1880{ 1808{
1881 struct igb_adapter *adapter = netdev_priv(netdev); 1809 struct igb_adapter *adapter = netdev_priv(netdev);
1882 struct e1000_hw *hw = &adapter->hw; 1810 struct e1000_hw *hw = &adapter->hw;
1811 unsigned long timeout;
1812
1813 timeout = data * 1000;
1883 1814
1884 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 1815 /*
1885 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); 1816 * msleep_interruptable only accepts unsigned int so we are limited
1817 * in how long a duration we can wait
1818 */
1819 if (!timeout || timeout > UINT_MAX)
1820 timeout = UINT_MAX;
1886 1821
1887 igb_blink_led(hw); 1822 igb_blink_led(hw);
1888 msleep_interruptible(data * 1000); 1823 msleep_interruptible(timeout);
1889 1824
1890 igb_led_off(hw); 1825 igb_led_off(hw);
1891 clear_bit(IGB_LED_ON, &adapter->led_status); 1826 clear_bit(IGB_LED_ON, &adapter->led_status);
@@ -1898,7 +1833,6 @@ static int igb_set_coalesce(struct net_device *netdev,
1898 struct ethtool_coalesce *ec) 1833 struct ethtool_coalesce *ec)
1899{ 1834{
1900 struct igb_adapter *adapter = netdev_priv(netdev); 1835 struct igb_adapter *adapter = netdev_priv(netdev);
1901 struct e1000_hw *hw = &adapter->hw;
1902 int i; 1836 int i;
1903 1837
1904 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || 1838 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1907,17 +1841,39 @@ static int igb_set_coalesce(struct net_device *netdev,
1907 (ec->rx_coalesce_usecs == 2)) 1841 (ec->rx_coalesce_usecs == 2))
1908 return -EINVAL; 1842 return -EINVAL;
1909 1843
1844 if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1845 ((ec->tx_coalesce_usecs > 3) &&
1846 (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1847 (ec->tx_coalesce_usecs == 2))
1848 return -EINVAL;
1849
1850 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
1851 return -EINVAL;
1852
1910 /* convert to rate of irq's per second */ 1853 /* convert to rate of irq's per second */
1911 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { 1854 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
1912 adapter->itr_setting = ec->rx_coalesce_usecs; 1855 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
1913 adapter->itr = IGB_START_ITR; 1856 else
1914 } else { 1857 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
1915 adapter->itr_setting = ec->rx_coalesce_usecs << 2; 1858
1916 adapter->itr = adapter->itr_setting; 1859 /* convert to rate of irq's per second */
1917 } 1860 if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
1861 adapter->tx_itr_setting = adapter->rx_itr_setting;
1862 else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
1863 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
1864 else
1865 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
1918 1866
1919 for (i = 0; i < adapter->num_rx_queues; i++) 1867 for (i = 0; i < adapter->num_q_vectors; i++) {
1920 wr32(adapter->rx_ring[i].itr_register, adapter->itr); 1868 struct igb_q_vector *q_vector = adapter->q_vector[i];
1869 if (q_vector->rx_ring)
1870 q_vector->itr_val = adapter->rx_itr_setting;
1871 else
1872 q_vector->itr_val = adapter->tx_itr_setting;
1873 if (q_vector->itr_val && q_vector->itr_val <= 3)
1874 q_vector->itr_val = IGB_START_ITR;
1875 q_vector->set_itr = 1;
1876 }
1921 1877
1922 return 0; 1878 return 0;
1923} 1879}
@@ -1927,15 +1883,21 @@ static int igb_get_coalesce(struct net_device *netdev,
1927{ 1883{
1928 struct igb_adapter *adapter = netdev_priv(netdev); 1884 struct igb_adapter *adapter = netdev_priv(netdev);
1929 1885
1930 if (adapter->itr_setting <= 3) 1886 if (adapter->rx_itr_setting <= 3)
1931 ec->rx_coalesce_usecs = adapter->itr_setting; 1887 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
1932 else 1888 else
1933 ec->rx_coalesce_usecs = adapter->itr_setting >> 2; 1889 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
1890
1891 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
1892 if (adapter->tx_itr_setting <= 3)
1893 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
1894 else
1895 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
1896 }
1934 1897
1935 return 0; 1898 return 0;
1936} 1899}
1937 1900
1938
1939static int igb_nway_reset(struct net_device *netdev) 1901static int igb_nway_reset(struct net_device *netdev)
1940{ 1902{
1941 struct igb_adapter *adapter = netdev_priv(netdev); 1903 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1968,6 +1930,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1968 char *p = NULL; 1930 char *p = NULL;
1969 1931
1970 igb_update_stats(adapter); 1932 igb_update_stats(adapter);
1933
1971 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1934 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1972 switch (igb_gstrings_stats[i].type) { 1935 switch (igb_gstrings_stats[i].type) {
1973 case NETDEV_STATS: 1936 case NETDEV_STATS:
@@ -2021,6 +1984,8 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2021 p += ETH_GSTRING_LEN; 1984 p += ETH_GSTRING_LEN;
2022 sprintf(p, "tx_queue_%u_bytes", i); 1985 sprintf(p, "tx_queue_%u_bytes", i);
2023 p += ETH_GSTRING_LEN; 1986 p += ETH_GSTRING_LEN;
1987 sprintf(p, "tx_queue_%u_restart", i);
1988 p += ETH_GSTRING_LEN;
2024 } 1989 }
2025 for (i = 0; i < adapter->num_rx_queues; i++) { 1990 for (i = 0; i < adapter->num_rx_queues; i++) {
2026 sprintf(p, "rx_queue_%u_packets", i); 1991 sprintf(p, "rx_queue_%u_packets", i);
@@ -2029,6 +1994,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2029 p += ETH_GSTRING_LEN; 1994 p += ETH_GSTRING_LEN;
2030 sprintf(p, "rx_queue_%u_drops", i); 1995 sprintf(p, "rx_queue_%u_drops", i);
2031 p += ETH_GSTRING_LEN; 1996 p += ETH_GSTRING_LEN;
1997 sprintf(p, "rx_queue_%u_csum_err", i);
1998 p += ETH_GSTRING_LEN;
1999 sprintf(p, "rx_queue_%u_alloc_failed", i);
2000 p += ETH_GSTRING_LEN;
2032 } 2001 }
2033/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ 2002/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2034 break; 2003 break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 2ffe0997b838..b044c985df0b 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -82,6 +82,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
82static int igb_setup_all_rx_resources(struct igb_adapter *); 82static int igb_setup_all_rx_resources(struct igb_adapter *);
83static void igb_free_all_tx_resources(struct igb_adapter *); 83static void igb_free_all_tx_resources(struct igb_adapter *);
84static void igb_free_all_rx_resources(struct igb_adapter *); 84static void igb_free_all_rx_resources(struct igb_adapter *);
85static void igb_setup_mrqc(struct igb_adapter *);
85void igb_update_stats(struct igb_adapter *); 86void igb_update_stats(struct igb_adapter *);
86static int igb_probe(struct pci_dev *, const struct pci_device_id *); 87static int igb_probe(struct pci_dev *, const struct pci_device_id *);
87static void __devexit igb_remove(struct pci_dev *pdev); 88static void __devexit igb_remove(struct pci_dev *pdev);
@@ -90,7 +91,6 @@ static int igb_open(struct net_device *);
90static int igb_close(struct net_device *); 91static int igb_close(struct net_device *);
91static void igb_configure_tx(struct igb_adapter *); 92static void igb_configure_tx(struct igb_adapter *);
92static void igb_configure_rx(struct igb_adapter *); 93static void igb_configure_rx(struct igb_adapter *);
93static void igb_setup_rctl(struct igb_adapter *);
94static void igb_clean_all_tx_rings(struct igb_adapter *); 94static void igb_clean_all_tx_rings(struct igb_adapter *);
95static void igb_clean_all_rx_rings(struct igb_adapter *); 95static void igb_clean_all_rx_rings(struct igb_adapter *);
96static void igb_clean_tx_ring(struct igb_ring *); 96static void igb_clean_tx_ring(struct igb_ring *);
@@ -99,11 +99,7 @@ static void igb_set_rx_mode(struct net_device *);
99static void igb_update_phy_info(unsigned long); 99static void igb_update_phy_info(unsigned long);
100static void igb_watchdog(unsigned long); 100static void igb_watchdog(unsigned long);
101static void igb_watchdog_task(struct work_struct *); 101static void igb_watchdog_task(struct work_struct *);
102static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, 102static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
103 struct net_device *,
104 struct igb_ring *);
105static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
106 struct net_device *);
107static struct net_device_stats *igb_get_stats(struct net_device *); 103static struct net_device_stats *igb_get_stats(struct net_device *);
108static int igb_change_mtu(struct net_device *, int); 104static int igb_change_mtu(struct net_device *, int);
109static int igb_set_mac(struct net_device *, void *); 105static int igb_set_mac(struct net_device *, void *);
@@ -111,17 +107,14 @@ static void igb_set_uta(struct igb_adapter *adapter);
111static irqreturn_t igb_intr(int irq, void *); 107static irqreturn_t igb_intr(int irq, void *);
112static irqreturn_t igb_intr_msi(int irq, void *); 108static irqreturn_t igb_intr_msi(int irq, void *);
113static irqreturn_t igb_msix_other(int irq, void *); 109static irqreturn_t igb_msix_other(int irq, void *);
114static irqreturn_t igb_msix_rx(int irq, void *); 110static irqreturn_t igb_msix_ring(int irq, void *);
115static irqreturn_t igb_msix_tx(int irq, void *);
116#ifdef CONFIG_IGB_DCA 111#ifdef CONFIG_IGB_DCA
117static void igb_update_rx_dca(struct igb_ring *); 112static void igb_update_dca(struct igb_q_vector *);
118static void igb_update_tx_dca(struct igb_ring *);
119static void igb_setup_dca(struct igb_adapter *); 113static void igb_setup_dca(struct igb_adapter *);
120#endif /* CONFIG_IGB_DCA */ 114#endif /* CONFIG_IGB_DCA */
121static bool igb_clean_tx_irq(struct igb_ring *); 115static bool igb_clean_tx_irq(struct igb_q_vector *);
122static int igb_poll(struct napi_struct *, int); 116static int igb_poll(struct napi_struct *, int);
123static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 117static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
124static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
125static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 118static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
126static void igb_tx_timeout(struct net_device *); 119static void igb_tx_timeout(struct net_device *);
127static void igb_reset_task(struct work_struct *); 120static void igb_reset_task(struct work_struct *);
@@ -132,43 +125,10 @@ static void igb_restore_vlan(struct igb_adapter *);
132static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); 125static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
133static void igb_ping_all_vfs(struct igb_adapter *); 126static void igb_ping_all_vfs(struct igb_adapter *);
134static void igb_msg_task(struct igb_adapter *); 127static void igb_msg_task(struct igb_adapter *);
135static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
136static void igb_vmm_control(struct igb_adapter *); 128static void igb_vmm_control(struct igb_adapter *);
137static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); 129static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
138static void igb_restore_vf_multicasts(struct igb_adapter *adapter); 130static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
139 131
140static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
141{
142 u32 reg_data;
143
144 reg_data = rd32(E1000_VMOLR(vfn));
145 reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
146 E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
147 E1000_VMOLR_AUPE | /* Accept untagged packets */
148 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
149 wr32(E1000_VMOLR(vfn), reg_data);
150}
151
152static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
153 int vfn)
154{
155 struct e1000_hw *hw = &adapter->hw;
156 u32 vmolr;
157
158 /* if it isn't the PF check to see if VFs are enabled and
159 * increase the size to support vlan tags */
160 if (vfn < adapter->vfs_allocated_count &&
161 adapter->vf_data[vfn].vlans_enabled)
162 size += VLAN_TAG_SIZE;
163
164 vmolr = rd32(E1000_VMOLR(vfn));
165 vmolr &= ~E1000_VMOLR_RLPML_MASK;
166 vmolr |= size | E1000_VMOLR_LPE;
167 wr32(E1000_VMOLR(vfn), vmolr);
168
169 return 0;
170}
171
172#ifdef CONFIG_PM 132#ifdef CONFIG_PM
173static int igb_suspend(struct pci_dev *, pm_message_t); 133static int igb_suspend(struct pci_dev *, pm_message_t);
174static int igb_resume(struct pci_dev *); 134static int igb_resume(struct pci_dev *);
@@ -219,46 +179,12 @@ static struct pci_driver igb_driver = {
219 .err_handler = &igb_err_handler 179 .err_handler = &igb_err_handler
220}; 180};
221 181
222static int global_quad_port_a; /* global quad port a indication */
223
224MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); 182MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
225MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); 183MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
226MODULE_LICENSE("GPL"); 184MODULE_LICENSE("GPL");
227MODULE_VERSION(DRV_VERSION); 185MODULE_VERSION(DRV_VERSION);
228 186
229/** 187/**
230 * Scale the NIC clock cycle by a large factor so that
231 * relatively small clock corrections can be added or
232 * substracted at each clock tick. The drawbacks of a
233 * large factor are a) that the clock register overflows
234 * more quickly (not such a big deal) and b) that the
235 * increment per tick has to fit into 24 bits.
236 *
237 * Note that
238 * TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
239 * IGB_TSYNC_SCALE
240 * TIMINCA += TIMINCA * adjustment [ppm] / 1e9
241 *
242 * The base scale factor is intentionally a power of two
243 * so that the division in %struct timecounter can be done with
244 * a shift.
245 */
246#define IGB_TSYNC_SHIFT (19)
247#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
248
249/**
250 * The duration of one clock cycle of the NIC.
251 *
252 * @todo This hard-coded value is part of the specification and might change
253 * in future hardware revisions. Add revision check.
254 */
255#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
256
257#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
258# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
259#endif
260
261/**
262 * igb_read_clock - read raw cycle counter (to be used by time counter) 188 * igb_read_clock - read raw cycle counter (to be used by time counter)
263 */ 189 */
264static cycle_t igb_read_clock(const struct cyclecounter *tc) 190static cycle_t igb_read_clock(const struct cyclecounter *tc)
@@ -266,11 +192,11 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
266 struct igb_adapter *adapter = 192 struct igb_adapter *adapter =
267 container_of(tc, struct igb_adapter, cycles); 193 container_of(tc, struct igb_adapter, cycles);
268 struct e1000_hw *hw = &adapter->hw; 194 struct e1000_hw *hw = &adapter->hw;
269 u64 stamp; 195 u64 stamp = 0;
270 196 int shift = 0;
271 stamp = rd32(E1000_SYSTIML);
272 stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;
273 197
198 stamp |= (u64)rd32(E1000_SYSTIML) << shift;
199 stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
274 return stamp; 200 return stamp;
275} 201}
276 202
@@ -311,17 +237,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
311#endif 237#endif
312 238
313/** 239/**
314 * igb_desc_unused - calculate if we have unused descriptors
315 **/
316static int igb_desc_unused(struct igb_ring *ring)
317{
318 if (ring->next_to_clean > ring->next_to_use)
319 return ring->next_to_clean - ring->next_to_use - 1;
320
321 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
322}
323
324/**
325 * igb_init_module - Driver Registration Routine 240 * igb_init_module - Driver Registration Routine
326 * 241 *
327 * igb_init_module is the first routine called when the driver is 242 * igb_init_module is the first routine called when the driver is
@@ -335,12 +250,9 @@ static int __init igb_init_module(void)
335 250
336 printk(KERN_INFO "%s\n", igb_copyright); 251 printk(KERN_INFO "%s\n", igb_copyright);
337 252
338 global_quad_port_a = 0;
339
340#ifdef CONFIG_IGB_DCA 253#ifdef CONFIG_IGB_DCA
341 dca_register_notify(&dca_notifier); 254 dca_register_notify(&dca_notifier);
342#endif 255#endif
343
344 ret = pci_register_driver(&igb_driver); 256 ret = pci_register_driver(&igb_driver);
345 return ret; 257 return ret;
346} 258}
@@ -373,8 +285,8 @@ module_exit(igb_exit_module);
373 **/ 285 **/
374static void igb_cache_ring_register(struct igb_adapter *adapter) 286static void igb_cache_ring_register(struct igb_adapter *adapter)
375{ 287{
376 int i; 288 int i = 0, j = 0;
377 unsigned int rbase_offset = adapter->vfs_allocated_count; 289 u32 rbase_offset = adapter->vfs_allocated_count;
378 290
379 switch (adapter->hw.mac.type) { 291 switch (adapter->hw.mac.type) {
380 case e1000_82576: 292 case e1000_82576:
@@ -383,23 +295,36 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
383 * In order to avoid collision we start at the first free queue 295 * In order to avoid collision we start at the first free queue
384 * and continue consuming queues in the same sequence 296 * and continue consuming queues in the same sequence
385 */ 297 */
386 for (i = 0; i < adapter->num_rx_queues; i++) 298 if (adapter->vfs_allocated_count) {
387 adapter->rx_ring[i].reg_idx = rbase_offset + 299 for (; i < adapter->num_rx_queues; i++)
388 Q_IDX_82576(i); 300 adapter->rx_ring[i].reg_idx = rbase_offset +
389 for (i = 0; i < adapter->num_tx_queues; i++) 301 Q_IDX_82576(i);
390 adapter->tx_ring[i].reg_idx = rbase_offset + 302 for (; j < adapter->num_tx_queues; j++)
391 Q_IDX_82576(i); 303 adapter->tx_ring[j].reg_idx = rbase_offset +
392 break; 304 Q_IDX_82576(j);
305 }
393 case e1000_82575: 306 case e1000_82575:
394 default: 307 default:
395 for (i = 0; i < adapter->num_rx_queues; i++) 308 for (; i < adapter->num_rx_queues; i++)
396 adapter->rx_ring[i].reg_idx = i; 309 adapter->rx_ring[i].reg_idx = rbase_offset + i;
397 for (i = 0; i < adapter->num_tx_queues; i++) 310 for (; j < adapter->num_tx_queues; j++)
398 adapter->tx_ring[i].reg_idx = i; 311 adapter->tx_ring[j].reg_idx = rbase_offset + j;
399 break; 312 break;
400 } 313 }
401} 314}
402 315
316static void igb_free_queues(struct igb_adapter *adapter)
317{
318 kfree(adapter->tx_ring);
319 kfree(adapter->rx_ring);
320
321 adapter->tx_ring = NULL;
322 adapter->rx_ring = NULL;
323
324 adapter->num_rx_queues = 0;
325 adapter->num_tx_queues = 0;
326}
327
403/** 328/**
404 * igb_alloc_queues - Allocate memory for all rings 329 * igb_alloc_queues - Allocate memory for all rings
405 * @adapter: board private structure to initialize 330 * @adapter: board private structure to initialize
@@ -414,59 +339,61 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
414 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 339 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
415 sizeof(struct igb_ring), GFP_KERNEL); 340 sizeof(struct igb_ring), GFP_KERNEL);
416 if (!adapter->tx_ring) 341 if (!adapter->tx_ring)
417 return -ENOMEM; 342 goto err;
418 343
419 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 344 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
420 sizeof(struct igb_ring), GFP_KERNEL); 345 sizeof(struct igb_ring), GFP_KERNEL);
421 if (!adapter->rx_ring) { 346 if (!adapter->rx_ring)
422 kfree(adapter->tx_ring); 347 goto err;
423 return -ENOMEM;
424 }
425
426 adapter->rx_ring->buddy = adapter->tx_ring;
427 348
428 for (i = 0; i < adapter->num_tx_queues; i++) { 349 for (i = 0; i < adapter->num_tx_queues; i++) {
429 struct igb_ring *ring = &(adapter->tx_ring[i]); 350 struct igb_ring *ring = &(adapter->tx_ring[i]);
430 ring->count = adapter->tx_ring_count; 351 ring->count = adapter->tx_ring_count;
431 ring->adapter = adapter;
432 ring->queue_index = i; 352 ring->queue_index = i;
353 ring->pdev = adapter->pdev;
354 ring->netdev = adapter->netdev;
355 /* For 82575, context index must be unique per ring. */
356 if (adapter->hw.mac.type == e1000_82575)
357 ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
433 } 358 }
359
434 for (i = 0; i < adapter->num_rx_queues; i++) { 360 for (i = 0; i < adapter->num_rx_queues; i++) {
435 struct igb_ring *ring = &(adapter->rx_ring[i]); 361 struct igb_ring *ring = &(adapter->rx_ring[i]);
436 ring->count = adapter->rx_ring_count; 362 ring->count = adapter->rx_ring_count;
437 ring->adapter = adapter;
438 ring->queue_index = i; 363 ring->queue_index = i;
439 ring->itr_register = E1000_ITR; 364 ring->pdev = adapter->pdev;
440 365 ring->netdev = adapter->netdev;
441 /* set a default napi handler for each rx_ring */ 366 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
442 netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64); 367 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
368 /* set flag indicating ring supports SCTP checksum offload */
369 if (adapter->hw.mac.type >= e1000_82576)
370 ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
443 } 371 }
444 372
445 igb_cache_ring_register(adapter); 373 igb_cache_ring_register(adapter);
446 return 0;
447}
448
449static void igb_free_queues(struct igb_adapter *adapter)
450{
451 int i;
452 374
453 for (i = 0; i < adapter->num_rx_queues; i++) 375 return 0;
454 netif_napi_del(&adapter->rx_ring[i].napi);
455 376
456 adapter->num_rx_queues = 0; 377err:
457 adapter->num_tx_queues = 0; 378 igb_free_queues(adapter);
458 379
459 kfree(adapter->tx_ring); 380 return -ENOMEM;
460 kfree(adapter->rx_ring);
461} 381}
462 382
463#define IGB_N0_QUEUE -1 383#define IGB_N0_QUEUE -1
464static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, 384static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
465 int tx_queue, int msix_vector)
466{ 385{
467 u32 msixbm = 0; 386 u32 msixbm = 0;
387 struct igb_adapter *adapter = q_vector->adapter;
468 struct e1000_hw *hw = &adapter->hw; 388 struct e1000_hw *hw = &adapter->hw;
469 u32 ivar, index; 389 u32 ivar, index;
390 int rx_queue = IGB_N0_QUEUE;
391 int tx_queue = IGB_N0_QUEUE;
392
393 if (q_vector->rx_ring)
394 rx_queue = q_vector->rx_ring->reg_idx;
395 if (q_vector->tx_ring)
396 tx_queue = q_vector->tx_ring->reg_idx;
470 397
471 switch (hw->mac.type) { 398 switch (hw->mac.type) {
472 case e1000_82575: 399 case e1000_82575:
@@ -474,16 +401,12 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
474 bitmask for the EICR/EIMS/EIMC registers. To assign one 401 bitmask for the EICR/EIMS/EIMC registers. To assign one
475 or more queues to a vector, we write the appropriate bits 402 or more queues to a vector, we write the appropriate bits
476 into the MSIXBM register for that vector. */ 403 into the MSIXBM register for that vector. */
477 if (rx_queue > IGB_N0_QUEUE) { 404 if (rx_queue > IGB_N0_QUEUE)
478 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 405 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
479 adapter->rx_ring[rx_queue].eims_value = msixbm; 406 if (tx_queue > IGB_N0_QUEUE)
480 }
481 if (tx_queue > IGB_N0_QUEUE) {
482 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 407 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
483 adapter->tx_ring[tx_queue].eims_value =
484 E1000_EICR_TX_QUEUE0 << tx_queue;
485 }
486 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 408 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
409 q_vector->eims_value = msixbm;
487 break; 410 break;
488 case e1000_82576: 411 case e1000_82576:
489 /* 82576 uses a table-based method for assigning vectors. 412 /* 82576 uses a table-based method for assigning vectors.
@@ -491,35 +414,34 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
491 a vector number along with a "valid" bit. Sadly, the layout 414 a vector number along with a "valid" bit. Sadly, the layout
492 of the table is somewhat counterintuitive. */ 415 of the table is somewhat counterintuitive. */
493 if (rx_queue > IGB_N0_QUEUE) { 416 if (rx_queue > IGB_N0_QUEUE) {
494 index = (rx_queue >> 1) + adapter->vfs_allocated_count; 417 index = (rx_queue & 0x7);
495 ivar = array_rd32(E1000_IVAR0, index); 418 ivar = array_rd32(E1000_IVAR0, index);
496 if (rx_queue & 0x1) { 419 if (rx_queue < 8) {
497 /* vector goes into third byte of register */
498 ivar = ivar & 0xFF00FFFF;
499 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
500 } else {
501 /* vector goes into low byte of register */ 420 /* vector goes into low byte of register */
502 ivar = ivar & 0xFFFFFF00; 421 ivar = ivar & 0xFFFFFF00;
503 ivar |= msix_vector | E1000_IVAR_VALID; 422 ivar |= msix_vector | E1000_IVAR_VALID;
423 } else {
424 /* vector goes into third byte of register */
425 ivar = ivar & 0xFF00FFFF;
426 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
504 } 427 }
505 adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
506 array_wr32(E1000_IVAR0, index, ivar); 428 array_wr32(E1000_IVAR0, index, ivar);
507 } 429 }
508 if (tx_queue > IGB_N0_QUEUE) { 430 if (tx_queue > IGB_N0_QUEUE) {
509 index = (tx_queue >> 1) + adapter->vfs_allocated_count; 431 index = (tx_queue & 0x7);
510 ivar = array_rd32(E1000_IVAR0, index); 432 ivar = array_rd32(E1000_IVAR0, index);
511 if (tx_queue & 0x1) { 433 if (tx_queue < 8) {
512 /* vector goes into high byte of register */
513 ivar = ivar & 0x00FFFFFF;
514 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
515 } else {
516 /* vector goes into second byte of register */ 434 /* vector goes into second byte of register */
517 ivar = ivar & 0xFFFF00FF; 435 ivar = ivar & 0xFFFF00FF;
518 ivar |= (msix_vector | E1000_IVAR_VALID) << 8; 436 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
437 } else {
438 /* vector goes into high byte of register */
439 ivar = ivar & 0x00FFFFFF;
440 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
519 } 441 }
520 adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
521 array_wr32(E1000_IVAR0, index, ivar); 442 array_wr32(E1000_IVAR0, index, ivar);
522 } 443 }
444 q_vector->eims_value = 1 << msix_vector;
523 break; 445 break;
524 default: 446 default:
525 BUG(); 447 BUG();
@@ -540,43 +462,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
540 struct e1000_hw *hw = &adapter->hw; 462 struct e1000_hw *hw = &adapter->hw;
541 463
542 adapter->eims_enable_mask = 0; 464 adapter->eims_enable_mask = 0;
543 if (hw->mac.type == e1000_82576)
544 /* Turn on MSI-X capability first, or our settings
545 * won't stick. And it will take days to debug. */
546 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
547 E1000_GPIE_PBA | E1000_GPIE_EIAME |
548 E1000_GPIE_NSICR);
549
550 for (i = 0; i < adapter->num_tx_queues; i++) {
551 struct igb_ring *tx_ring = &adapter->tx_ring[i];
552 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
553 adapter->eims_enable_mask |= tx_ring->eims_value;
554 if (tx_ring->itr_val)
555 writel(tx_ring->itr_val,
556 hw->hw_addr + tx_ring->itr_register);
557 else
558 writel(1, hw->hw_addr + tx_ring->itr_register);
559 }
560
561 for (i = 0; i < adapter->num_rx_queues; i++) {
562 struct igb_ring *rx_ring = &adapter->rx_ring[i];
563 rx_ring->buddy = NULL;
564 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
565 adapter->eims_enable_mask |= rx_ring->eims_value;
566 if (rx_ring->itr_val)
567 writel(rx_ring->itr_val,
568 hw->hw_addr + rx_ring->itr_register);
569 else
570 writel(1, hw->hw_addr + rx_ring->itr_register);
571 }
572
573 465
574 /* set vector for other causes, i.e. link changes */ 466 /* set vector for other causes, i.e. link changes */
575 switch (hw->mac.type) { 467 switch (hw->mac.type) {
576 case e1000_82575: 468 case e1000_82575:
577 array_wr32(E1000_MSIXBM(0), vector++,
578 E1000_EIMS_OTHER);
579
580 tmp = rd32(E1000_CTRL_EXT); 469 tmp = rd32(E1000_CTRL_EXT);
581 /* enable MSI-X PBA support*/ 470 /* enable MSI-X PBA support*/
582 tmp |= E1000_CTRL_EXT_PBA_CLR; 471 tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -586,22 +475,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
586 tmp |= E1000_CTRL_EXT_IRCA; 475 tmp |= E1000_CTRL_EXT_IRCA;
587 476
588 wr32(E1000_CTRL_EXT, tmp); 477 wr32(E1000_CTRL_EXT, tmp);
589 adapter->eims_enable_mask |= E1000_EIMS_OTHER; 478
479 /* enable msix_other interrupt */
480 array_wr32(E1000_MSIXBM(0), vector++,
481 E1000_EIMS_OTHER);
590 adapter->eims_other = E1000_EIMS_OTHER; 482 adapter->eims_other = E1000_EIMS_OTHER;
591 483
592 break; 484 break;
593 485
594 case e1000_82576: 486 case e1000_82576:
487 /* Turn on MSI-X capability first, or our settings
488 * won't stick. And it will take days to debug. */
489 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
490 E1000_GPIE_PBA | E1000_GPIE_EIAME |
491 E1000_GPIE_NSICR);
492
493 /* enable msix_other interrupt */
494 adapter->eims_other = 1 << vector;
595 tmp = (vector++ | E1000_IVAR_VALID) << 8; 495 tmp = (vector++ | E1000_IVAR_VALID) << 8;
596 wr32(E1000_IVAR_MISC, tmp);
597 496
598 adapter->eims_enable_mask = (1 << (vector)) - 1; 497 wr32(E1000_IVAR_MISC, tmp);
599 adapter->eims_other = 1 << (vector - 1);
600 break; 498 break;
601 default: 499 default:
602 /* do nothing, since nothing else supports MSI-X */ 500 /* do nothing, since nothing else supports MSI-X */
603 break; 501 break;
604 } /* switch (hw->mac.type) */ 502 } /* switch (hw->mac.type) */
503
504 adapter->eims_enable_mask |= adapter->eims_other;
505
506 for (i = 0; i < adapter->num_q_vectors; i++) {
507 struct igb_q_vector *q_vector = adapter->q_vector[i];
508 igb_assign_vector(q_vector, vector++);
509 adapter->eims_enable_mask |= q_vector->eims_value;
510 }
511
605 wrfl(); 512 wrfl();
606} 513}
607 514
@@ -614,43 +521,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
614static int igb_request_msix(struct igb_adapter *adapter) 521static int igb_request_msix(struct igb_adapter *adapter)
615{ 522{
616 struct net_device *netdev = adapter->netdev; 523 struct net_device *netdev = adapter->netdev;
524 struct e1000_hw *hw = &adapter->hw;
617 int i, err = 0, vector = 0; 525 int i, err = 0, vector = 0;
618 526
619 vector = 0; 527 err = request_irq(adapter->msix_entries[vector].vector,
620 528 &igb_msix_other, 0, netdev->name, adapter);
621 for (i = 0; i < adapter->num_tx_queues; i++) { 529 if (err)
622 struct igb_ring *ring = &(adapter->tx_ring[i]); 530 goto out;
623 sprintf(ring->name, "%s-tx-%d", netdev->name, i); 531 vector++;
624 err = request_irq(adapter->msix_entries[vector].vector, 532
625 &igb_msix_tx, 0, ring->name, 533 for (i = 0; i < adapter->num_q_vectors; i++) {
626 &(adapter->tx_ring[i])); 534 struct igb_q_vector *q_vector = adapter->q_vector[i];
627 if (err) 535
628 goto out; 536 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
629 ring->itr_register = E1000_EITR(0) + (vector << 2); 537
630 ring->itr_val = 976; /* ~4000 ints/sec */ 538 if (q_vector->rx_ring && q_vector->tx_ring)
631 vector++; 539 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
632 } 540 q_vector->rx_ring->queue_index);
633 for (i = 0; i < adapter->num_rx_queues; i++) { 541 else if (q_vector->tx_ring)
634 struct igb_ring *ring = &(adapter->rx_ring[i]); 542 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
635 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 543 q_vector->tx_ring->queue_index);
636 sprintf(ring->name, "%s-rx-%d", netdev->name, i); 544 else if (q_vector->rx_ring)
545 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
546 q_vector->rx_ring->queue_index);
637 else 547 else
638 memcpy(ring->name, netdev->name, IFNAMSIZ); 548 sprintf(q_vector->name, "%s-unused", netdev->name);
549
639 err = request_irq(adapter->msix_entries[vector].vector, 550 err = request_irq(adapter->msix_entries[vector].vector,
640 &igb_msix_rx, 0, ring->name, 551 &igb_msix_ring, 0, q_vector->name,
641 &(adapter->rx_ring[i])); 552 q_vector);
642 if (err) 553 if (err)
643 goto out; 554 goto out;
644 ring->itr_register = E1000_EITR(0) + (vector << 2);
645 ring->itr_val = adapter->itr;
646 vector++; 555 vector++;
647 } 556 }
648 557
649 err = request_irq(adapter->msix_entries[vector].vector,
650 &igb_msix_other, 0, netdev->name, netdev);
651 if (err)
652 goto out;
653
654 igb_configure_msix(adapter); 558 igb_configure_msix(adapter);
655 return 0; 559 return 0;
656out: 560out:
@@ -663,11 +567,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
663 pci_disable_msix(adapter->pdev); 567 pci_disable_msix(adapter->pdev);
664 kfree(adapter->msix_entries); 568 kfree(adapter->msix_entries);
665 adapter->msix_entries = NULL; 569 adapter->msix_entries = NULL;
666 } else if (adapter->flags & IGB_FLAG_HAS_MSI) 570 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
667 pci_disable_msi(adapter->pdev); 571 pci_disable_msi(adapter->pdev);
668 return; 572 }
669} 573}
670 574
575/**
576 * igb_free_q_vectors - Free memory allocated for interrupt vectors
577 * @adapter: board private structure to initialize
578 *
579 * This function frees the memory allocated to the q_vectors. In addition if
580 * NAPI is enabled it will delete any references to the NAPI struct prior
581 * to freeing the q_vector.
582 **/
583static void igb_free_q_vectors(struct igb_adapter *adapter)
584{
585 int v_idx;
586
587 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
588 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
589 adapter->q_vector[v_idx] = NULL;
590 netif_napi_del(&q_vector->napi);
591 kfree(q_vector);
592 }
593 adapter->num_q_vectors = 0;
594}
595
596/**
597 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
598 *
599 * This function resets the device so that it has 0 rx queues, tx queues, and
600 * MSI-X interrupts allocated.
601 */
602static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
603{
604 igb_free_queues(adapter);
605 igb_free_q_vectors(adapter);
606 igb_reset_interrupt_capability(adapter);
607}
671 608
672/** 609/**
673 * igb_set_interrupt_capability - set MSI or MSI-X if supported 610 * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -681,11 +618,20 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
681 int numvecs, i; 618 int numvecs, i;
682 619
683 /* Number of supported queues. */ 620 /* Number of supported queues. */
684 /* Having more queues than CPUs doesn't make sense. */
685 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 621 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
686 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus()); 622 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
687 623
688 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1; 624 /* start with one vector for every rx queue */
625 numvecs = adapter->num_rx_queues;
626
627 /* if tx handler is seperate add 1 for every tx queue */
628 numvecs += adapter->num_tx_queues;
629
630 /* store the number of vectors reserved for queues */
631 adapter->num_q_vectors = numvecs;
632
633 /* add 1 vector for link status interrupts */
634 numvecs++;
689 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 635 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
690 GFP_KERNEL); 636 GFP_KERNEL);
691 if (!adapter->msix_entries) 637 if (!adapter->msix_entries)
@@ -719,8 +665,11 @@ msi_only:
719 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); 665 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
720 } 666 }
721#endif 667#endif
668 adapter->vfs_allocated_count = 0;
669 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
722 adapter->num_rx_queues = 1; 670 adapter->num_rx_queues = 1;
723 adapter->num_tx_queues = 1; 671 adapter->num_tx_queues = 1;
672 adapter->num_q_vectors = 1;
724 if (!pci_enable_msi(adapter->pdev)) 673 if (!pci_enable_msi(adapter->pdev))
725 adapter->flags |= IGB_FLAG_HAS_MSI; 674 adapter->flags |= IGB_FLAG_HAS_MSI;
726out: 675out:
@@ -730,6 +679,143 @@ out:
730} 679}
731 680
732/** 681/**
682 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
683 * @adapter: board private structure to initialize
684 *
685 * We allocate one q_vector per queue interrupt. If allocation fails we
686 * return -ENOMEM.
687 **/
688static int igb_alloc_q_vectors(struct igb_adapter *adapter)
689{
690 struct igb_q_vector *q_vector;
691 struct e1000_hw *hw = &adapter->hw;
692 int v_idx;
693
694 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
695 q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
696 if (!q_vector)
697 goto err_out;
698 q_vector->adapter = adapter;
699 q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
700 q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
701 q_vector->itr_val = IGB_START_ITR;
702 q_vector->set_itr = 1;
703 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
704 adapter->q_vector[v_idx] = q_vector;
705 }
706 return 0;
707
708err_out:
709 while (v_idx) {
710 v_idx--;
711 q_vector = adapter->q_vector[v_idx];
712 netif_napi_del(&q_vector->napi);
713 kfree(q_vector);
714 adapter->q_vector[v_idx] = NULL;
715 }
716 return -ENOMEM;
717}
718
719static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
720 int ring_idx, int v_idx)
721{
722 struct igb_q_vector *q_vector;
723
724 q_vector = adapter->q_vector[v_idx];
725 q_vector->rx_ring = &adapter->rx_ring[ring_idx];
726 q_vector->rx_ring->q_vector = q_vector;
727 q_vector->itr_val = adapter->rx_itr_setting;
728 if (q_vector->itr_val && q_vector->itr_val <= 3)
729 q_vector->itr_val = IGB_START_ITR;
730}
731
732static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
733 int ring_idx, int v_idx)
734{
735 struct igb_q_vector *q_vector;
736
737 q_vector = adapter->q_vector[v_idx];
738 q_vector->tx_ring = &adapter->tx_ring[ring_idx];
739 q_vector->tx_ring->q_vector = q_vector;
740 q_vector->itr_val = adapter->tx_itr_setting;
741 if (q_vector->itr_val && q_vector->itr_val <= 3)
742 q_vector->itr_val = IGB_START_ITR;
743}
744
745/**
746 * igb_map_ring_to_vector - maps allocated queues to vectors
747 *
748 * This function maps the recently allocated queues to vectors.
749 **/
750static int igb_map_ring_to_vector(struct igb_adapter *adapter)
751{
752 int i;
753 int v_idx = 0;
754
755 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
756 (adapter->num_q_vectors < adapter->num_tx_queues))
757 return -ENOMEM;
758
759 if (adapter->num_q_vectors >=
760 (adapter->num_rx_queues + adapter->num_tx_queues)) {
761 for (i = 0; i < adapter->num_rx_queues; i++)
762 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
763 for (i = 0; i < adapter->num_tx_queues; i++)
764 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
765 } else {
766 for (i = 0; i < adapter->num_rx_queues; i++) {
767 if (i < adapter->num_tx_queues)
768 igb_map_tx_ring_to_vector(adapter, i, v_idx);
769 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
770 }
771 for (; i < adapter->num_tx_queues; i++)
772 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
773 }
774 return 0;
775}
776
777/**
778 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
779 *
780 * This function initializes the interrupts and allocates all of the queues.
781 **/
782static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
783{
784 struct pci_dev *pdev = adapter->pdev;
785 int err;
786
787 igb_set_interrupt_capability(adapter);
788
789 err = igb_alloc_q_vectors(adapter);
790 if (err) {
791 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
792 goto err_alloc_q_vectors;
793 }
794
795 err = igb_alloc_queues(adapter);
796 if (err) {
797 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
798 goto err_alloc_queues;
799 }
800
801 err = igb_map_ring_to_vector(adapter);
802 if (err) {
803 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
804 goto err_map_queues;
805 }
806
807
808 return 0;
809err_map_queues:
810 igb_free_queues(adapter);
811err_alloc_queues:
812 igb_free_q_vectors(adapter);
813err_alloc_q_vectors:
814 igb_reset_interrupt_capability(adapter);
815 return err;
816}
817
818/**
733 * igb_request_irq - initialize interrupts 819 * igb_request_irq - initialize interrupts
734 * 820 *
735 * Attempts to configure interrupts using the best available 821 * Attempts to configure interrupts using the best available
@@ -738,6 +824,7 @@ out:
738static int igb_request_irq(struct igb_adapter *adapter) 824static int igb_request_irq(struct igb_adapter *adapter)
739{ 825{
740 struct net_device *netdev = adapter->netdev; 826 struct net_device *netdev = adapter->netdev;
827 struct pci_dev *pdev = adapter->pdev;
741 struct e1000_hw *hw = &adapter->hw; 828 struct e1000_hw *hw = &adapter->hw;
742 int err = 0; 829 int err = 0;
743 830
@@ -746,18 +833,36 @@ static int igb_request_irq(struct igb_adapter *adapter)
746 if (!err) 833 if (!err)
747 goto request_done; 834 goto request_done;
748 /* fall back to MSI */ 835 /* fall back to MSI */
749 igb_reset_interrupt_capability(adapter); 836 igb_clear_interrupt_scheme(adapter);
750 if (!pci_enable_msi(adapter->pdev)) 837 if (!pci_enable_msi(adapter->pdev))
751 adapter->flags |= IGB_FLAG_HAS_MSI; 838 adapter->flags |= IGB_FLAG_HAS_MSI;
752 igb_free_all_tx_resources(adapter); 839 igb_free_all_tx_resources(adapter);
753 igb_free_all_rx_resources(adapter); 840 igb_free_all_rx_resources(adapter);
841 adapter->num_tx_queues = 1;
754 adapter->num_rx_queues = 1; 842 adapter->num_rx_queues = 1;
755 igb_alloc_queues(adapter); 843 adapter->num_q_vectors = 1;
844 err = igb_alloc_q_vectors(adapter);
845 if (err) {
846 dev_err(&pdev->dev,
847 "Unable to allocate memory for vectors\n");
848 goto request_done;
849 }
850 err = igb_alloc_queues(adapter);
851 if (err) {
852 dev_err(&pdev->dev,
853 "Unable to allocate memory for queues\n");
854 igb_free_q_vectors(adapter);
855 goto request_done;
856 }
857 igb_setup_all_tx_resources(adapter);
858 igb_setup_all_rx_resources(adapter);
756 } else { 859 } else {
757 switch (hw->mac.type) { 860 switch (hw->mac.type) {
758 case e1000_82575: 861 case e1000_82575:
759 wr32(E1000_MSIXBM(0), 862 wr32(E1000_MSIXBM(0),
760 (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER)); 863 (E1000_EICR_RX_QUEUE0 |
864 E1000_EICR_TX_QUEUE0 |
865 E1000_EIMS_OTHER));
761 break; 866 break;
762 case e1000_82576: 867 case e1000_82576:
763 wr32(E1000_IVAR0, E1000_IVAR_VALID); 868 wr32(E1000_IVAR0, E1000_IVAR_VALID);
@@ -769,16 +874,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
769 874
770 if (adapter->flags & IGB_FLAG_HAS_MSI) { 875 if (adapter->flags & IGB_FLAG_HAS_MSI) {
771 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, 876 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
772 netdev->name, netdev); 877 netdev->name, adapter);
773 if (!err) 878 if (!err)
774 goto request_done; 879 goto request_done;
880
775 /* fall back to legacy interrupts */ 881 /* fall back to legacy interrupts */
776 igb_reset_interrupt_capability(adapter); 882 igb_reset_interrupt_capability(adapter);
777 adapter->flags &= ~IGB_FLAG_HAS_MSI; 883 adapter->flags &= ~IGB_FLAG_HAS_MSI;
778 } 884 }
779 885
780 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, 886 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
781 netdev->name, netdev); 887 netdev->name, adapter);
782 888
783 if (err) 889 if (err)
784 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", 890 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -790,23 +896,19 @@ request_done:
790 896
791static void igb_free_irq(struct igb_adapter *adapter) 897static void igb_free_irq(struct igb_adapter *adapter)
792{ 898{
793 struct net_device *netdev = adapter->netdev;
794
795 if (adapter->msix_entries) { 899 if (adapter->msix_entries) {
796 int vector = 0, i; 900 int vector = 0, i;
797 901
798 for (i = 0; i < adapter->num_tx_queues; i++) 902 free_irq(adapter->msix_entries[vector++].vector, adapter);
799 free_irq(adapter->msix_entries[vector++].vector,
800 &(adapter->tx_ring[i]));
801 for (i = 0; i < adapter->num_rx_queues; i++)
802 free_irq(adapter->msix_entries[vector++].vector,
803 &(adapter->rx_ring[i]));
804 903
805 free_irq(adapter->msix_entries[vector++].vector, netdev); 904 for (i = 0; i < adapter->num_q_vectors; i++) {
806 return; 905 struct igb_q_vector *q_vector = adapter->q_vector[i];
906 free_irq(adapter->msix_entries[vector++].vector,
907 q_vector);
908 }
909 } else {
910 free_irq(adapter->pdev->irq, adapter);
807 } 911 }
808
809 free_irq(adapter->pdev->irq, netdev);
810} 912}
811 913
812/** 914/**
@@ -817,6 +919,11 @@ static void igb_irq_disable(struct igb_adapter *adapter)
817{ 919{
818 struct e1000_hw *hw = &adapter->hw; 920 struct e1000_hw *hw = &adapter->hw;
819 921
922 /*
923 * we need to be careful when disabling interrupts. The VFs are also
924 * mapped into these registers and so clearing the bits can cause
925 * issues on the VF drivers so we only need to clear what we set
926 */
820 if (adapter->msix_entries) { 927 if (adapter->msix_entries) {
821 u32 regval = rd32(E1000_EIAM); 928 u32 regval = rd32(E1000_EIAM);
822 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); 929 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
@@ -840,15 +947,17 @@ static void igb_irq_enable(struct igb_adapter *adapter)
840 struct e1000_hw *hw = &adapter->hw; 947 struct e1000_hw *hw = &adapter->hw;
841 948
842 if (adapter->msix_entries) { 949 if (adapter->msix_entries) {
950 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
843 u32 regval = rd32(E1000_EIAC); 951 u32 regval = rd32(E1000_EIAC);
844 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); 952 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
845 regval = rd32(E1000_EIAM); 953 regval = rd32(E1000_EIAM);
846 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); 954 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
847 wr32(E1000_EIMS, adapter->eims_enable_mask); 955 wr32(E1000_EIMS, adapter->eims_enable_mask);
848 if (adapter->vfs_allocated_count) 956 if (adapter->vfs_allocated_count) {
849 wr32(E1000_MBVFIMR, 0xFF); 957 wr32(E1000_MBVFIMR, 0xFF);
850 wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB | 958 ims |= E1000_IMS_VMMB;
851 E1000_IMS_DOUTSYNC)); 959 }
960 wr32(E1000_IMS, ims);
852 } else { 961 } else {
853 wr32(E1000_IMS, IMS_ENABLE_MASK); 962 wr32(E1000_IMS, IMS_ENABLE_MASK);
854 wr32(E1000_IAM, IMS_ENABLE_MASK); 963 wr32(E1000_IAM, IMS_ENABLE_MASK);
@@ -857,24 +966,23 @@ static void igb_irq_enable(struct igb_adapter *adapter)
857 966
858static void igb_update_mng_vlan(struct igb_adapter *adapter) 967static void igb_update_mng_vlan(struct igb_adapter *adapter)
859{ 968{
860 struct net_device *netdev = adapter->netdev; 969 struct e1000_hw *hw = &adapter->hw;
861 u16 vid = adapter->hw.mng_cookie.vlan_id; 970 u16 vid = adapter->hw.mng_cookie.vlan_id;
862 u16 old_vid = adapter->mng_vlan_id; 971 u16 old_vid = adapter->mng_vlan_id;
863 if (adapter->vlgrp) {
864 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
865 if (adapter->hw.mng_cookie.status &
866 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
867 igb_vlan_rx_add_vid(netdev, vid);
868 adapter->mng_vlan_id = vid;
869 } else
870 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
871 972
872 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && 973 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
873 (vid != old_vid) && 974 /* add VID to filter table */
874 !vlan_group_get_device(adapter->vlgrp, old_vid)) 975 igb_vfta_set(hw, vid, true);
875 igb_vlan_rx_kill_vid(netdev, old_vid); 976 adapter->mng_vlan_id = vid;
876 } else 977 } else {
877 adapter->mng_vlan_id = vid; 978 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
979 }
980
981 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
982 (vid != old_vid) &&
983 !vlan_group_get_device(adapter->vlgrp, old_vid)) {
984 /* remove VID from filter table */
985 igb_vfta_set(hw, old_vid, false);
878 } 986 }
879} 987}
880 988
@@ -898,7 +1006,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
898 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 1006 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
899} 1007}
900 1008
901
902/** 1009/**
903 * igb_get_hw_control - get control of the h/w from f/w 1010 * igb_get_hw_control - get control of the h/w from f/w
904 * @adapter: address of board private structure 1011 * @adapter: address of board private structure
@@ -933,8 +1040,11 @@ static void igb_configure(struct igb_adapter *adapter)
933 1040
934 igb_restore_vlan(adapter); 1041 igb_restore_vlan(adapter);
935 1042
936 igb_configure_tx(adapter); 1043 igb_setup_tctl(adapter);
1044 igb_setup_mrqc(adapter);
937 igb_setup_rctl(adapter); 1045 igb_setup_rctl(adapter);
1046
1047 igb_configure_tx(adapter);
938 igb_configure_rx(adapter); 1048 igb_configure_rx(adapter);
939 1049
940 igb_rx_fifo_flush_82575(&adapter->hw); 1050 igb_rx_fifo_flush_82575(&adapter->hw);
@@ -956,7 +1066,6 @@ static void igb_configure(struct igb_adapter *adapter)
956 * igb_up - Open the interface and prepare it to handle traffic 1066 * igb_up - Open the interface and prepare it to handle traffic
957 * @adapter: board private structure 1067 * @adapter: board private structure
958 **/ 1068 **/
959
960int igb_up(struct igb_adapter *adapter) 1069int igb_up(struct igb_adapter *adapter)
961{ 1070{
962 struct e1000_hw *hw = &adapter->hw; 1071 struct e1000_hw *hw = &adapter->hw;
@@ -967,29 +1076,37 @@ int igb_up(struct igb_adapter *adapter)
967 1076
968 clear_bit(__IGB_DOWN, &adapter->state); 1077 clear_bit(__IGB_DOWN, &adapter->state);
969 1078
970 for (i = 0; i < adapter->num_rx_queues; i++) 1079 for (i = 0; i < adapter->num_q_vectors; i++) {
971 napi_enable(&adapter->rx_ring[i].napi); 1080 struct igb_q_vector *q_vector = adapter->q_vector[i];
1081 napi_enable(&q_vector->napi);
1082 }
972 if (adapter->msix_entries) 1083 if (adapter->msix_entries)
973 igb_configure_msix(adapter); 1084 igb_configure_msix(adapter);
974 1085
975 igb_vmm_control(adapter);
976 igb_set_vmolr(hw, adapter->vfs_allocated_count);
977
978 /* Clear any pending interrupts. */ 1086 /* Clear any pending interrupts. */
979 rd32(E1000_ICR); 1087 rd32(E1000_ICR);
980 igb_irq_enable(adapter); 1088 igb_irq_enable(adapter);
981 1089
1090 /* notify VFs that reset has been completed */
1091 if (adapter->vfs_allocated_count) {
1092 u32 reg_data = rd32(E1000_CTRL_EXT);
1093 reg_data |= E1000_CTRL_EXT_PFRSTD;
1094 wr32(E1000_CTRL_EXT, reg_data);
1095 }
1096
982 netif_tx_start_all_queues(adapter->netdev); 1097 netif_tx_start_all_queues(adapter->netdev);
983 1098
984 /* Fire a link change interrupt to start the watchdog. */ 1099 /* start the watchdog. */
985 wr32(E1000_ICS, E1000_ICS_LSC); 1100 hw->mac.get_link_status = 1;
1101 schedule_work(&adapter->watchdog_task);
1102
986 return 0; 1103 return 0;
987} 1104}
988 1105
989void igb_down(struct igb_adapter *adapter) 1106void igb_down(struct igb_adapter *adapter)
990{ 1107{
991 struct e1000_hw *hw = &adapter->hw;
992 struct net_device *netdev = adapter->netdev; 1108 struct net_device *netdev = adapter->netdev;
1109 struct e1000_hw *hw = &adapter->hw;
993 u32 tctl, rctl; 1110 u32 tctl, rctl;
994 int i; 1111 int i;
995 1112
@@ -1012,8 +1129,10 @@ void igb_down(struct igb_adapter *adapter)
1012 wrfl(); 1129 wrfl();
1013 msleep(10); 1130 msleep(10);
1014 1131
1015 for (i = 0; i < adapter->num_rx_queues; i++) 1132 for (i = 0; i < adapter->num_q_vectors; i++) {
1016 napi_disable(&adapter->rx_ring[i].napi); 1133 struct igb_q_vector *q_vector = adapter->q_vector[i];
1134 napi_disable(&q_vector->napi);
1135 }
1017 1136
1018 igb_irq_disable(adapter); 1137 igb_irq_disable(adapter);
1019 1138
@@ -1052,6 +1171,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
1052 1171
1053void igb_reset(struct igb_adapter *adapter) 1172void igb_reset(struct igb_adapter *adapter)
1054{ 1173{
1174 struct pci_dev *pdev = adapter->pdev;
1055 struct e1000_hw *hw = &adapter->hw; 1175 struct e1000_hw *hw = &adapter->hw;
1056 struct e1000_mac_info *mac = &hw->mac; 1176 struct e1000_mac_info *mac = &hw->mac;
1057 struct e1000_fc_info *fc = &hw->fc; 1177 struct e1000_fc_info *fc = &hw->fc;
@@ -1063,7 +1183,8 @@ void igb_reset(struct igb_adapter *adapter)
1063 */ 1183 */
1064 switch (mac->type) { 1184 switch (mac->type) {
1065 case e1000_82576: 1185 case e1000_82576:
1066 pba = E1000_PBA_64K; 1186 pba = rd32(E1000_RXPBS);
1187 pba &= E1000_RXPBS_SIZE_MASK_82576;
1067 break; 1188 break;
1068 case e1000_82575: 1189 case e1000_82575:
1069 default: 1190 default:
@@ -1138,10 +1259,10 @@ void igb_reset(struct igb_adapter *adapter)
1138 if (adapter->vfs_allocated_count) { 1259 if (adapter->vfs_allocated_count) {
1139 int i; 1260 int i;
1140 for (i = 0 ; i < adapter->vfs_allocated_count; i++) 1261 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1141 adapter->vf_data[i].clear_to_send = false; 1262 adapter->vf_data[i].flags = 0;
1142 1263
1143 /* ping all the active vfs to let them know we are going down */ 1264 /* ping all the active vfs to let them know we are going down */
1144 igb_ping_all_vfs(adapter); 1265 igb_ping_all_vfs(adapter);
1145 1266
1146 /* disable transmits and receives */ 1267 /* disable transmits and receives */
1147 wr32(E1000_VFRE, 0); 1268 wr32(E1000_VFRE, 0);
@@ -1149,23 +1270,23 @@ void igb_reset(struct igb_adapter *adapter)
1149 } 1270 }
1150 1271
1151 /* Allow time for pending master requests to run */ 1272 /* Allow time for pending master requests to run */
1152 adapter->hw.mac.ops.reset_hw(&adapter->hw); 1273 hw->mac.ops.reset_hw(hw);
1153 wr32(E1000_WUC, 0); 1274 wr32(E1000_WUC, 0);
1154 1275
1155 if (adapter->hw.mac.ops.init_hw(&adapter->hw)) 1276 if (hw->mac.ops.init_hw(hw))
1156 dev_err(&adapter->pdev->dev, "Hardware Error\n"); 1277 dev_err(&pdev->dev, "Hardware Error\n");
1157 1278
1158 igb_update_mng_vlan(adapter); 1279 igb_update_mng_vlan(adapter);
1159 1280
1160 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1281 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1161 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1282 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1162 1283
1163 igb_reset_adaptive(&adapter->hw); 1284 igb_reset_adaptive(hw);
1164 igb_get_phy_info(&adapter->hw); 1285 igb_get_phy_info(hw);
1165} 1286}
1166 1287
1167static const struct net_device_ops igb_netdev_ops = { 1288static const struct net_device_ops igb_netdev_ops = {
1168 .ndo_open = igb_open, 1289 .ndo_open = igb_open,
1169 .ndo_stop = igb_close, 1290 .ndo_stop = igb_close,
1170 .ndo_start_xmit = igb_xmit_frame_adv, 1291 .ndo_start_xmit = igb_xmit_frame_adv,
1171 .ndo_get_stats = igb_get_stats, 1292 .ndo_get_stats = igb_get_stats,
@@ -1201,10 +1322,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1201 struct net_device *netdev; 1322 struct net_device *netdev;
1202 struct igb_adapter *adapter; 1323 struct igb_adapter *adapter;
1203 struct e1000_hw *hw; 1324 struct e1000_hw *hw;
1325 u16 eeprom_data = 0;
1326 static int global_quad_port_a; /* global quad port a indication */
1204 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1327 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1205 unsigned long mmio_start, mmio_len; 1328 unsigned long mmio_start, mmio_len;
1206 int err, pci_using_dac; 1329 int err, pci_using_dac;
1207 u16 eeprom_data = 0;
1208 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1330 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1209 u32 part_num; 1331 u32 part_num;
1210 1332
@@ -1281,8 +1403,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1281 hw->subsystem_vendor_id = pdev->subsystem_vendor; 1403 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1282 hw->subsystem_device_id = pdev->subsystem_device; 1404 hw->subsystem_device_id = pdev->subsystem_device;
1283 1405
1284 /* setup the private structure */
1285 hw->back = adapter;
1286 /* Copy the default MAC, PHY and NVM function pointers */ 1406 /* Copy the default MAC, PHY and NVM function pointers */
1287 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 1407 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1288 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 1408 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -1292,46 +1412,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1292 if (err) 1412 if (err)
1293 goto err_sw_init; 1413 goto err_sw_init;
1294 1414
1295#ifdef CONFIG_PCI_IOV
1296 /* since iov functionality isn't critical to base device function we
1297 * can accept failure. If it fails we don't allow iov to be enabled */
1298 if (hw->mac.type == e1000_82576) {
1299 /* 82576 supports a maximum of 7 VFs in addition to the PF */
1300 unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
1301 int i;
1302 unsigned char mac_addr[ETH_ALEN];
1303
1304 if (num_vfs) {
1305 adapter->vf_data = kcalloc(num_vfs,
1306 sizeof(struct vf_data_storage),
1307 GFP_KERNEL);
1308 if (!adapter->vf_data) {
1309 dev_err(&pdev->dev,
1310 "Could not allocate VF private data - "
1311 "IOV enable failed\n");
1312 } else {
1313 err = pci_enable_sriov(pdev, num_vfs);
1314 if (!err) {
1315 adapter->vfs_allocated_count = num_vfs;
1316 dev_info(&pdev->dev,
1317 "%d vfs allocated\n",
1318 num_vfs);
1319 for (i = 0;
1320 i < adapter->vfs_allocated_count;
1321 i++) {
1322 random_ether_addr(mac_addr);
1323 igb_set_vf_mac(adapter, i,
1324 mac_addr);
1325 }
1326 } else {
1327 kfree(adapter->vf_data);
1328 adapter->vf_data = NULL;
1329 }
1330 }
1331 }
1332 }
1333
1334#endif
1335 /* setup the private structure */ 1415 /* setup the private structure */
1336 err = igb_sw_init(adapter); 1416 err = igb_sw_init(adapter);
1337 if (err) 1417 if (err)
@@ -1339,16 +1419,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1339 1419
1340 igb_get_bus_info_pcie(hw); 1420 igb_get_bus_info_pcie(hw);
1341 1421
1342 /* set flags */
1343 switch (hw->mac.type) {
1344 case e1000_82575:
1345 adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
1346 break;
1347 case e1000_82576:
1348 default:
1349 break;
1350 }
1351
1352 hw->phy.autoneg_wait_to_complete = false; 1422 hw->phy.autoneg_wait_to_complete = false;
1353 hw->mac.adaptive_ifs = true; 1423 hw->mac.adaptive_ifs = true;
1354 1424
@@ -1372,7 +1442,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1372 netdev->features |= NETIF_F_IPV6_CSUM; 1442 netdev->features |= NETIF_F_IPV6_CSUM;
1373 netdev->features |= NETIF_F_TSO; 1443 netdev->features |= NETIF_F_TSO;
1374 netdev->features |= NETIF_F_TSO6; 1444 netdev->features |= NETIF_F_TSO6;
1375
1376 netdev->features |= NETIF_F_GRO; 1445 netdev->features |= NETIF_F_GRO;
1377 1446
1378 netdev->vlan_features |= NETIF_F_TSO; 1447 netdev->vlan_features |= NETIF_F_TSO;
@@ -1384,10 +1453,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1384 if (pci_using_dac) 1453 if (pci_using_dac)
1385 netdev->features |= NETIF_F_HIGHDMA; 1454 netdev->features |= NETIF_F_HIGHDMA;
1386 1455
1387 if (adapter->hw.mac.type == e1000_82576) 1456 if (hw->mac.type >= e1000_82576)
1388 netdev->features |= NETIF_F_SCTP_CSUM; 1457 netdev->features |= NETIF_F_SCTP_CSUM;
1389 1458
1390 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1459 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
1391 1460
1392 /* before reading the NVM, reset the controller to put the device in a 1461 /* before reading the NVM, reset the controller to put the device in a
1393 * known good starting state */ 1462 * known good starting state */
@@ -1429,9 +1498,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1429 hw->fc.requested_mode = e1000_fc_default; 1498 hw->fc.requested_mode = e1000_fc_default;
1430 hw->fc.current_mode = e1000_fc_default; 1499 hw->fc.current_mode = e1000_fc_default;
1431 1500
1432 adapter->itr_setting = IGB_DEFAULT_ITR;
1433 adapter->itr = IGB_START_ITR;
1434
1435 igb_validate_mdi_setting(hw); 1501 igb_validate_mdi_setting(hw);
1436 1502
1437 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, 1503 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
@@ -1498,66 +1564,64 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1498 dev_info(&pdev->dev, "DCA enabled\n"); 1564 dev_info(&pdev->dev, "DCA enabled\n");
1499 igb_setup_dca(adapter); 1565 igb_setup_dca(adapter);
1500 } 1566 }
1501#endif
1502 1567
1503 /*
1504 * Initialize hardware timer: we keep it running just in case
1505 * that some program needs it later on.
1506 */
1507 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1508 adapter->cycles.read = igb_read_clock;
1509 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1510 adapter->cycles.mult = 1;
1511 adapter->cycles.shift = IGB_TSYNC_SHIFT;
1512 wr32(E1000_TIMINCA,
1513 (1<<24) |
1514 IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
1515#if 0
1516 /*
1517 * Avoid rollover while we initialize by resetting the time counter.
1518 */
1519 wr32(E1000_SYSTIML, 0x00000000);
1520 wr32(E1000_SYSTIMH, 0x00000000);
1521#else
1522 /*
1523 * Set registers so that rollover occurs soon to test this.
1524 */
1525 wr32(E1000_SYSTIML, 0x00000000);
1526 wr32(E1000_SYSTIMH, 0xFF800000);
1527#endif 1568#endif
1528 wrfl(); 1569 switch (hw->mac.type) {
1529 timecounter_init(&adapter->clock, 1570 case e1000_82576:
1530 &adapter->cycles, 1571 /*
1531 ktime_to_ns(ktime_get_real())); 1572 * Initialize hardware timer: we keep it running just in case
1532 1573 * that some program needs it later on.
1533 /* 1574 */
1534 * Synchronize our NIC clock against system wall clock. NIC 1575 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1535 * time stamp reading requires ~3us per sample, each sample 1576 adapter->cycles.read = igb_read_clock;
1536 * was pretty stable even under load => only require 10 1577 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1537 * samples for each offset comparison. 1578 adapter->cycles.mult = 1;
1538 */ 1579 /**
1539 memset(&adapter->compare, 0, sizeof(adapter->compare)); 1580 * Scale the NIC clock cycle by a large factor so that
1540 adapter->compare.source = &adapter->clock; 1581 * relatively small clock corrections can be added or
1541 adapter->compare.target = ktime_get_real; 1582 * substracted at each clock tick. The drawbacks of a large
1542 adapter->compare.num_samples = 10; 1583 * factor are a) that the clock register overflows more quickly
1543 timecompare_update(&adapter->compare, 0); 1584 * (not such a big deal) and b) that the increment per tick has
1544 1585 * to fit into 24 bits. As a result we need to use a shift of
1545#ifdef DEBUG 1586 * 19 so we can fit a value of 16 into the TIMINCA register.
1546 { 1587 */
1547 char buffer[160]; 1588 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1548 printk(KERN_DEBUG 1589 wr32(E1000_TIMINCA,
1549 "igb: %s: hw %p initialized timer\n", 1590 (1 << E1000_TIMINCA_16NS_SHIFT) |
1550 igb_get_time_str(adapter, buffer), 1591 (16 << IGB_82576_TSYNC_SHIFT));
1551 &adapter->hw); 1592
1593 /* Set registers so that rollover occurs soon to test this. */
1594 wr32(E1000_SYSTIML, 0x00000000);
1595 wr32(E1000_SYSTIMH, 0xFF800000);
1596 wrfl();
1597
1598 timecounter_init(&adapter->clock,
1599 &adapter->cycles,
1600 ktime_to_ns(ktime_get_real()));
1601 /*
1602 * Synchronize our NIC clock against system wall clock. NIC
1603 * time stamp reading requires ~3us per sample, each sample
1604 * was pretty stable even under load => only require 10
1605 * samples for each offset comparison.
1606 */
1607 memset(&adapter->compare, 0, sizeof(adapter->compare));
1608 adapter->compare.source = &adapter->clock;
1609 adapter->compare.target = ktime_get_real;
1610 adapter->compare.num_samples = 10;
1611 timecompare_update(&adapter->compare, 0);
1612 break;
1613 case e1000_82575:
1614 /* 82575 does not support timesync */
1615 default:
1616 break;
1552 } 1617 }
1553#endif
1554 1618
1555 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1619 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1556 /* print bus type/speed/width info */ 1620 /* print bus type/speed/width info */
1557 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1621 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
1558 netdev->name, 1622 netdev->name,
1559 ((hw->bus.speed == e1000_bus_speed_2500) 1623 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1560 ? "2.5Gb/s" : "unknown"), 1624 "unknown"),
1561 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 1625 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1562 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 1626 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1563 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : 1627 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
@@ -1584,15 +1648,14 @@ err_eeprom:
1584 1648
1585 if (hw->flash_address) 1649 if (hw->flash_address)
1586 iounmap(hw->flash_address); 1650 iounmap(hw->flash_address);
1587
1588 igb_free_queues(adapter);
1589err_sw_init: 1651err_sw_init:
1652 igb_clear_interrupt_scheme(adapter);
1590 iounmap(hw->hw_addr); 1653 iounmap(hw->hw_addr);
1591err_ioremap: 1654err_ioremap:
1592 free_netdev(netdev); 1655 free_netdev(netdev);
1593err_alloc_etherdev: 1656err_alloc_etherdev:
1594 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1657 pci_release_selected_regions(pdev,
1595 IORESOURCE_MEM)); 1658 pci_select_bars(pdev, IORESOURCE_MEM));
1596err_pci_reg: 1659err_pci_reg:
1597err_dma: 1660err_dma:
1598 pci_disable_device(pdev); 1661 pci_disable_device(pdev);
@@ -1637,12 +1700,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1637 1700
1638 unregister_netdev(netdev); 1701 unregister_netdev(netdev);
1639 1702
1640 if (!igb_check_reset_block(&adapter->hw)) 1703 if (!igb_check_reset_block(hw))
1641 igb_reset_phy(&adapter->hw); 1704 igb_reset_phy(hw);
1642
1643 igb_reset_interrupt_capability(adapter);
1644 1705
1645 igb_free_queues(adapter); 1706 igb_clear_interrupt_scheme(adapter);
1646 1707
1647#ifdef CONFIG_PCI_IOV 1708#ifdef CONFIG_PCI_IOV
1648 /* reclaim resources allocated to VFs */ 1709 /* reclaim resources allocated to VFs */
@@ -1658,11 +1719,12 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1658 dev_info(&pdev->dev, "IOV Disabled\n"); 1719 dev_info(&pdev->dev, "IOV Disabled\n");
1659 } 1720 }
1660#endif 1721#endif
1722
1661 iounmap(hw->hw_addr); 1723 iounmap(hw->hw_addr);
1662 if (hw->flash_address) 1724 if (hw->flash_address)
1663 iounmap(hw->flash_address); 1725 iounmap(hw->flash_address);
1664 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1726 pci_release_selected_regions(pdev,
1665 IORESOURCE_MEM)); 1727 pci_select_bars(pdev, IORESOURCE_MEM));
1666 1728
1667 free_netdev(netdev); 1729 free_netdev(netdev);
1668 1730
@@ -1672,6 +1734,54 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1672} 1734}
1673 1735
1674/** 1736/**
1737 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1738 * @adapter: board private structure to initialize
1739 *
1740 * This function initializes the vf specific data storage and then attempts to
1741 * allocate the VFs. The reason for ordering it this way is because it is much
1742 * mor expensive time wise to disable SR-IOV than it is to allocate and free
1743 * the memory for the VFs.
1744 **/
1745static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1746{
1747#ifdef CONFIG_PCI_IOV
1748 struct pci_dev *pdev = adapter->pdev;
1749
1750 if (adapter->vfs_allocated_count > 7)
1751 adapter->vfs_allocated_count = 7;
1752
1753 if (adapter->vfs_allocated_count) {
1754 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1755 sizeof(struct vf_data_storage),
1756 GFP_KERNEL);
1757 /* if allocation failed then we do not support SR-IOV */
1758 if (!adapter->vf_data) {
1759 adapter->vfs_allocated_count = 0;
1760 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1761 "Data Storage\n");
1762 }
1763 }
1764
1765 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1766 kfree(adapter->vf_data);
1767 adapter->vf_data = NULL;
1768#endif /* CONFIG_PCI_IOV */
1769 adapter->vfs_allocated_count = 0;
1770#ifdef CONFIG_PCI_IOV
1771 } else {
1772 unsigned char mac_addr[ETH_ALEN];
1773 int i;
1774 dev_info(&pdev->dev, "%d vfs allocated\n",
1775 adapter->vfs_allocated_count);
1776 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1777 random_ether_addr(mac_addr);
1778 igb_set_vf_mac(adapter, i, mac_addr);
1779 }
1780 }
1781#endif /* CONFIG_PCI_IOV */
1782}
1783
1784/**
1675 * igb_sw_init - Initialize general software structures (struct igb_adapter) 1785 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1676 * @adapter: board private structure to initialize 1786 * @adapter: board private structure to initialize
1677 * 1787 *
@@ -1689,20 +1799,25 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1689 1799
1690 adapter->tx_ring_count = IGB_DEFAULT_TXD; 1800 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1691 adapter->rx_ring_count = IGB_DEFAULT_RXD; 1801 adapter->rx_ring_count = IGB_DEFAULT_RXD;
1692 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1802 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1693 adapter->rx_ps_hdr_size = 0; /* disable packet split */ 1803 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1804
1694 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1805 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1695 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 1806 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1696 1807
1697 /* This call may decrease the number of queues depending on 1808#ifdef CONFIG_PCI_IOV
1698 * interrupt mode. */ 1809 if (hw->mac.type == e1000_82576)
1699 igb_set_interrupt_capability(adapter); 1810 adapter->vfs_allocated_count = max_vfs;
1700 1811
1701 if (igb_alloc_queues(adapter)) { 1812#endif /* CONFIG_PCI_IOV */
1813 /* This call may decrease the number of queues */
1814 if (igb_init_interrupt_scheme(adapter)) {
1702 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 1815 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1703 return -ENOMEM; 1816 return -ENOMEM;
1704 } 1817 }
1705 1818
1819 igb_probe_vfs(adapter);
1820
1706 /* Explicitly disable IRQ since the NIC can be in any state. */ 1821 /* Explicitly disable IRQ since the NIC can be in any state. */
1707 igb_irq_disable(adapter); 1822 igb_irq_disable(adapter);
1708 1823
@@ -1747,20 +1862,12 @@ static int igb_open(struct net_device *netdev)
1747 1862
1748 /* e1000_power_up_phy(adapter); */ 1863 /* e1000_power_up_phy(adapter); */
1749 1864
1750 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1751 if ((adapter->hw.mng_cookie.status &
1752 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1753 igb_update_mng_vlan(adapter);
1754
1755 /* before we allocate an interrupt, we must be ready to handle it. 1865 /* before we allocate an interrupt, we must be ready to handle it.
1756 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1866 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1757 * as soon as we call pci_request_irq, so we have to setup our 1867 * as soon as we call pci_request_irq, so we have to setup our
1758 * clean_rx handler before we do so. */ 1868 * clean_rx handler before we do so. */
1759 igb_configure(adapter); 1869 igb_configure(adapter);
1760 1870
1761 igb_vmm_control(adapter);
1762 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1763
1764 err = igb_request_irq(adapter); 1871 err = igb_request_irq(adapter);
1765 if (err) 1872 if (err)
1766 goto err_req_irq; 1873 goto err_req_irq;
@@ -1768,18 +1875,28 @@ static int igb_open(struct net_device *netdev)
1768 /* From here on the code is the same as igb_up() */ 1875 /* From here on the code is the same as igb_up() */
1769 clear_bit(__IGB_DOWN, &adapter->state); 1876 clear_bit(__IGB_DOWN, &adapter->state);
1770 1877
1771 for (i = 0; i < adapter->num_rx_queues; i++) 1878 for (i = 0; i < adapter->num_q_vectors; i++) {
1772 napi_enable(&adapter->rx_ring[i].napi); 1879 struct igb_q_vector *q_vector = adapter->q_vector[i];
1880 napi_enable(&q_vector->napi);
1881 }
1773 1882
1774 /* Clear any pending interrupts. */ 1883 /* Clear any pending interrupts. */
1775 rd32(E1000_ICR); 1884 rd32(E1000_ICR);
1776 1885
1777 igb_irq_enable(adapter); 1886 igb_irq_enable(adapter);
1778 1887
1888 /* notify VFs that reset has been completed */
1889 if (adapter->vfs_allocated_count) {
1890 u32 reg_data = rd32(E1000_CTRL_EXT);
1891 reg_data |= E1000_CTRL_EXT_PFRSTD;
1892 wr32(E1000_CTRL_EXT, reg_data);
1893 }
1894
1779 netif_tx_start_all_queues(netdev); 1895 netif_tx_start_all_queues(netdev);
1780 1896
1781 /* Fire a link status change interrupt to start the watchdog. */ 1897 /* start the watchdog. */
1782 wr32(E1000_ICS, E1000_ICS_LSC); 1898 hw->mac.get_link_status = 1;
1899 schedule_work(&adapter->watchdog_task);
1783 1900
1784 return 0; 1901 return 0;
1785 1902
@@ -1818,28 +1935,18 @@ static int igb_close(struct net_device *netdev)
1818 igb_free_all_tx_resources(adapter); 1935 igb_free_all_tx_resources(adapter);
1819 igb_free_all_rx_resources(adapter); 1936 igb_free_all_rx_resources(adapter);
1820 1937
1821 /* kill manageability vlan ID if supported, but not if a vlan with
1822 * the same ID is registered on the host OS (let 8021q kill it) */
1823 if ((adapter->hw.mng_cookie.status &
1824 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1825 !(adapter->vlgrp &&
1826 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1827 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1828
1829 return 0; 1938 return 0;
1830} 1939}
1831 1940
1832/** 1941/**
1833 * igb_setup_tx_resources - allocate Tx resources (Descriptors) 1942 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1834 * @adapter: board private structure
1835 * @tx_ring: tx descriptor ring (for a specific queue) to setup 1943 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1836 * 1944 *
1837 * Return 0 on success, negative on failure 1945 * Return 0 on success, negative on failure
1838 **/ 1946 **/
1839int igb_setup_tx_resources(struct igb_adapter *adapter, 1947int igb_setup_tx_resources(struct igb_ring *tx_ring)
1840 struct igb_ring *tx_ring)
1841{ 1948{
1842 struct pci_dev *pdev = adapter->pdev; 1949 struct pci_dev *pdev = tx_ring->pdev;
1843 int size; 1950 int size;
1844 1951
1845 size = sizeof(struct igb_buffer) * tx_ring->count; 1952 size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1852,20 +1959,20 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
1852 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1959 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
1853 tx_ring->size = ALIGN(tx_ring->size, 4096); 1960 tx_ring->size = ALIGN(tx_ring->size, 4096);
1854 1961
1855 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1962 tx_ring->desc = pci_alloc_consistent(pdev,
1963 tx_ring->size,
1856 &tx_ring->dma); 1964 &tx_ring->dma);
1857 1965
1858 if (!tx_ring->desc) 1966 if (!tx_ring->desc)
1859 goto err; 1967 goto err;
1860 1968
1861 tx_ring->adapter = adapter;
1862 tx_ring->next_to_use = 0; 1969 tx_ring->next_to_use = 0;
1863 tx_ring->next_to_clean = 0; 1970 tx_ring->next_to_clean = 0;
1864 return 0; 1971 return 0;
1865 1972
1866err: 1973err:
1867 vfree(tx_ring->buffer_info); 1974 vfree(tx_ring->buffer_info);
1868 dev_err(&adapter->pdev->dev, 1975 dev_err(&pdev->dev,
1869 "Unable to allocate memory for the transmit descriptor ring\n"); 1976 "Unable to allocate memory for the transmit descriptor ring\n");
1870 return -ENOMEM; 1977 return -ENOMEM;
1871} 1978}
@@ -1879,13 +1986,13 @@ err:
1879 **/ 1986 **/
1880static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 1987static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1881{ 1988{
1989 struct pci_dev *pdev = adapter->pdev;
1882 int i, err = 0; 1990 int i, err = 0;
1883 int r_idx;
1884 1991
1885 for (i = 0; i < adapter->num_tx_queues; i++) { 1992 for (i = 0; i < adapter->num_tx_queues; i++) {
1886 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1993 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
1887 if (err) { 1994 if (err) {
1888 dev_err(&adapter->pdev->dev, 1995 dev_err(&pdev->dev,
1889 "Allocation for Tx Queue %u failed\n", i); 1996 "Allocation for Tx Queue %u failed\n", i);
1890 for (i--; i >= 0; i--) 1997 for (i--; i >= 0; i--)
1891 igb_free_tx_resources(&adapter->tx_ring[i]); 1998 igb_free_tx_resources(&adapter->tx_ring[i]);
@@ -1894,56 +2001,23 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1894 } 2001 }
1895 2002
1896 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { 2003 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
1897 r_idx = i % adapter->num_tx_queues; 2004 int r_idx = i % adapter->num_tx_queues;
1898 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 2005 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1899 } 2006 }
1900 return err; 2007 return err;
1901} 2008}
1902 2009
1903/** 2010/**
1904 * igb_configure_tx - Configure transmit Unit after Reset 2011 * igb_setup_tctl - configure the transmit control registers
1905 * @adapter: board private structure 2012 * @adapter: Board private structure
1906 *
1907 * Configure the Tx unit of the MAC after a reset.
1908 **/ 2013 **/
1909static void igb_configure_tx(struct igb_adapter *adapter) 2014void igb_setup_tctl(struct igb_adapter *adapter)
1910{ 2015{
1911 u64 tdba;
1912 struct e1000_hw *hw = &adapter->hw; 2016 struct e1000_hw *hw = &adapter->hw;
1913 u32 tctl; 2017 u32 tctl;
1914 u32 txdctl, txctrl;
1915 int i, j;
1916 2018
1917 for (i = 0; i < adapter->num_tx_queues; i++) { 2019 /* disable queue 0 which is enabled by default on 82575 and 82576 */
1918 struct igb_ring *ring = &adapter->tx_ring[i]; 2020 wr32(E1000_TXDCTL(0), 0);
1919 j = ring->reg_idx;
1920 wr32(E1000_TDLEN(j),
1921 ring->count * sizeof(union e1000_adv_tx_desc));
1922 tdba = ring->dma;
1923 wr32(E1000_TDBAL(j),
1924 tdba & 0x00000000ffffffffULL);
1925 wr32(E1000_TDBAH(j), tdba >> 32);
1926
1927 ring->head = E1000_TDH(j);
1928 ring->tail = E1000_TDT(j);
1929 writel(0, hw->hw_addr + ring->tail);
1930 writel(0, hw->hw_addr + ring->head);
1931 txdctl = rd32(E1000_TXDCTL(j));
1932 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1933 wr32(E1000_TXDCTL(j), txdctl);
1934
1935 /* Turn off Relaxed Ordering on head write-backs. The
1936 * writebacks MUST be delivered in order or it will
1937 * completely screw up our bookeeping.
1938 */
1939 txctrl = rd32(E1000_DCA_TXCTRL(j));
1940 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1941 wr32(E1000_DCA_TXCTRL(j), txctrl);
1942 }
1943
1944 /* disable queue 0 to prevent tail bump w/o re-configuration */
1945 if (adapter->vfs_allocated_count)
1946 wr32(E1000_TXDCTL(0), 0);
1947 2021
1948 /* Program the Transmit Control Register */ 2022 /* Program the Transmit Control Register */
1949 tctl = rd32(E1000_TCTL); 2023 tctl = rd32(E1000_TCTL);
@@ -1953,9 +2027,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1953 2027
1954 igb_config_collision_dist(hw); 2028 igb_config_collision_dist(hw);
1955 2029
1956 /* Setup Transmit Descriptor Settings for eop descriptor */
1957 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
1958
1959 /* Enable transmits */ 2030 /* Enable transmits */
1960 tctl |= E1000_TCTL_EN; 2031 tctl |= E1000_TCTL_EN;
1961 2032
@@ -1963,16 +2034,69 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1963} 2034}
1964 2035
1965/** 2036/**
1966 * igb_setup_rx_resources - allocate Rx resources (Descriptors) 2037 * igb_configure_tx_ring - Configure transmit ring after Reset
2038 * @adapter: board private structure
2039 * @ring: tx ring to configure
2040 *
2041 * Configure a transmit ring after a reset.
2042 **/
2043void igb_configure_tx_ring(struct igb_adapter *adapter,
2044 struct igb_ring *ring)
2045{
2046 struct e1000_hw *hw = &adapter->hw;
2047 u32 txdctl;
2048 u64 tdba = ring->dma;
2049 int reg_idx = ring->reg_idx;
2050
2051 /* disable the queue */
2052 txdctl = rd32(E1000_TXDCTL(reg_idx));
2053 wr32(E1000_TXDCTL(reg_idx),
2054 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2055 wrfl();
2056 mdelay(10);
2057
2058 wr32(E1000_TDLEN(reg_idx),
2059 ring->count * sizeof(union e1000_adv_tx_desc));
2060 wr32(E1000_TDBAL(reg_idx),
2061 tdba & 0x00000000ffffffffULL);
2062 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2063
2064 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2065 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2066 writel(0, ring->head);
2067 writel(0, ring->tail);
2068
2069 txdctl |= IGB_TX_PTHRESH;
2070 txdctl |= IGB_TX_HTHRESH << 8;
2071 txdctl |= IGB_TX_WTHRESH << 16;
2072
2073 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2074 wr32(E1000_TXDCTL(reg_idx), txdctl);
2075}
2076
2077/**
2078 * igb_configure_tx - Configure transmit Unit after Reset
1967 * @adapter: board private structure 2079 * @adapter: board private structure
2080 *
2081 * Configure the Tx unit of the MAC after a reset.
2082 **/
2083static void igb_configure_tx(struct igb_adapter *adapter)
2084{
2085 int i;
2086
2087 for (i = 0; i < adapter->num_tx_queues; i++)
2088 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
2089}
2090
2091/**
2092 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1968 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2093 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1969 * 2094 *
1970 * Returns 0 on success, negative on failure 2095 * Returns 0 on success, negative on failure
1971 **/ 2096 **/
1972int igb_setup_rx_resources(struct igb_adapter *adapter, 2097int igb_setup_rx_resources(struct igb_ring *rx_ring)
1973 struct igb_ring *rx_ring)
1974{ 2098{
1975 struct pci_dev *pdev = adapter->pdev; 2099 struct pci_dev *pdev = rx_ring->pdev;
1976 int size, desc_len; 2100 int size, desc_len;
1977 2101
1978 size = sizeof(struct igb_buffer) * rx_ring->count; 2102 size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -1996,13 +2120,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1996 rx_ring->next_to_clean = 0; 2120 rx_ring->next_to_clean = 0;
1997 rx_ring->next_to_use = 0; 2121 rx_ring->next_to_use = 0;
1998 2122
1999 rx_ring->adapter = adapter;
2000
2001 return 0; 2123 return 0;
2002 2124
2003err: 2125err:
2004 vfree(rx_ring->buffer_info); 2126 vfree(rx_ring->buffer_info);
2005 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 2127 rx_ring->buffer_info = NULL;
2128 dev_err(&pdev->dev, "Unable to allocate memory for "
2006 "the receive descriptor ring\n"); 2129 "the receive descriptor ring\n");
2007 return -ENOMEM; 2130 return -ENOMEM;
2008} 2131}
@@ -2016,12 +2139,13 @@ err:
2016 **/ 2139 **/
2017static int igb_setup_all_rx_resources(struct igb_adapter *adapter) 2140static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2018{ 2141{
2142 struct pci_dev *pdev = adapter->pdev;
2019 int i, err = 0; 2143 int i, err = 0;
2020 2144
2021 for (i = 0; i < adapter->num_rx_queues; i++) { 2145 for (i = 0; i < adapter->num_rx_queues; i++) {
2022 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2146 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
2023 if (err) { 2147 if (err) {
2024 dev_err(&adapter->pdev->dev, 2148 dev_err(&pdev->dev,
2025 "Allocation for Rx Queue %u failed\n", i); 2149 "Allocation for Rx Queue %u failed\n", i);
2026 for (i--; i >= 0; i--) 2150 for (i--; i >= 0; i--)
2027 igb_free_rx_resources(&adapter->rx_ring[i]); 2151 igb_free_rx_resources(&adapter->rx_ring[i]);
@@ -2033,15 +2157,118 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2033} 2157}
2034 2158
2035/** 2159/**
2160 * igb_setup_mrqc - configure the multiple receive queue control registers
2161 * @adapter: Board private structure
2162 **/
2163static void igb_setup_mrqc(struct igb_adapter *adapter)
2164{
2165 struct e1000_hw *hw = &adapter->hw;
2166 u32 mrqc, rxcsum;
2167 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2168 union e1000_reta {
2169 u32 dword;
2170 u8 bytes[4];
2171 } reta;
2172 static const u8 rsshash[40] = {
2173 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2174 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2175 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2176 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2177
2178 /* Fill out hash function seeds */
2179 for (j = 0; j < 10; j++) {
2180 u32 rsskey = rsshash[(j * 4)];
2181 rsskey |= rsshash[(j * 4) + 1] << 8;
2182 rsskey |= rsshash[(j * 4) + 2] << 16;
2183 rsskey |= rsshash[(j * 4) + 3] << 24;
2184 array_wr32(E1000_RSSRK(0), j, rsskey);
2185 }
2186
2187 num_rx_queues = adapter->num_rx_queues;
2188
2189 if (adapter->vfs_allocated_count) {
2190 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2191 switch (hw->mac.type) {
2192 case e1000_82576:
2193 shift = 3;
2194 num_rx_queues = 2;
2195 break;
2196 case e1000_82575:
2197 shift = 2;
2198 shift2 = 6;
2199 default:
2200 break;
2201 }
2202 } else {
2203 if (hw->mac.type == e1000_82575)
2204 shift = 6;
2205 }
2206
2207 for (j = 0; j < (32 * 4); j++) {
2208 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2209 if (shift2)
2210 reta.bytes[j & 3] |= num_rx_queues << shift2;
2211 if ((j & 3) == 3)
2212 wr32(E1000_RETA(j >> 2), reta.dword);
2213 }
2214
2215 /*
2216 * Disable raw packet checksumming so that RSS hash is placed in
2217 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2218 * offloads as they are enabled by default
2219 */
2220 rxcsum = rd32(E1000_RXCSUM);
2221 rxcsum |= E1000_RXCSUM_PCSD;
2222
2223 if (adapter->hw.mac.type >= e1000_82576)
2224 /* Enable Receive Checksum Offload for SCTP */
2225 rxcsum |= E1000_RXCSUM_CRCOFL;
2226
2227 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2228 wr32(E1000_RXCSUM, rxcsum);
2229
2230 /* If VMDq is enabled then we set the appropriate mode for that, else
2231 * we default to RSS so that an RSS hash is calculated per packet even
2232 * if we are only using one queue */
2233 if (adapter->vfs_allocated_count) {
2234 if (hw->mac.type > e1000_82575) {
2235 /* Set the default pool for the PF's first queue */
2236 u32 vtctl = rd32(E1000_VT_CTL);
2237 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2238 E1000_VT_CTL_DISABLE_DEF_POOL);
2239 vtctl |= adapter->vfs_allocated_count <<
2240 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2241 wr32(E1000_VT_CTL, vtctl);
2242 }
2243 if (adapter->num_rx_queues > 1)
2244 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2245 else
2246 mrqc = E1000_MRQC_ENABLE_VMDQ;
2247 } else {
2248 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2249 }
2250 igb_vmm_control(adapter);
2251
2252 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2253 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2254 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2255 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2256 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2257 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2258 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2259 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2260
2261 wr32(E1000_MRQC, mrqc);
2262}
2263
2264/**
2036 * igb_setup_rctl - configure the receive control registers 2265 * igb_setup_rctl - configure the receive control registers
2037 * @adapter: Board private structure 2266 * @adapter: Board private structure
2038 **/ 2267 **/
2039static void igb_setup_rctl(struct igb_adapter *adapter) 2268void igb_setup_rctl(struct igb_adapter *adapter)
2040{ 2269{
2041 struct e1000_hw *hw = &adapter->hw; 2270 struct e1000_hw *hw = &adapter->hw;
2042 u32 rctl; 2271 u32 rctl;
2043 u32 srrctl = 0;
2044 int i;
2045 2272
2046 rctl = rd32(E1000_RCTL); 2273 rctl = rd32(E1000_RCTL);
2047 2274
@@ -2058,75 +2285,45 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
2058 */ 2285 */
2059 rctl |= E1000_RCTL_SECRC; 2286 rctl |= E1000_RCTL_SECRC;
2060 2287
2061 /* 2288 /* disable store bad packets and clear size bits. */
2062 * disable store bad packets and clear size bits.
2063 */
2064 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); 2289 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2065 2290
2066 /* enable LPE when to prevent packets larger than max_frame_size */ 2291 /* enable LPE to prevent packets larger than max_frame_size */
2067 rctl |= E1000_RCTL_LPE; 2292 rctl |= E1000_RCTL_LPE;
2068
2069 /* Setup buffer sizes */
2070 switch (adapter->rx_buffer_len) {
2071 case IGB_RXBUFFER_256:
2072 rctl |= E1000_RCTL_SZ_256;
2073 break;
2074 case IGB_RXBUFFER_512:
2075 rctl |= E1000_RCTL_SZ_512;
2076 break;
2077 default:
2078 srrctl = ALIGN(adapter->rx_buffer_len, 1024)
2079 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2080 break;
2081 }
2082 2293
2083 /* 82575 and greater support packet-split where the protocol 2294 /* disable queue 0 to prevent tail write w/o re-config */
2084 * header is placed in skb->data and the packet data is 2295 wr32(E1000_RXDCTL(0), 0);
2085 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2086 * In the case of a non-split, skb->data is linearly filled,
2087 * followed by the page buffers. Therefore, skb->data is
2088 * sized to hold the largest protocol header.
2089 */
2090 /* allocations using alloc_page take too long for regular MTU
2091 * so only enable packet split for jumbo frames */
2092 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2093 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
2094 srrctl |= adapter->rx_ps_hdr_size <<
2095 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2096 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2097 } else {
2098 adapter->rx_ps_hdr_size = 0;
2099 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2100 }
2101 2296
2102 /* Attention!!! For SR-IOV PF driver operations you must enable 2297 /* Attention!!! For SR-IOV PF driver operations you must enable
2103 * queue drop for all VF and PF queues to prevent head of line blocking 2298 * queue drop for all VF and PF queues to prevent head of line blocking
2104 * if an un-trusted VF does not provide descriptors to hardware. 2299 * if an un-trusted VF does not provide descriptors to hardware.
2105 */ 2300 */
2106 if (adapter->vfs_allocated_count) { 2301 if (adapter->vfs_allocated_count) {
2107 u32 vmolr;
2108
2109 /* set all queue drop enable bits */ 2302 /* set all queue drop enable bits */
2110 wr32(E1000_QDE, ALL_QUEUES); 2303 wr32(E1000_QDE, ALL_QUEUES);
2111 srrctl |= E1000_SRRCTL_DROP_EN; 2304 }
2112 2305
2113 /* disable queue 0 to prevent tail write w/o re-config */ 2306 wr32(E1000_RCTL, rctl);
2114 wr32(E1000_RXDCTL(0), 0); 2307}
2115 2308
2116 vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); 2309static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2117 if (rctl & E1000_RCTL_LPE) 2310 int vfn)
2118 vmolr |= E1000_VMOLR_LPE; 2311{
2119 if (adapter->num_rx_queues > 1) 2312 struct e1000_hw *hw = &adapter->hw;
2120 vmolr |= E1000_VMOLR_RSSE; 2313 u32 vmolr;
2121 wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
2122 }
2123 2314
2124 for (i = 0; i < adapter->num_rx_queues; i++) { 2315 /* if it isn't the PF check to see if VFs are enabled and
2125 int j = adapter->rx_ring[i].reg_idx; 2316 * increase the size to support vlan tags */
2126 wr32(E1000_SRRCTL(j), srrctl); 2317 if (vfn < adapter->vfs_allocated_count &&
2127 } 2318 adapter->vf_data[vfn].vlans_enabled)
2319 size += VLAN_TAG_SIZE;
2128 2320
2129 wr32(E1000_RCTL, rctl); 2321 vmolr = rd32(E1000_VMOLR(vfn));
2322 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2323 vmolr |= size | E1000_VMOLR_LPE;
2324 wr32(E1000_VMOLR(vfn), vmolr);
2325
2326 return 0;
2130} 2327}
2131 2328
2132/** 2329/**
@@ -2148,144 +2345,118 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
2148 * size and set the VMOLR RLPML to the size we need */ 2345 * size and set the VMOLR RLPML to the size we need */
2149 if (pf_id) { 2346 if (pf_id) {
2150 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); 2347 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2151 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE; 2348 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2152 } 2349 }
2153 2350
2154 wr32(E1000_RLPML, max_frame_size); 2351 wr32(E1000_RLPML, max_frame_size);
2155} 2352}
2156 2353
2157/** 2354static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2158 * igb_configure_vt_default_pool - Configure VT default pool
2159 * @adapter: board private structure
2160 *
2161 * Configure the default pool
2162 **/
2163static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2164{ 2355{
2165 struct e1000_hw *hw = &adapter->hw; 2356 struct e1000_hw *hw = &adapter->hw;
2166 u16 pf_id = adapter->vfs_allocated_count; 2357 u32 vmolr;
2167 u32 vtctl;
2168 2358
2169 /* not in sr-iov mode - do nothing */ 2359 /*
2170 if (!pf_id) 2360 * This register exists only on 82576 and newer so if we are older then
2361 * we should exit and do nothing
2362 */
2363 if (hw->mac.type < e1000_82576)
2171 return; 2364 return;
2172 2365
2173 vtctl = rd32(E1000_VT_CTL); 2366 vmolr = rd32(E1000_VMOLR(vfn));
2174 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | 2367 vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
2175 E1000_VT_CTL_DISABLE_DEF_POOL); 2368 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2176 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT; 2369
2177 wr32(E1000_VT_CTL, vtctl); 2370 /* clear all bits that might not be set */
2371 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2372
2373 if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
2374 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2375 /*
2376 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2377 * multicast packets
2378 */
2379 if (vfn <= adapter->vfs_allocated_count)
2380 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2381
2382 wr32(E1000_VMOLR(vfn), vmolr);
2178} 2383}
2179 2384
2180/** 2385/**
2181 * igb_configure_rx - Configure receive Unit after Reset 2386 * igb_configure_rx_ring - Configure a receive ring after Reset
2182 * @adapter: board private structure 2387 * @adapter: board private structure
2388 * @ring: receive ring to be configured
2183 * 2389 *
2184 * Configure the Rx unit of the MAC after a reset. 2390 * Configure the Rx unit of the MAC after a reset.
2185 **/ 2391 **/
2186static void igb_configure_rx(struct igb_adapter *adapter) 2392void igb_configure_rx_ring(struct igb_adapter *adapter,
2393 struct igb_ring *ring)
2187{ 2394{
2188 u64 rdba;
2189 struct e1000_hw *hw = &adapter->hw; 2395 struct e1000_hw *hw = &adapter->hw;
2190 u32 rctl, rxcsum; 2396 u64 rdba = ring->dma;
2191 u32 rxdctl; 2397 int reg_idx = ring->reg_idx;
2192 int i; 2398 u32 srrctl, rxdctl;
2193 2399
2194 /* disable receives while setting up the descriptors */ 2400 /* disable the queue */
2195 rctl = rd32(E1000_RCTL); 2401 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2196 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); 2402 wr32(E1000_RXDCTL(reg_idx),
2197 wrfl(); 2403 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2198 mdelay(10); 2404
2199 2405 /* Set DMA base address registers */
2200 if (adapter->itr_setting > 3) 2406 wr32(E1000_RDBAL(reg_idx),
2201 wr32(E1000_ITR, adapter->itr); 2407 rdba & 0x00000000ffffffffULL);
2202 2408 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2203 /* Setup the HW Rx Head and Tail Descriptor Pointers and 2409 wr32(E1000_RDLEN(reg_idx),
2204 * the Base and Length of the Rx Descriptor Ring */ 2410 ring->count * sizeof(union e1000_adv_rx_desc));
2205 for (i = 0; i < adapter->num_rx_queues; i++) { 2411
2206 struct igb_ring *ring = &adapter->rx_ring[i]; 2412 /* initialize head and tail */
2207 int j = ring->reg_idx; 2413 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2208 rdba = ring->dma; 2414 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2209 wr32(E1000_RDBAL(j), 2415 writel(0, ring->head);
2210 rdba & 0x00000000ffffffffULL); 2416 writel(0, ring->tail);
2211 wr32(E1000_RDBAH(j), rdba >> 32); 2417
2212 wr32(E1000_RDLEN(j), 2418 /* set descriptor configuration */
2213 ring->count * sizeof(union e1000_adv_rx_desc)); 2419 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2214 2420 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2215 ring->head = E1000_RDH(j); 2421 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2216 ring->tail = E1000_RDT(j); 2422#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2217 writel(0, hw->hw_addr + ring->tail); 2423 srrctl |= IGB_RXBUFFER_16384 >>
2218 writel(0, hw->hw_addr + ring->head); 2424 E1000_SRRCTL_BSIZEPKT_SHIFT;
2219 2425#else
2220 rxdctl = rd32(E1000_RXDCTL(j)); 2426 srrctl |= (PAGE_SIZE / 2) >>
2221 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 2427 E1000_SRRCTL_BSIZEPKT_SHIFT;
2222 rxdctl &= 0xFFF00000; 2428#endif
2223 rxdctl |= IGB_RX_PTHRESH; 2429 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2224 rxdctl |= IGB_RX_HTHRESH << 8; 2430 } else {
2225 rxdctl |= IGB_RX_WTHRESH << 16; 2431 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2226 wr32(E1000_RXDCTL(j), rxdctl); 2432 E1000_SRRCTL_BSIZEPKT_SHIFT;
2227 } 2433 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2228
2229 if (adapter->num_rx_queues > 1) {
2230 u32 random[10];
2231 u32 mrqc;
2232 u32 j, shift;
2233 union e1000_reta {
2234 u32 dword;
2235 u8 bytes[4];
2236 } reta;
2237
2238 get_random_bytes(&random[0], 40);
2239
2240 if (hw->mac.type >= e1000_82576)
2241 shift = 0;
2242 else
2243 shift = 6;
2244 for (j = 0; j < (32 * 4); j++) {
2245 reta.bytes[j & 3] =
2246 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2247 if ((j & 3) == 3)
2248 writel(reta.dword,
2249 hw->hw_addr + E1000_RETA(0) + (j & ~3));
2250 }
2251 if (adapter->vfs_allocated_count)
2252 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2253 else
2254 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2255
2256 /* Fill out hash function seeds */
2257 for (j = 0; j < 10; j++)
2258 array_wr32(E1000_RSSRK(0), j, random[j]);
2259
2260 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2261 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2262 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2263 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2264 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2265 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2266 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2267 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2268
2269 wr32(E1000_MRQC, mrqc);
2270 } else if (adapter->vfs_allocated_count) {
2271 /* Enable multi-queue for sr-iov */
2272 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2273 } 2434 }
2274 2435
2275 /* Enable Receive Checksum Offload for TCP and UDP */ 2436 wr32(E1000_SRRCTL(reg_idx), srrctl);
2276 rxcsum = rd32(E1000_RXCSUM);
2277 /* Disable raw packet checksumming */
2278 rxcsum |= E1000_RXCSUM_PCSD;
2279 2437
2280 if (adapter->hw.mac.type == e1000_82576) 2438 /* set filtering for VMDQ pools */
2281 /* Enable Receive Checksum Offload for SCTP */ 2439 igb_set_vmolr(adapter, reg_idx & 0x7);
2282 rxcsum |= E1000_RXCSUM_CRCOFL;
2283 2440
2284 /* Don't need to set TUOFL or IPOFL, they default to 1 */ 2441 /* enable receive descriptor fetching */
2285 wr32(E1000_RXCSUM, rxcsum); 2442 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2443 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2444 rxdctl &= 0xFFF00000;
2445 rxdctl |= IGB_RX_PTHRESH;
2446 rxdctl |= IGB_RX_HTHRESH << 8;
2447 rxdctl |= IGB_RX_WTHRESH << 16;
2448 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2449}
2286 2450
2287 /* Set the default pool for the PF's first queue */ 2451/**
2288 igb_configure_vt_default_pool(adapter); 2452 * igb_configure_rx - Configure receive Unit after Reset
2453 * @adapter: board private structure
2454 *
2455 * Configure the Rx unit of the MAC after a reset.
2456 **/
2457static void igb_configure_rx(struct igb_adapter *adapter)
2458{
2459 int i;
2289 2460
2290 /* set UTA to appropriate mode */ 2461 /* set UTA to appropriate mode */
2291 igb_set_uta(adapter); 2462 igb_set_uta(adapter);
@@ -2294,10 +2465,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
2294 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, 2465 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2295 adapter->vfs_allocated_count); 2466 adapter->vfs_allocated_count);
2296 2467
2297 igb_rlpml_set(adapter); 2468 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2298 2469 * the Base and Length of the Rx Descriptor Ring */
2299 /* Enable Receives */ 2470 for (i = 0; i < adapter->num_rx_queues; i++)
2300 wr32(E1000_RCTL, rctl); 2471 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
2301} 2472}
2302 2473
2303/** 2474/**
@@ -2308,14 +2479,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
2308 **/ 2479 **/
2309void igb_free_tx_resources(struct igb_ring *tx_ring) 2480void igb_free_tx_resources(struct igb_ring *tx_ring)
2310{ 2481{
2311 struct pci_dev *pdev = tx_ring->adapter->pdev;
2312
2313 igb_clean_tx_ring(tx_ring); 2482 igb_clean_tx_ring(tx_ring);
2314 2483
2315 vfree(tx_ring->buffer_info); 2484 vfree(tx_ring->buffer_info);
2316 tx_ring->buffer_info = NULL; 2485 tx_ring->buffer_info = NULL;
2317 2486
2318 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 2487 /* if not set, then don't free */
2488 if (!tx_ring->desc)
2489 return;
2490
2491 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2492 tx_ring->desc, tx_ring->dma);
2319 2493
2320 tx_ring->desc = NULL; 2494 tx_ring->desc = NULL;
2321} 2495}
@@ -2334,12 +2508,13 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2334 igb_free_tx_resources(&adapter->tx_ring[i]); 2508 igb_free_tx_resources(&adapter->tx_ring[i]);
2335} 2509}
2336 2510
2337static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, 2511void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2338 struct igb_buffer *buffer_info) 2512 struct igb_buffer *buffer_info)
2339{ 2513{
2340 buffer_info->dma = 0; 2514 buffer_info->dma = 0;
2341 if (buffer_info->skb) { 2515 if (buffer_info->skb) {
2342 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, 2516 skb_dma_unmap(&tx_ring->pdev->dev,
2517 buffer_info->skb,
2343 DMA_TO_DEVICE); 2518 DMA_TO_DEVICE);
2344 dev_kfree_skb_any(buffer_info->skb); 2519 dev_kfree_skb_any(buffer_info->skb);
2345 buffer_info->skb = NULL; 2520 buffer_info->skb = NULL;
@@ -2354,7 +2529,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2354 **/ 2529 **/
2355static void igb_clean_tx_ring(struct igb_ring *tx_ring) 2530static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2356{ 2531{
2357 struct igb_adapter *adapter = tx_ring->adapter;
2358 struct igb_buffer *buffer_info; 2532 struct igb_buffer *buffer_info;
2359 unsigned long size; 2533 unsigned long size;
2360 unsigned int i; 2534 unsigned int i;
@@ -2365,21 +2539,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2365 2539
2366 for (i = 0; i < tx_ring->count; i++) { 2540 for (i = 0; i < tx_ring->count; i++) {
2367 buffer_info = &tx_ring->buffer_info[i]; 2541 buffer_info = &tx_ring->buffer_info[i];
2368 igb_unmap_and_free_tx_resource(adapter, buffer_info); 2542 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
2369 } 2543 }
2370 2544
2371 size = sizeof(struct igb_buffer) * tx_ring->count; 2545 size = sizeof(struct igb_buffer) * tx_ring->count;
2372 memset(tx_ring->buffer_info, 0, size); 2546 memset(tx_ring->buffer_info, 0, size);
2373 2547
2374 /* Zero out the descriptor ring */ 2548 /* Zero out the descriptor ring */
2375
2376 memset(tx_ring->desc, 0, tx_ring->size); 2549 memset(tx_ring->desc, 0, tx_ring->size);
2377 2550
2378 tx_ring->next_to_use = 0; 2551 tx_ring->next_to_use = 0;
2379 tx_ring->next_to_clean = 0; 2552 tx_ring->next_to_clean = 0;
2380
2381 writel(0, adapter->hw.hw_addr + tx_ring->head);
2382 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2383} 2553}
2384 2554
2385/** 2555/**
@@ -2402,14 +2572,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2402 **/ 2572 **/
2403void igb_free_rx_resources(struct igb_ring *rx_ring) 2573void igb_free_rx_resources(struct igb_ring *rx_ring)
2404{ 2574{
2405 struct pci_dev *pdev = rx_ring->adapter->pdev;
2406
2407 igb_clean_rx_ring(rx_ring); 2575 igb_clean_rx_ring(rx_ring);
2408 2576
2409 vfree(rx_ring->buffer_info); 2577 vfree(rx_ring->buffer_info);
2410 rx_ring->buffer_info = NULL; 2578 rx_ring->buffer_info = NULL;
2411 2579
2412 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2580 /* if not set, then don't free */
2581 if (!rx_ring->desc)
2582 return;
2583
2584 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2585 rx_ring->desc, rx_ring->dma);
2413 2586
2414 rx_ring->desc = NULL; 2587 rx_ring->desc = NULL;
2415} 2588}
@@ -2434,26 +2607,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2434 **/ 2607 **/
2435static void igb_clean_rx_ring(struct igb_ring *rx_ring) 2608static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2436{ 2609{
2437 struct igb_adapter *adapter = rx_ring->adapter;
2438 struct igb_buffer *buffer_info; 2610 struct igb_buffer *buffer_info;
2439 struct pci_dev *pdev = adapter->pdev;
2440 unsigned long size; 2611 unsigned long size;
2441 unsigned int i; 2612 unsigned int i;
2442 2613
2443 if (!rx_ring->buffer_info) 2614 if (!rx_ring->buffer_info)
2444 return; 2615 return;
2616
2445 /* Free all the Rx ring sk_buffs */ 2617 /* Free all the Rx ring sk_buffs */
2446 for (i = 0; i < rx_ring->count; i++) { 2618 for (i = 0; i < rx_ring->count; i++) {
2447 buffer_info = &rx_ring->buffer_info[i]; 2619 buffer_info = &rx_ring->buffer_info[i];
2448 if (buffer_info->dma) { 2620 if (buffer_info->dma) {
2449 if (adapter->rx_ps_hdr_size) 2621 pci_unmap_single(rx_ring->pdev,
2450 pci_unmap_single(pdev, buffer_info->dma, 2622 buffer_info->dma,
2451 adapter->rx_ps_hdr_size, 2623 rx_ring->rx_buffer_len,
2452 PCI_DMA_FROMDEVICE); 2624 PCI_DMA_FROMDEVICE);
2453 else
2454 pci_unmap_single(pdev, buffer_info->dma,
2455 adapter->rx_buffer_len,
2456 PCI_DMA_FROMDEVICE);
2457 buffer_info->dma = 0; 2625 buffer_info->dma = 0;
2458 } 2626 }
2459 2627
@@ -2461,14 +2629,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2461 dev_kfree_skb(buffer_info->skb); 2629 dev_kfree_skb(buffer_info->skb);
2462 buffer_info->skb = NULL; 2630 buffer_info->skb = NULL;
2463 } 2631 }
2632 if (buffer_info->page_dma) {
2633 pci_unmap_page(rx_ring->pdev,
2634 buffer_info->page_dma,
2635 PAGE_SIZE / 2,
2636 PCI_DMA_FROMDEVICE);
2637 buffer_info->page_dma = 0;
2638 }
2464 if (buffer_info->page) { 2639 if (buffer_info->page) {
2465 if (buffer_info->page_dma)
2466 pci_unmap_page(pdev, buffer_info->page_dma,
2467 PAGE_SIZE / 2,
2468 PCI_DMA_FROMDEVICE);
2469 put_page(buffer_info->page); 2640 put_page(buffer_info->page);
2470 buffer_info->page = NULL; 2641 buffer_info->page = NULL;
2471 buffer_info->page_dma = 0;
2472 buffer_info->page_offset = 0; 2642 buffer_info->page_offset = 0;
2473 } 2643 }
2474 } 2644 }
@@ -2481,9 +2651,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2481 2651
2482 rx_ring->next_to_clean = 0; 2652 rx_ring->next_to_clean = 0;
2483 rx_ring->next_to_use = 0; 2653 rx_ring->next_to_use = 0;
2484
2485 writel(0, adapter->hw.hw_addr + rx_ring->head);
2486 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2487} 2654}
2488 2655
2489/** 2656/**
@@ -2744,37 +2911,34 @@ static void igb_watchdog(unsigned long data)
2744static void igb_watchdog_task(struct work_struct *work) 2911static void igb_watchdog_task(struct work_struct *work)
2745{ 2912{
2746 struct igb_adapter *adapter = container_of(work, 2913 struct igb_adapter *adapter = container_of(work,
2747 struct igb_adapter, watchdog_task); 2914 struct igb_adapter,
2915 watchdog_task);
2748 struct e1000_hw *hw = &adapter->hw; 2916 struct e1000_hw *hw = &adapter->hw;
2749 struct net_device *netdev = adapter->netdev; 2917 struct net_device *netdev = adapter->netdev;
2750 struct igb_ring *tx_ring = adapter->tx_ring; 2918 struct igb_ring *tx_ring = adapter->tx_ring;
2751 u32 link; 2919 u32 link;
2752 u32 eics = 0;
2753 int i; 2920 int i;
2754 2921
2755 link = igb_has_link(adapter); 2922 link = igb_has_link(adapter);
2756 if ((netif_carrier_ok(netdev)) && link)
2757 goto link_up;
2758
2759 if (link) { 2923 if (link) {
2760 if (!netif_carrier_ok(netdev)) { 2924 if (!netif_carrier_ok(netdev)) {
2761 u32 ctrl; 2925 u32 ctrl;
2762 hw->mac.ops.get_speed_and_duplex(&adapter->hw, 2926 hw->mac.ops.get_speed_and_duplex(hw,
2763 &adapter->link_speed, 2927 &adapter->link_speed,
2764 &adapter->link_duplex); 2928 &adapter->link_duplex);
2765 2929
2766 ctrl = rd32(E1000_CTRL); 2930 ctrl = rd32(E1000_CTRL);
2767 /* Links status message must follow this format */ 2931 /* Links status message must follow this format */
2768 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " 2932 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2769 "Flow Control: %s\n", 2933 "Flow Control: %s\n",
2770 netdev->name, 2934 netdev->name,
2771 adapter->link_speed, 2935 adapter->link_speed,
2772 adapter->link_duplex == FULL_DUPLEX ? 2936 adapter->link_duplex == FULL_DUPLEX ?
2773 "Full Duplex" : "Half Duplex", 2937 "Full Duplex" : "Half Duplex",
2774 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2938 ((ctrl & E1000_CTRL_TFCE) &&
2775 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2939 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
2776 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2940 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
2777 E1000_CTRL_TFCE) ? "TX" : "None"))); 2941 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
2778 2942
2779 /* tweak tx_queue_len according to speed/duplex and 2943 /* tweak tx_queue_len according to speed/duplex and
2780 * adjust the timeout factor */ 2944 * adjust the timeout factor */
@@ -2818,20 +2982,8 @@ static void igb_watchdog_task(struct work_struct *work)
2818 } 2982 }
2819 } 2983 }
2820 2984
2821link_up:
2822 igb_update_stats(adapter); 2985 igb_update_stats(adapter);
2823 2986 igb_update_adaptive(hw);
2824 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2825 adapter->tpt_old = adapter->stats.tpt;
2826 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
2827 adapter->colc_old = adapter->stats.colc;
2828
2829 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2830 adapter->gorc_old = adapter->stats.gorc;
2831 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2832 adapter->gotc_old = adapter->stats.gotc;
2833
2834 igb_update_adaptive(&adapter->hw);
2835 2987
2836 if (!netif_carrier_ok(netdev)) { 2988 if (!netif_carrier_ok(netdev)) {
2837 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { 2989 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
@@ -2846,18 +2998,22 @@ link_up:
2846 } 2998 }
2847 } 2999 }
2848 3000
3001 /* Force detection of hung controller every watchdog period */
3002 for (i = 0; i < adapter->num_tx_queues; i++)
3003 adapter->tx_ring[i].detect_tx_hung = true;
3004
2849 /* Cause software interrupt to ensure rx ring is cleaned */ 3005 /* Cause software interrupt to ensure rx ring is cleaned */
2850 if (adapter->msix_entries) { 3006 if (adapter->msix_entries) {
2851 for (i = 0; i < adapter->num_rx_queues; i++) 3007 u32 eics = 0;
2852 eics |= adapter->rx_ring[i].eims_value; 3008 for (i = 0; i < adapter->num_q_vectors; i++) {
3009 struct igb_q_vector *q_vector = adapter->q_vector[i];
3010 eics |= q_vector->eims_value;
3011 }
2853 wr32(E1000_EICS, eics); 3012 wr32(E1000_EICS, eics);
2854 } else { 3013 } else {
2855 wr32(E1000_ICS, E1000_ICS_RXDMT0); 3014 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2856 } 3015 }
2857 3016
2858 /* Force detection of hung controller every watchdog period */
2859 tx_ring->detect_tx_hung = true;
2860
2861 /* Reset the timer */ 3017 /* Reset the timer */
2862 if (!test_bit(__IGB_DOWN, &adapter->state)) 3018 if (!test_bit(__IGB_DOWN, &adapter->state))
2863 mod_timer(&adapter->watchdog_timer, 3019 mod_timer(&adapter->watchdog_timer,
@@ -2871,7 +3027,6 @@ enum latency_range {
2871 latency_invalid = 255 3027 latency_invalid = 255
2872}; 3028};
2873 3029
2874
2875/** 3030/**
2876 * igb_update_ring_itr - update the dynamic ITR value based on packet size 3031 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2877 * 3032 *
@@ -2886,25 +3041,37 @@ enum latency_range {
2886 * parameter (see igb_param.c) 3041 * parameter (see igb_param.c)
2887 * NOTE: This function is called only when operating in a multiqueue 3042 * NOTE: This function is called only when operating in a multiqueue
2888 * receive environment. 3043 * receive environment.
2889 * @rx_ring: pointer to ring 3044 * @q_vector: pointer to q_vector
2890 **/ 3045 **/
2891static void igb_update_ring_itr(struct igb_ring *rx_ring) 3046static void igb_update_ring_itr(struct igb_q_vector *q_vector)
2892{ 3047{
2893 int new_val = rx_ring->itr_val; 3048 int new_val = q_vector->itr_val;
2894 int avg_wire_size = 0; 3049 int avg_wire_size = 0;
2895 struct igb_adapter *adapter = rx_ring->adapter; 3050 struct igb_adapter *adapter = q_vector->adapter;
2896
2897 if (!rx_ring->total_packets)
2898 goto clear_counts; /* no packets, so don't do anything */
2899 3051
2900 /* For non-gigabit speeds, just fix the interrupt rate at 4000 3052 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2901 * ints/sec - ITR timer value of 120 ticks. 3053 * ints/sec - ITR timer value of 120 ticks.
2902 */ 3054 */
2903 if (adapter->link_speed != SPEED_1000) { 3055 if (adapter->link_speed != SPEED_1000) {
2904 new_val = 120; 3056 new_val = 976;
2905 goto set_itr_val; 3057 goto set_itr_val;
2906 } 3058 }
2907 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets; 3059
3060 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3061 struct igb_ring *ring = q_vector->rx_ring;
3062 avg_wire_size = ring->total_bytes / ring->total_packets;
3063 }
3064
3065 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3066 struct igb_ring *ring = q_vector->tx_ring;
3067 avg_wire_size = max_t(u32, avg_wire_size,
3068 (ring->total_bytes /
3069 ring->total_packets));
3070 }
3071
3072 /* if avg_wire_size isn't set no work was done */
3073 if (!avg_wire_size)
3074 goto clear_counts;
2908 3075
2909 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 3076 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2910 avg_wire_size += 24; 3077 avg_wire_size += 24;
@@ -2919,13 +3086,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
2919 new_val = avg_wire_size / 2; 3086 new_val = avg_wire_size / 2;
2920 3087
2921set_itr_val: 3088set_itr_val:
2922 if (new_val != rx_ring->itr_val) { 3089 if (new_val != q_vector->itr_val) {
2923 rx_ring->itr_val = new_val; 3090 q_vector->itr_val = new_val;
2924 rx_ring->set_itr = 1; 3091 q_vector->set_itr = 1;
2925 } 3092 }
2926clear_counts: 3093clear_counts:
2927 rx_ring->total_bytes = 0; 3094 if (q_vector->rx_ring) {
2928 rx_ring->total_packets = 0; 3095 q_vector->rx_ring->total_bytes = 0;
3096 q_vector->rx_ring->total_packets = 0;
3097 }
3098 if (q_vector->tx_ring) {
3099 q_vector->tx_ring->total_bytes = 0;
3100 q_vector->tx_ring->total_packets = 0;
3101 }
2929} 3102}
2930 3103
2931/** 3104/**
@@ -2942,7 +3115,7 @@ clear_counts:
2942 * NOTE: These calculations are only valid when operating in a single- 3115 * NOTE: These calculations are only valid when operating in a single-
2943 * queue environment. 3116 * queue environment.
2944 * @adapter: pointer to adapter 3117 * @adapter: pointer to adapter
2945 * @itr_setting: current adapter->itr 3118 * @itr_setting: current q_vector->itr_val
2946 * @packets: the number of packets during this measurement interval 3119 * @packets: the number of packets during this measurement interval
2947 * @bytes: the number of bytes during this measurement interval 3120 * @bytes: the number of bytes during this measurement interval
2948 **/ 3121 **/
@@ -2994,8 +3167,9 @@ update_itr_done:
2994 3167
2995static void igb_set_itr(struct igb_adapter *adapter) 3168static void igb_set_itr(struct igb_adapter *adapter)
2996{ 3169{
3170 struct igb_q_vector *q_vector = adapter->q_vector[0];
2997 u16 current_itr; 3171 u16 current_itr;
2998 u32 new_itr = adapter->itr; 3172 u32 new_itr = q_vector->itr_val;
2999 3173
3000 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 3174 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3001 if (adapter->link_speed != SPEED_1000) { 3175 if (adapter->link_speed != SPEED_1000) {
@@ -3009,18 +3183,14 @@ static void igb_set_itr(struct igb_adapter *adapter)
3009 adapter->rx_ring->total_packets, 3183 adapter->rx_ring->total_packets,
3010 adapter->rx_ring->total_bytes); 3184 adapter->rx_ring->total_bytes);
3011 3185
3012 if (adapter->rx_ring->buddy) { 3186 adapter->tx_itr = igb_update_itr(adapter,
3013 adapter->tx_itr = igb_update_itr(adapter, 3187 adapter->tx_itr,
3014 adapter->tx_itr, 3188 adapter->tx_ring->total_packets,
3015 adapter->tx_ring->total_packets, 3189 adapter->tx_ring->total_bytes);
3016 adapter->tx_ring->total_bytes); 3190 current_itr = max(adapter->rx_itr, adapter->tx_itr);
3017 current_itr = max(adapter->rx_itr, adapter->tx_itr);
3018 } else {
3019 current_itr = adapter->rx_itr;
3020 }
3021 3191
3022 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3192 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3023 if (adapter->itr_setting == 3 && current_itr == lowest_latency) 3193 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
3024 current_itr = low_latency; 3194 current_itr = low_latency;
3025 3195
3026 switch (current_itr) { 3196 switch (current_itr) {
@@ -3041,18 +3211,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
3041set_itr_now: 3211set_itr_now:
3042 adapter->rx_ring->total_bytes = 0; 3212 adapter->rx_ring->total_bytes = 0;
3043 adapter->rx_ring->total_packets = 0; 3213 adapter->rx_ring->total_packets = 0;
3044 if (adapter->rx_ring->buddy) { 3214 adapter->tx_ring->total_bytes = 0;
3045 adapter->rx_ring->buddy->total_bytes = 0; 3215 adapter->tx_ring->total_packets = 0;
3046 adapter->rx_ring->buddy->total_packets = 0;
3047 }
3048 3216
3049 if (new_itr != adapter->itr) { 3217 if (new_itr != q_vector->itr_val) {
3050 /* this attempts to bias the interrupt rate towards Bulk 3218 /* this attempts to bias the interrupt rate towards Bulk
3051 * by adding intermediate steps when interrupt rate is 3219 * by adding intermediate steps when interrupt rate is
3052 * increasing */ 3220 * increasing */
3053 new_itr = new_itr > adapter->itr ? 3221 new_itr = new_itr > q_vector->itr_val ?
3054 max((new_itr * adapter->itr) / 3222 max((new_itr * q_vector->itr_val) /
3055 (new_itr + (adapter->itr >> 2)), new_itr) : 3223 (new_itr + (q_vector->itr_val >> 2)),
3224 new_itr) :
3056 new_itr; 3225 new_itr;
3057 /* Don't write the value here; it resets the adapter's 3226 /* Don't write the value here; it resets the adapter's
3058 * internal timer, and causes us to delay far longer than 3227 * internal timer, and causes us to delay far longer than
@@ -3060,25 +3229,22 @@ set_itr_now:
3060 * value at the beginning of the next interrupt so the timing 3229 * value at the beginning of the next interrupt so the timing
3061 * ends up being correct. 3230 * ends up being correct.
3062 */ 3231 */
3063 adapter->itr = new_itr; 3232 q_vector->itr_val = new_itr;
3064 adapter->rx_ring->itr_val = new_itr; 3233 q_vector->set_itr = 1;
3065 adapter->rx_ring->set_itr = 1;
3066 } 3234 }
3067 3235
3068 return; 3236 return;
3069} 3237}
3070 3238
3071
3072#define IGB_TX_FLAGS_CSUM 0x00000001 3239#define IGB_TX_FLAGS_CSUM 0x00000001
3073#define IGB_TX_FLAGS_VLAN 0x00000002 3240#define IGB_TX_FLAGS_VLAN 0x00000002
3074#define IGB_TX_FLAGS_TSO 0x00000004 3241#define IGB_TX_FLAGS_TSO 0x00000004
3075#define IGB_TX_FLAGS_IPV4 0x00000008 3242#define IGB_TX_FLAGS_IPV4 0x00000008
3076#define IGB_TX_FLAGS_TSTAMP 0x00000010 3243#define IGB_TX_FLAGS_TSTAMP 0x00000010
3077#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 3244#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3078#define IGB_TX_FLAGS_VLAN_SHIFT 16 3245#define IGB_TX_FLAGS_VLAN_SHIFT 16
3079 3246
3080static inline int igb_tso_adv(struct igb_adapter *adapter, 3247static inline int igb_tso_adv(struct igb_ring *tx_ring,
3081 struct igb_ring *tx_ring,
3082 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 3248 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3083{ 3249{
3084 struct e1000_adv_tx_context_desc *context_desc; 3250 struct e1000_adv_tx_context_desc *context_desc;
@@ -3140,8 +3306,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3140 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 3306 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3141 3307
3142 /* For 82575, context index must be unique per ring. */ 3308 /* For 82575, context index must be unique per ring. */
3143 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3309 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3144 mss_l4len_idx |= tx_ring->queue_index << 4; 3310 mss_l4len_idx |= tx_ring->reg_idx << 4;
3145 3311
3146 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 3312 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3147 context_desc->seqnum_seed = 0; 3313 context_desc->seqnum_seed = 0;
@@ -3158,14 +3324,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3158 return true; 3324 return true;
3159} 3325}
3160 3326
3161static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, 3327static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3162 struct igb_ring *tx_ring, 3328 struct sk_buff *skb, u32 tx_flags)
3163 struct sk_buff *skb, u32 tx_flags)
3164{ 3329{
3165 struct e1000_adv_tx_context_desc *context_desc; 3330 struct e1000_adv_tx_context_desc *context_desc;
3166 unsigned int i; 3331 struct pci_dev *pdev = tx_ring->pdev;
3167 struct igb_buffer *buffer_info; 3332 struct igb_buffer *buffer_info;
3168 u32 info = 0, tu_cmd = 0; 3333 u32 info = 0, tu_cmd = 0;
3334 unsigned int i;
3169 3335
3170 if ((skb->ip_summed == CHECKSUM_PARTIAL) || 3336 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3171 (tx_flags & IGB_TX_FLAGS_VLAN)) { 3337 (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3175,6 +3341,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3175 3341
3176 if (tx_flags & IGB_TX_FLAGS_VLAN) 3342 if (tx_flags & IGB_TX_FLAGS_VLAN)
3177 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); 3343 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3344
3178 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 3345 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3179 if (skb->ip_summed == CHECKSUM_PARTIAL) 3346 if (skb->ip_summed == CHECKSUM_PARTIAL)
3180 info |= skb_network_header_len(skb); 3347 info |= skb_network_header_len(skb);
@@ -3212,7 +3379,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3212 break; 3379 break;
3213 default: 3380 default:
3214 if (unlikely(net_ratelimit())) 3381 if (unlikely(net_ratelimit()))
3215 dev_warn(&adapter->pdev->dev, 3382 dev_warn(&pdev->dev,
3216 "partial checksum but proto=%x!\n", 3383 "partial checksum but proto=%x!\n",
3217 skb->protocol); 3384 skb->protocol);
3218 break; 3385 break;
@@ -3221,11 +3388,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3221 3388
3222 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 3389 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3223 context_desc->seqnum_seed = 0; 3390 context_desc->seqnum_seed = 0;
3224 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3391 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3225 context_desc->mss_l4len_idx = 3392 context_desc->mss_l4len_idx =
3226 cpu_to_le32(tx_ring->queue_index << 4); 3393 cpu_to_le32(tx_ring->reg_idx << 4);
3227 else
3228 context_desc->mss_l4len_idx = 0;
3229 3394
3230 buffer_info->time_stamp = jiffies; 3395 buffer_info->time_stamp = jiffies;
3231 buffer_info->next_to_watch = i; 3396 buffer_info->next_to_watch = i;
@@ -3244,11 +3409,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3244#define IGB_MAX_TXD_PWR 16 3409#define IGB_MAX_TXD_PWR 16
3245#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) 3410#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3246 3411
3247static inline int igb_tx_map_adv(struct igb_adapter *adapter, 3412static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3248 struct igb_ring *tx_ring, struct sk_buff *skb,
3249 unsigned int first) 3413 unsigned int first)
3250{ 3414{
3251 struct igb_buffer *buffer_info; 3415 struct igb_buffer *buffer_info;
3416 struct pci_dev *pdev = tx_ring->pdev;
3252 unsigned int len = skb_headlen(skb); 3417 unsigned int len = skb_headlen(skb);
3253 unsigned int count = 0, i; 3418 unsigned int count = 0, i;
3254 unsigned int f; 3419 unsigned int f;
@@ -3256,8 +3421,8 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3256 3421
3257 i = tx_ring->next_to_use; 3422 i = tx_ring->next_to_use;
3258 3423
3259 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { 3424 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
3260 dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); 3425 dev_err(&pdev->dev, "TX DMA map failed\n");
3261 return 0; 3426 return 0;
3262 } 3427 }
3263 3428
@@ -3293,18 +3458,17 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3293 tx_ring->buffer_info[i].skb = skb; 3458 tx_ring->buffer_info[i].skb = skb;
3294 tx_ring->buffer_info[first].next_to_watch = i; 3459 tx_ring->buffer_info[first].next_to_watch = i;
3295 3460
3296 return count + 1; 3461 return ++count;
3297} 3462}
3298 3463
3299static inline void igb_tx_queue_adv(struct igb_adapter *adapter, 3464static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3300 struct igb_ring *tx_ring,
3301 int tx_flags, int count, u32 paylen, 3465 int tx_flags, int count, u32 paylen,
3302 u8 hdr_len) 3466 u8 hdr_len)
3303{ 3467{
3304 union e1000_adv_tx_desc *tx_desc = NULL; 3468 union e1000_adv_tx_desc *tx_desc;
3305 struct igb_buffer *buffer_info; 3469 struct igb_buffer *buffer_info;
3306 u32 olinfo_status = 0, cmd_type_len; 3470 u32 olinfo_status = 0, cmd_type_len;
3307 unsigned int i; 3471 unsigned int i = tx_ring->next_to_use;
3308 3472
3309 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | 3473 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3310 E1000_ADVTXD_DCMD_DEXT); 3474 E1000_ADVTXD_DCMD_DEXT);
@@ -3329,27 +3493,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3329 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3493 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3330 } 3494 }
3331 3495
3332 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) && 3496 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3333 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | 3497 (tx_flags & (IGB_TX_FLAGS_CSUM |
3498 IGB_TX_FLAGS_TSO |
3334 IGB_TX_FLAGS_VLAN))) 3499 IGB_TX_FLAGS_VLAN)))
3335 olinfo_status |= tx_ring->queue_index << 4; 3500 olinfo_status |= tx_ring->reg_idx << 4;
3336 3501
3337 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); 3502 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3338 3503
3339 i = tx_ring->next_to_use; 3504 do {
3340 while (count--) {
3341 buffer_info = &tx_ring->buffer_info[i]; 3505 buffer_info = &tx_ring->buffer_info[i];
3342 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 3506 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3343 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 3507 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3344 tx_desc->read.cmd_type_len = 3508 tx_desc->read.cmd_type_len =
3345 cpu_to_le32(cmd_type_len | buffer_info->length); 3509 cpu_to_le32(cmd_type_len | buffer_info->length);
3346 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3510 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3511 count--;
3347 i++; 3512 i++;
3348 if (i == tx_ring->count) 3513 if (i == tx_ring->count)
3349 i = 0; 3514 i = 0;
3350 } 3515 } while (count > 0);
3351 3516
3352 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); 3517 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
3353 /* Force memory writes to complete before letting h/w 3518 /* Force memory writes to complete before letting h/w
3354 * know there are new descriptors to fetch. (Only 3519 * know there are new descriptors to fetch. (Only
3355 * applicable for weak-ordered memory model archs, 3520 * applicable for weak-ordered memory model archs,
@@ -3357,16 +3522,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3357 wmb(); 3522 wmb();
3358 3523
3359 tx_ring->next_to_use = i; 3524 tx_ring->next_to_use = i;
3360 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3525 writel(i, tx_ring->tail);
3361 /* we need this if more than one processor can write to our tail 3526 /* we need this if more than one processor can write to our tail
3362 * at a time, it syncronizes IO on IA64/Altix systems */ 3527 * at a time, it syncronizes IO on IA64/Altix systems */
3363 mmiowb(); 3528 mmiowb();
3364} 3529}
3365 3530
3366static int __igb_maybe_stop_tx(struct net_device *netdev, 3531static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3367 struct igb_ring *tx_ring, int size)
3368{ 3532{
3369 struct igb_adapter *adapter = netdev_priv(netdev); 3533 struct net_device *netdev = tx_ring->netdev;
3370 3534
3371 netif_stop_subqueue(netdev, tx_ring->queue_index); 3535 netif_stop_subqueue(netdev, tx_ring->queue_index);
3372 3536
@@ -3382,66 +3546,43 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
3382 3546
3383 /* A reprieve! */ 3547 /* A reprieve! */
3384 netif_wake_subqueue(netdev, tx_ring->queue_index); 3548 netif_wake_subqueue(netdev, tx_ring->queue_index);
3385 ++adapter->restart_queue; 3549 tx_ring->tx_stats.restart_queue++;
3386 return 0; 3550 return 0;
3387} 3551}
3388 3552
3389static int igb_maybe_stop_tx(struct net_device *netdev, 3553static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3390 struct igb_ring *tx_ring, int size)
3391{ 3554{
3392 if (igb_desc_unused(tx_ring) >= size) 3555 if (igb_desc_unused(tx_ring) >= size)
3393 return 0; 3556 return 0;
3394 return __igb_maybe_stop_tx(netdev, tx_ring, size); 3557 return __igb_maybe_stop_tx(tx_ring, size);
3395} 3558}
3396 3559
3397static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, 3560netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3398 struct net_device *netdev, 3561 struct igb_ring *tx_ring)
3399 struct igb_ring *tx_ring)
3400{ 3562{
3401 struct igb_adapter *adapter = netdev_priv(netdev); 3563 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
3402 unsigned int first; 3564 unsigned int first;
3403 unsigned int tx_flags = 0; 3565 unsigned int tx_flags = 0;
3404 u8 hdr_len = 0; 3566 u8 hdr_len = 0;
3405 int count = 0; 3567 int tso = 0, count;
3406 int tso = 0; 3568 union skb_shared_tx *shtx = skb_tx(skb);
3407 union skb_shared_tx *shtx;
3408
3409 if (test_bit(__IGB_DOWN, &adapter->state)) {
3410 dev_kfree_skb_any(skb);
3411 return NETDEV_TX_OK;
3412 }
3413
3414 if (skb->len <= 0) {
3415 dev_kfree_skb_any(skb);
3416 return NETDEV_TX_OK;
3417 }
3418 3569
3419 /* need: 1 descriptor per page, 3570 /* need: 1 descriptor per page,
3420 * + 2 desc gap to keep tail from touching head, 3571 * + 2 desc gap to keep tail from touching head,
3421 * + 1 desc for skb->data, 3572 * + 1 desc for skb->data,
3422 * + 1 desc for context descriptor, 3573 * + 1 desc for context descriptor,
3423 * otherwise try next time */ 3574 * otherwise try next time */
3424 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) { 3575 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3425 /* this is a hard error */ 3576 /* this is a hard error */
3426 return NETDEV_TX_BUSY; 3577 return NETDEV_TX_BUSY;
3427 } 3578 }
3428 3579
3429 /*
3430 * TODO: check that there currently is no other packet with
3431 * time stamping in the queue
3432 *
3433 * When doing time stamping, keep the connection to the socket
3434 * a while longer: it is still needed by skb_hwtstamp_tx(),
3435 * called either in igb_tx_hwtstamp() or by our caller when
3436 * doing software time stamping.
3437 */
3438 shtx = skb_tx(skb);
3439 if (unlikely(shtx->hardware)) { 3580 if (unlikely(shtx->hardware)) {
3440 shtx->in_progress = 1; 3581 shtx->in_progress = 1;
3441 tx_flags |= IGB_TX_FLAGS_TSTAMP; 3582 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3442 } 3583 }
3443 3584
3444 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3585 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
3445 tx_flags |= IGB_TX_FLAGS_VLAN; 3586 tx_flags |= IGB_TX_FLAGS_VLAN;
3446 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 3587 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3447 } 3588 }
@@ -3450,37 +3591,38 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3450 tx_flags |= IGB_TX_FLAGS_IPV4; 3591 tx_flags |= IGB_TX_FLAGS_IPV4;
3451 3592
3452 first = tx_ring->next_to_use; 3593 first = tx_ring->next_to_use;
3453 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, 3594 if (skb_is_gso(skb)) {
3454 &hdr_len) : 0; 3595 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3455 3596
3456 if (tso < 0) { 3597 if (tso < 0) {
3457 dev_kfree_skb_any(skb); 3598 dev_kfree_skb_any(skb);
3458 return NETDEV_TX_OK; 3599 return NETDEV_TX_OK;
3600 }
3459 } 3601 }
3460 3602
3461 if (tso) 3603 if (tso)
3462 tx_flags |= IGB_TX_FLAGS_TSO; 3604 tx_flags |= IGB_TX_FLAGS_TSO;
3463 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) && 3605 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
3464 (skb->ip_summed == CHECKSUM_PARTIAL)) 3606 (skb->ip_summed == CHECKSUM_PARTIAL))
3465 tx_flags |= IGB_TX_FLAGS_CSUM; 3607 tx_flags |= IGB_TX_FLAGS_CSUM;
3466 3608
3467 /* 3609 /*
3468 * count reflects descriptors mapped, if 0 then mapping error 3610 * count reflects descriptors mapped, if 0 or less then mapping error
3469 * has occured and we need to rewind the descriptor queue 3611 * has occured and we need to rewind the descriptor queue
3470 */ 3612 */
3471 count = igb_tx_map_adv(adapter, tx_ring, skb, first); 3613 count = igb_tx_map_adv(tx_ring, skb, first);
3472 3614 if (count <= 0) {
3473 if (count) {
3474 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
3475 skb->len, hdr_len);
3476 /* Make sure there is space in the ring for the next send. */
3477 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3478 } else {
3479 dev_kfree_skb_any(skb); 3615 dev_kfree_skb_any(skb);
3480 tx_ring->buffer_info[first].time_stamp = 0; 3616 tx_ring->buffer_info[first].time_stamp = 0;
3481 tx_ring->next_to_use = first; 3617 tx_ring->next_to_use = first;
3618 return NETDEV_TX_OK;
3482 } 3619 }
3483 3620
3621 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3622
3623 /* Make sure there is space in the ring for the next send. */
3624 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
3625
3484 return NETDEV_TX_OK; 3626 return NETDEV_TX_OK;
3485} 3627}
3486 3628
@@ -3489,8 +3631,18 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3489{ 3631{
3490 struct igb_adapter *adapter = netdev_priv(netdev); 3632 struct igb_adapter *adapter = netdev_priv(netdev);
3491 struct igb_ring *tx_ring; 3633 struct igb_ring *tx_ring;
3492
3493 int r_idx = 0; 3634 int r_idx = 0;
3635
3636 if (test_bit(__IGB_DOWN, &adapter->state)) {
3637 dev_kfree_skb_any(skb);
3638 return NETDEV_TX_OK;
3639 }
3640
3641 if (skb->len <= 0) {
3642 dev_kfree_skb_any(skb);
3643 return NETDEV_TX_OK;
3644 }
3645
3494 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); 3646 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3495 tx_ring = adapter->multi_tx_table[r_idx]; 3647 tx_ring = adapter->multi_tx_table[r_idx];
3496 3648
@@ -3498,7 +3650,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3498 * to a flow. Right now, performance is impacted slightly negatively 3650 * to a flow. Right now, performance is impacted slightly negatively
3499 * if using multiple tx queues. If the stack breaks away from a 3651 * if using multiple tx queues. If the stack breaks away from a
3500 * single qdisc implementation, we can look at this again. */ 3652 * single qdisc implementation, we can look at this again. */
3501 return igb_xmit_frame_ring_adv(skb, netdev, tx_ring); 3653 return igb_xmit_frame_ring_adv(skb, tx_ring);
3502} 3654}
3503 3655
3504/** 3656/**
@@ -3512,6 +3664,7 @@ static void igb_tx_timeout(struct net_device *netdev)
3512 3664
3513 /* Do the reset outside of interrupt context */ 3665 /* Do the reset outside of interrupt context */
3514 adapter->tx_timeout_count++; 3666 adapter->tx_timeout_count++;
3667
3515 schedule_work(&adapter->reset_task); 3668 schedule_work(&adapter->reset_task);
3516 wr32(E1000_EICS, 3669 wr32(E1000_EICS,
3517 (adapter->eims_enable_mask & ~adapter->eims_other)); 3670 (adapter->eims_enable_mask & ~adapter->eims_other));
@@ -3548,16 +3701,17 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3548static int igb_change_mtu(struct net_device *netdev, int new_mtu) 3701static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3549{ 3702{
3550 struct igb_adapter *adapter = netdev_priv(netdev); 3703 struct igb_adapter *adapter = netdev_priv(netdev);
3704 struct pci_dev *pdev = adapter->pdev;
3551 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3705 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3706 u32 rx_buffer_len, i;
3552 3707
3553 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3708 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3554 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3709 dev_err(&pdev->dev, "Invalid MTU setting\n");
3555 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3556 return -EINVAL; 3710 return -EINVAL;
3557 } 3711 }
3558 3712
3559 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3713 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3560 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); 3714 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
3561 return -EINVAL; 3715 return -EINVAL;
3562 } 3716 }
3563 3717
@@ -3566,8 +3720,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3566 3720
3567 /* igb_down has a dependency on max_frame_size */ 3721 /* igb_down has a dependency on max_frame_size */
3568 adapter->max_frame_size = max_frame; 3722 adapter->max_frame_size = max_frame;
3569 if (netif_running(netdev))
3570 igb_down(adapter);
3571 3723
3572 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3724 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3573 * means we reserve 2 more, this pushes us to allocate from the next 3725 * means we reserve 2 more, this pushes us to allocate from the next
@@ -3575,35 +3727,23 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3575 * i.e. RXBUFFER_2048 --> size-4096 slab 3727 * i.e. RXBUFFER_2048 --> size-4096 slab
3576 */ 3728 */
3577 3729
3578 if (max_frame <= IGB_RXBUFFER_256) 3730 if (max_frame <= IGB_RXBUFFER_1024)
3579 adapter->rx_buffer_len = IGB_RXBUFFER_256; 3731 rx_buffer_len = IGB_RXBUFFER_1024;
3580 else if (max_frame <= IGB_RXBUFFER_512) 3732 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3581 adapter->rx_buffer_len = IGB_RXBUFFER_512; 3733 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3582 else if (max_frame <= IGB_RXBUFFER_1024)
3583 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3584 else if (max_frame <= IGB_RXBUFFER_2048)
3585 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3586 else 3734 else
3587#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 3735 rx_buffer_len = IGB_RXBUFFER_128;
3588 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3589#else
3590 adapter->rx_buffer_len = PAGE_SIZE / 2;
3591#endif
3592
3593 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3594 if (adapter->vfs_allocated_count &&
3595 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3596 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3597 3736
3598 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3737 if (netif_running(netdev))
3599 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3738 igb_down(adapter);
3600 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3601 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3602 3739
3603 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", 3740 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
3604 netdev->mtu, new_mtu); 3741 netdev->mtu, new_mtu);
3605 netdev->mtu = new_mtu; 3742 netdev->mtu = new_mtu;
3606 3743
3744 for (i = 0; i < adapter->num_rx_queues; i++)
3745 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3746
3607 if (netif_running(netdev)) 3747 if (netif_running(netdev))
3608 igb_up(adapter); 3748 igb_up(adapter);
3609 else 3749 else
@@ -3624,7 +3764,10 @@ void igb_update_stats(struct igb_adapter *adapter)
3624 struct net_device *netdev = adapter->netdev; 3764 struct net_device *netdev = adapter->netdev;
3625 struct e1000_hw *hw = &adapter->hw; 3765 struct e1000_hw *hw = &adapter->hw;
3626 struct pci_dev *pdev = adapter->pdev; 3766 struct pci_dev *pdev = adapter->pdev;
3767 u32 rnbc;
3627 u16 phy_tmp; 3768 u16 phy_tmp;
3769 int i;
3770 u64 bytes, packets;
3628 3771
3629#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3772#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3630 3773
@@ -3637,6 +3780,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3637 if (pci_channel_offline(pdev)) 3780 if (pci_channel_offline(pdev))
3638 return; 3781 return;
3639 3782
3783 bytes = 0;
3784 packets = 0;
3785 for (i = 0; i < adapter->num_rx_queues; i++) {
3786 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3787 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3788 netdev->stats.rx_fifo_errors += rqdpc_tmp;
3789 bytes += adapter->rx_ring[i].rx_stats.bytes;
3790 packets += adapter->rx_ring[i].rx_stats.packets;
3791 }
3792
3793 netdev->stats.rx_bytes = bytes;
3794 netdev->stats.rx_packets = packets;
3795
3796 bytes = 0;
3797 packets = 0;
3798 for (i = 0; i < adapter->num_tx_queues; i++) {
3799 bytes += adapter->tx_ring[i].tx_stats.bytes;
3800 packets += adapter->tx_ring[i].tx_stats.packets;
3801 }
3802 netdev->stats.tx_bytes = bytes;
3803 netdev->stats.tx_packets = packets;
3804
3805 /* read stats registers */
3640 adapter->stats.crcerrs += rd32(E1000_CRCERRS); 3806 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3641 adapter->stats.gprc += rd32(E1000_GPRC); 3807 adapter->stats.gprc += rd32(E1000_GPRC);
3642 adapter->stats.gorc += rd32(E1000_GORCL); 3808 adapter->stats.gorc += rd32(E1000_GORCL);
@@ -3669,7 +3835,9 @@ void igb_update_stats(struct igb_adapter *adapter)
3669 adapter->stats.gptc += rd32(E1000_GPTC); 3835 adapter->stats.gptc += rd32(E1000_GPTC);
3670 adapter->stats.gotc += rd32(E1000_GOTCL); 3836 adapter->stats.gotc += rd32(E1000_GOTCL);
3671 rd32(E1000_GOTCH); /* clear GOTCL */ 3837 rd32(E1000_GOTCH); /* clear GOTCL */
3672 adapter->stats.rnbc += rd32(E1000_RNBC); 3838 rnbc = rd32(E1000_RNBC);
3839 adapter->stats.rnbc += rnbc;
3840 netdev->stats.rx_fifo_errors += rnbc;
3673 adapter->stats.ruc += rd32(E1000_RUC); 3841 adapter->stats.ruc += rd32(E1000_RUC);
3674 adapter->stats.rfc += rd32(E1000_RFC); 3842 adapter->stats.rfc += rd32(E1000_RFC);
3675 adapter->stats.rjc += rd32(E1000_RJC); 3843 adapter->stats.rjc += rd32(E1000_RJC);
@@ -3688,7 +3856,6 @@ void igb_update_stats(struct igb_adapter *adapter)
3688 adapter->stats.bptc += rd32(E1000_BPTC); 3856 adapter->stats.bptc += rd32(E1000_BPTC);
3689 3857
3690 /* used for adaptive IFS */ 3858 /* used for adaptive IFS */
3691
3692 hw->mac.tx_packet_delta = rd32(E1000_TPT); 3859 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3693 adapter->stats.tpt += hw->mac.tx_packet_delta; 3860 adapter->stats.tpt += hw->mac.tx_packet_delta;
3694 hw->mac.collision_delta = rd32(E1000_COLC); 3861 hw->mac.collision_delta = rd32(E1000_COLC);
@@ -3716,33 +3883,6 @@ void igb_update_stats(struct igb_adapter *adapter)
3716 3883
3717 /* Rx Errors */ 3884 /* Rx Errors */
3718 3885
3719 if (hw->mac.type != e1000_82575) {
3720 u32 rqdpc_tmp;
3721 u64 rqdpc_total = 0;
3722 int i;
3723 /* Read out drops stats per RX queue. Notice RQDPC (Receive
3724 * Queue Drop Packet Count) stats only gets incremented, if
3725 * the DROP_EN but it set (in the SRRCTL register for that
3726 * queue). If DROP_EN bit is NOT set, then the some what
3727 * equivalent count is stored in RNBC (not per queue basis).
3728 * Also note the drop count is due to lack of available
3729 * descriptors.
3730 */
3731 for (i = 0; i < adapter->num_rx_queues; i++) {
3732 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
3733 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3734 rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
3735 }
3736 netdev->stats.rx_fifo_errors = rqdpc_total;
3737 }
3738
3739 /* Note RNBC (Receive No Buffers Count) is an not an exact
3740 * drop count as the hardware FIFO might save the day. Thats
3741 * one of the reason for saving it in rx_fifo_errors, as its
3742 * potentially not a true drop.
3743 */
3744 netdev->stats.rx_fifo_errors += adapter->stats.rnbc;
3745
3746 /* RLEC on some newer hardware can be incorrect so build 3886 /* RLEC on some newer hardware can be incorrect so build
3747 * our own version based on RUC and ROC */ 3887 * our own version based on RUC and ROC */
3748 netdev->stats.rx_errors = adapter->stats.rxerrc + 3888 netdev->stats.rx_errors = adapter->stats.rxerrc +
@@ -3781,14 +3921,12 @@ void igb_update_stats(struct igb_adapter *adapter)
3781 3921
3782static irqreturn_t igb_msix_other(int irq, void *data) 3922static irqreturn_t igb_msix_other(int irq, void *data)
3783{ 3923{
3784 struct net_device *netdev = data; 3924 struct igb_adapter *adapter = data;
3785 struct igb_adapter *adapter = netdev_priv(netdev);
3786 struct e1000_hw *hw = &adapter->hw; 3925 struct e1000_hw *hw = &adapter->hw;
3787 u32 icr = rd32(E1000_ICR); 3926 u32 icr = rd32(E1000_ICR);
3788
3789 /* reading ICR causes bit 31 of EICR to be cleared */ 3927 /* reading ICR causes bit 31 of EICR to be cleared */
3790 3928
3791 if(icr & E1000_ICR_DOUTSYNC) { 3929 if (icr & E1000_ICR_DOUTSYNC) {
3792 /* HW is reporting DMA is out of sync */ 3930 /* HW is reporting DMA is out of sync */
3793 adapter->stats.doosync++; 3931 adapter->stats.doosync++;
3794 } 3932 }
@@ -3804,125 +3942,90 @@ static irqreturn_t igb_msix_other(int irq, void *data)
3804 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3942 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3805 } 3943 }
3806 3944
3807 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB); 3945 if (adapter->vfs_allocated_count)
3946 wr32(E1000_IMS, E1000_IMS_LSC |
3947 E1000_IMS_VMMB |
3948 E1000_IMS_DOUTSYNC);
3949 else
3950 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
3808 wr32(E1000_EIMS, adapter->eims_other); 3951 wr32(E1000_EIMS, adapter->eims_other);
3809 3952
3810 return IRQ_HANDLED; 3953 return IRQ_HANDLED;
3811} 3954}
3812 3955
3813static irqreturn_t igb_msix_tx(int irq, void *data) 3956static void igb_write_itr(struct igb_q_vector *q_vector)
3814{ 3957{
3815 struct igb_ring *tx_ring = data; 3958 u32 itr_val = q_vector->itr_val & 0x7FFC;
3816 struct igb_adapter *adapter = tx_ring->adapter;
3817 struct e1000_hw *hw = &adapter->hw;
3818 3959
3819#ifdef CONFIG_IGB_DCA 3960 if (!q_vector->set_itr)
3820 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3961 return;
3821 igb_update_tx_dca(tx_ring);
3822#endif
3823 3962
3824 tx_ring->total_bytes = 0; 3963 if (!itr_val)
3825 tx_ring->total_packets = 0; 3964 itr_val = 0x4;
3826 3965
3827 /* auto mask will automatically reenable the interrupt when we write 3966 if (q_vector->itr_shift)
3828 * EICS */ 3967 itr_val |= itr_val << q_vector->itr_shift;
3829 if (!igb_clean_tx_irq(tx_ring))
3830 /* Ring was not completely cleaned, so fire another interrupt */
3831 wr32(E1000_EICS, tx_ring->eims_value);
3832 else 3968 else
3833 wr32(E1000_EIMS, tx_ring->eims_value); 3969 itr_val |= 0x8000000;
3834
3835 return IRQ_HANDLED;
3836}
3837 3970
3838static void igb_write_itr(struct igb_ring *ring) 3971 writel(itr_val, q_vector->itr_register);
3839{ 3972 q_vector->set_itr = 0;
3840 struct e1000_hw *hw = &ring->adapter->hw;
3841 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3842 switch (hw->mac.type) {
3843 case e1000_82576:
3844 wr32(ring->itr_register, ring->itr_val |
3845 0x80000000);
3846 break;
3847 default:
3848 wr32(ring->itr_register, ring->itr_val |
3849 (ring->itr_val << 16));
3850 break;
3851 }
3852 ring->set_itr = 0;
3853 }
3854} 3973}
3855 3974
3856static irqreturn_t igb_msix_rx(int irq, void *data) 3975static irqreturn_t igb_msix_ring(int irq, void *data)
3857{ 3976{
3858 struct igb_ring *rx_ring = data; 3977 struct igb_q_vector *q_vector = data;
3859
3860 /* Write the ITR value calculated at the end of the
3861 * previous interrupt.
3862 */
3863 3978
3864 igb_write_itr(rx_ring); 3979 /* Write the ITR value calculated from the previous interrupt. */
3980 igb_write_itr(q_vector);
3865 3981
3866 if (napi_schedule_prep(&rx_ring->napi)) 3982 napi_schedule(&q_vector->napi);
3867 __napi_schedule(&rx_ring->napi);
3868 3983
3869#ifdef CONFIG_IGB_DCA 3984 return IRQ_HANDLED;
3870 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3871 igb_update_rx_dca(rx_ring);
3872#endif
3873 return IRQ_HANDLED;
3874} 3985}
3875 3986
3876#ifdef CONFIG_IGB_DCA 3987#ifdef CONFIG_IGB_DCA
3877static void igb_update_rx_dca(struct igb_ring *rx_ring) 3988static void igb_update_dca(struct igb_q_vector *q_vector)
3878{ 3989{
3879 u32 dca_rxctrl; 3990 struct igb_adapter *adapter = q_vector->adapter;
3880 struct igb_adapter *adapter = rx_ring->adapter;
3881 struct e1000_hw *hw = &adapter->hw; 3991 struct e1000_hw *hw = &adapter->hw;
3882 int cpu = get_cpu(); 3992 int cpu = get_cpu();
3883 int q = rx_ring->reg_idx;
3884 3993
3885 if (rx_ring->cpu != cpu) { 3994 if (q_vector->cpu == cpu)
3886 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); 3995 goto out_no_update;
3887 if (hw->mac.type == e1000_82576) { 3996
3888 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; 3997 if (q_vector->tx_ring) {
3889 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << 3998 int q = q_vector->tx_ring->reg_idx;
3890 E1000_DCA_RXCTRL_CPUID_SHIFT; 3999 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4000 if (hw->mac.type == e1000_82575) {
4001 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4002 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3891 } else { 4003 } else {
4004 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4005 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4006 E1000_DCA_TXCTRL_CPUID_SHIFT;
4007 }
4008 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4009 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4010 }
4011 if (q_vector->rx_ring) {
4012 int q = q_vector->rx_ring->reg_idx;
4013 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4014 if (hw->mac.type == e1000_82575) {
3892 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; 4015 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3893 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 4016 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4017 } else {
4018 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4019 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4020 E1000_DCA_RXCTRL_CPUID_SHIFT;
3894 } 4021 }
3895 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; 4022 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3896 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; 4023 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3897 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; 4024 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3898 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); 4025 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3899 rx_ring->cpu = cpu;
3900 }
3901 put_cpu();
3902}
3903
3904static void igb_update_tx_dca(struct igb_ring *tx_ring)
3905{
3906 u32 dca_txctrl;
3907 struct igb_adapter *adapter = tx_ring->adapter;
3908 struct e1000_hw *hw = &adapter->hw;
3909 int cpu = get_cpu();
3910 int q = tx_ring->reg_idx;
3911
3912 if (tx_ring->cpu != cpu) {
3913 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3914 if (hw->mac.type == e1000_82576) {
3915 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3916 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3917 E1000_DCA_TXCTRL_CPUID_SHIFT;
3918 } else {
3919 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3920 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3921 }
3922 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3923 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3924 tx_ring->cpu = cpu;
3925 } 4026 }
4027 q_vector->cpu = cpu;
4028out_no_update:
3926 put_cpu(); 4029 put_cpu();
3927} 4030}
3928 4031
@@ -3937,13 +4040,10 @@ static void igb_setup_dca(struct igb_adapter *adapter)
3937 /* Always use CB2 mode, difference is masked in the CB driver. */ 4040 /* Always use CB2 mode, difference is masked in the CB driver. */
3938 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); 4041 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3939 4042
3940 for (i = 0; i < adapter->num_tx_queues; i++) { 4043 for (i = 0; i < adapter->num_q_vectors; i++) {
3941 adapter->tx_ring[i].cpu = -1; 4044 struct igb_q_vector *q_vector = adapter->q_vector[i];
3942 igb_update_tx_dca(&adapter->tx_ring[i]); 4045 q_vector->cpu = -1;
3943 } 4046 igb_update_dca(q_vector);
3944 for (i = 0; i < adapter->num_rx_queues; i++) {
3945 adapter->rx_ring[i].cpu = -1;
3946 igb_update_rx_dca(&adapter->rx_ring[i]);
3947 } 4047 }
3948} 4048}
3949 4049
@@ -3951,6 +4051,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
3951{ 4051{
3952 struct net_device *netdev = dev_get_drvdata(dev); 4052 struct net_device *netdev = dev_get_drvdata(dev);
3953 struct igb_adapter *adapter = netdev_priv(netdev); 4053 struct igb_adapter *adapter = netdev_priv(netdev);
4054 struct pci_dev *pdev = adapter->pdev;
3954 struct e1000_hw *hw = &adapter->hw; 4055 struct e1000_hw *hw = &adapter->hw;
3955 unsigned long event = *(unsigned long *)data; 4056 unsigned long event = *(unsigned long *)data;
3956 4057
@@ -3959,12 +4060,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3959 /* if already enabled, don't do it again */ 4060 /* if already enabled, don't do it again */
3960 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 4061 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3961 break; 4062 break;
3962 /* Always use CB2 mode, difference is masked
3963 * in the CB driver. */
3964 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3965 if (dca_add_requester(dev) == 0) { 4063 if (dca_add_requester(dev) == 0) {
3966 adapter->flags |= IGB_FLAG_DCA_ENABLED; 4064 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3967 dev_info(&adapter->pdev->dev, "DCA enabled\n"); 4065 dev_info(&pdev->dev, "DCA enabled\n");
3968 igb_setup_dca(adapter); 4066 igb_setup_dca(adapter);
3969 break; 4067 break;
3970 } 4068 }
@@ -3972,9 +4070,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3972 case DCA_PROVIDER_REMOVE: 4070 case DCA_PROVIDER_REMOVE:
3973 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 4071 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3974 /* without this a class_device is left 4072 /* without this a class_device is left
3975 * hanging around in the sysfs model */ 4073 * hanging around in the sysfs model */
3976 dca_remove_requester(dev); 4074 dca_remove_requester(dev);
3977 dev_info(&adapter->pdev->dev, "DCA disabled\n"); 4075 dev_info(&pdev->dev, "DCA disabled\n");
3978 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 4076 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3979 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); 4077 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3980 } 4078 }
@@ -4004,12 +4102,51 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
4004 4102
4005 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { 4103 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4006 ping = E1000_PF_CONTROL_MSG; 4104 ping = E1000_PF_CONTROL_MSG;
4007 if (adapter->vf_data[i].clear_to_send) 4105 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
4008 ping |= E1000_VT_MSGTYPE_CTS; 4106 ping |= E1000_VT_MSGTYPE_CTS;
4009 igb_write_mbx(hw, &ping, 1, i); 4107 igb_write_mbx(hw, &ping, 1, i);
4010 } 4108 }
4011} 4109}
4012 4110
4111static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4112{
4113 struct e1000_hw *hw = &adapter->hw;
4114 u32 vmolr = rd32(E1000_VMOLR(vf));
4115 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4116
4117 vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
4118 IGB_VF_FLAG_MULTI_PROMISC);
4119 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4120
4121 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4122 vmolr |= E1000_VMOLR_MPME;
4123 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4124 } else {
4125 /*
4126 * if we have hashes and we are clearing a multicast promisc
4127 * flag we need to write the hashes to the MTA as this step
4128 * was previously skipped
4129 */
4130 if (vf_data->num_vf_mc_hashes > 30) {
4131 vmolr |= E1000_VMOLR_MPME;
4132 } else if (vf_data->num_vf_mc_hashes) {
4133 int j;
4134 vmolr |= E1000_VMOLR_ROMPE;
4135 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4136 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4137 }
4138 }
4139
4140 wr32(E1000_VMOLR(vf), vmolr);
4141
4142 /* there are flags left unprocessed, likely not supported */
4143 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4144 return -EINVAL;
4145
4146 return 0;
4147
4148}
4149
4013static int igb_set_vf_multicasts(struct igb_adapter *adapter, 4150static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4014 u32 *msgbuf, u32 vf) 4151 u32 *msgbuf, u32 vf)
4015{ 4152{
@@ -4018,18 +4155,17 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4018 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 4155 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4019 int i; 4156 int i;
4020 4157
4021 /* only up to 30 hash values supported */ 4158 /* salt away the number of multicast addresses assigned
4022 if (n > 30)
4023 n = 30;
4024
4025 /* salt away the number of multi cast addresses assigned
4026 * to this VF for later use to restore when the PF multi cast 4159 * to this VF for later use to restore when the PF multi cast
4027 * list changes 4160 * list changes
4028 */ 4161 */
4029 vf_data->num_vf_mc_hashes = n; 4162 vf_data->num_vf_mc_hashes = n;
4030 4163
4031 /* VFs are limited to using the MTA hash table for their multicast 4164 /* only up to 30 hash values supported */
4032 * addresses */ 4165 if (n > 30)
4166 n = 30;
4167
4168 /* store the hashes for later use */
4033 for (i = 0; i < n; i++) 4169 for (i = 0; i < n; i++)
4034 vf_data->vf_mc_hashes[i] = hash_list[i]; 4170 vf_data->vf_mc_hashes[i] = hash_list[i];
4035 4171
@@ -4046,9 +4182,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4046 int i, j; 4182 int i, j;
4047 4183
4048 for (i = 0; i < adapter->vfs_allocated_count; i++) { 4184 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4185 u32 vmolr = rd32(E1000_VMOLR(i));
4186 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4187
4049 vf_data = &adapter->vf_data[i]; 4188 vf_data = &adapter->vf_data[i];
4050 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 4189
4051 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 4190 if ((vf_data->num_vf_mc_hashes > 30) ||
4191 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4192 vmolr |= E1000_VMOLR_MPME;
4193 } else if (vf_data->num_vf_mc_hashes) {
4194 vmolr |= E1000_VMOLR_ROMPE;
4195 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4196 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4197 }
4198 wr32(E1000_VMOLR(i), vmolr);
4052 } 4199 }
4053} 4200}
4054 4201
@@ -4086,7 +4233,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4086 struct e1000_hw *hw = &adapter->hw; 4233 struct e1000_hw *hw = &adapter->hw;
4087 u32 reg, i; 4234 u32 reg, i;
4088 4235
4089 /* It is an error to call this function when VFs are not enabled */ 4236 /* The vlvf table only exists on 82576 hardware and newer */
4237 if (hw->mac.type < e1000_82576)
4238 return -1;
4239
4240 /* we only need to do this if VMDq is enabled */
4090 if (!adapter->vfs_allocated_count) 4241 if (!adapter->vfs_allocated_count)
4091 return -1; 4242 return -1;
4092 4243
@@ -4116,16 +4267,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4116 4267
4117 /* if !enabled we need to set this up in vfta */ 4268 /* if !enabled we need to set this up in vfta */
4118 if (!(reg & E1000_VLVF_VLANID_ENABLE)) { 4269 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
4119 /* add VID to filter table, if bit already set 4270 /* add VID to filter table */
4120 * PF must have added it outside of table */ 4271 igb_vfta_set(hw, vid, true);
4121 if (igb_vfta_set(hw, vid, true))
4122 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
4123 adapter->vfs_allocated_count);
4124 reg |= E1000_VLVF_VLANID_ENABLE; 4272 reg |= E1000_VLVF_VLANID_ENABLE;
4125 } 4273 }
4126 reg &= ~E1000_VLVF_VLANID_MASK; 4274 reg &= ~E1000_VLVF_VLANID_MASK;
4127 reg |= vid; 4275 reg |= vid;
4128
4129 wr32(E1000_VLVF(i), reg); 4276 wr32(E1000_VLVF(i), reg);
4130 4277
4131 /* do not modify RLPML for PF devices */ 4278 /* do not modify RLPML for PF devices */
@@ -4141,8 +4288,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4141 reg |= size; 4288 reg |= size;
4142 wr32(E1000_VMOLR(vf), reg); 4289 wr32(E1000_VMOLR(vf), reg);
4143 } 4290 }
4144 adapter->vf_data[vf].vlans_enabled++;
4145 4291
4292 adapter->vf_data[vf].vlans_enabled++;
4146 return 0; 4293 return 0;
4147 } 4294 }
4148 } else { 4295 } else {
@@ -4184,15 +4331,14 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4184 return igb_vlvf_set(adapter, vid, add, vf); 4331 return igb_vlvf_set(adapter, vid, add, vf);
4185} 4332}
4186 4333
4187static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) 4334static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4188{ 4335{
4189 struct e1000_hw *hw = &adapter->hw; 4336 /* clear all flags */
4190 4337 adapter->vf_data[vf].flags = 0;
4191 /* disable mailbox functionality for vf */ 4338 adapter->vf_data[vf].last_nack = jiffies;
4192 adapter->vf_data[vf].clear_to_send = false;
4193 4339
4194 /* reset offloads to defaults */ 4340 /* reset offloads to defaults */
4195 igb_set_vmolr(hw, vf); 4341 igb_set_vmolr(adapter, vf);
4196 4342
4197 /* reset vlans for device */ 4343 /* reset vlans for device */
4198 igb_clear_vf_vfta(adapter, vf); 4344 igb_clear_vf_vfta(adapter, vf);
@@ -4204,7 +4350,18 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4204 igb_set_rx_mode(adapter->netdev); 4350 igb_set_rx_mode(adapter->netdev);
4205} 4351}
4206 4352
4207static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) 4353static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4354{
4355 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4356
4357 /* generate a new mac address as we were hotplug removed/added */
4358 random_ether_addr(vf_mac);
4359
4360 /* process remaining reset events */
4361 igb_vf_reset(adapter, vf);
4362}
4363
4364static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4208{ 4365{
4209 struct e1000_hw *hw = &adapter->hw; 4366 struct e1000_hw *hw = &adapter->hw;
4210 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; 4367 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
@@ -4213,7 +4370,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4213 u8 *addr = (u8 *)(&msgbuf[1]); 4370 u8 *addr = (u8 *)(&msgbuf[1]);
4214 4371
4215 /* process all the same items cleared in a function level reset */ 4372 /* process all the same items cleared in a function level reset */
4216 igb_vf_reset_event(adapter, vf); 4373 igb_vf_reset(adapter, vf);
4217 4374
4218 /* set vf mac address */ 4375 /* set vf mac address */
4219 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); 4376 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
@@ -4224,8 +4381,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4224 reg = rd32(E1000_VFRE); 4381 reg = rd32(E1000_VFRE);
4225 wr32(E1000_VFRE, reg | (1 << vf)); 4382 wr32(E1000_VFRE, reg | (1 << vf));
4226 4383
4227 /* enable mailbox functionality for vf */ 4384 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
4228 adapter->vf_data[vf].clear_to_send = true;
4229 4385
4230 /* reply to reset with ack and vf mac address */ 4386 /* reply to reset with ack and vf mac address */
4231 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 4387 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -4235,66 +4391,45 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4235 4391
4236static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 4392static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4237{ 4393{
4238 unsigned char *addr = (char *)&msg[1]; 4394 unsigned char *addr = (char *)&msg[1];
4239 int err = -1; 4395 int err = -1;
4240 4396
4241 if (is_valid_ether_addr(addr)) 4397 if (is_valid_ether_addr(addr))
4242 err = igb_set_vf_mac(adapter, vf, addr); 4398 err = igb_set_vf_mac(adapter, vf, addr);
4243
4244 return err;
4245 4399
4400 return err;
4246} 4401}
4247 4402
4248static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) 4403static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4249{ 4404{
4250 struct e1000_hw *hw = &adapter->hw; 4405 struct e1000_hw *hw = &adapter->hw;
4406 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4251 u32 msg = E1000_VT_MSGTYPE_NACK; 4407 u32 msg = E1000_VT_MSGTYPE_NACK;
4252 4408
4253 /* if device isn't clear to send it shouldn't be reading either */ 4409 /* if device isn't clear to send it shouldn't be reading either */
4254 if (!adapter->vf_data[vf].clear_to_send) 4410 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4411 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4255 igb_write_mbx(hw, &msg, 1, vf); 4412 igb_write_mbx(hw, &msg, 1, vf);
4256} 4413 vf_data->last_nack = jiffies;
4257
4258
4259static void igb_msg_task(struct igb_adapter *adapter)
4260{
4261 struct e1000_hw *hw = &adapter->hw;
4262 u32 vf;
4263
4264 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4265 /* process any reset requests */
4266 if (!igb_check_for_rst(hw, vf)) {
4267 adapter->vf_data[vf].clear_to_send = false;
4268 igb_vf_reset_event(adapter, vf);
4269 }
4270
4271 /* process any messages pending */
4272 if (!igb_check_for_msg(hw, vf))
4273 igb_rcv_msg_from_vf(adapter, vf);
4274
4275 /* process any acks */
4276 if (!igb_check_for_ack(hw, vf))
4277 igb_rcv_ack_from_vf(adapter, vf);
4278
4279 } 4414 }
4280} 4415}
4281 4416
4282static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) 4417static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4283{ 4418{
4284 u32 mbx_size = E1000_VFMAILBOX_SIZE; 4419 struct pci_dev *pdev = adapter->pdev;
4285 u32 msgbuf[mbx_size]; 4420 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4286 struct e1000_hw *hw = &adapter->hw; 4421 struct e1000_hw *hw = &adapter->hw;
4422 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4287 s32 retval; 4423 s32 retval;
4288 4424
4289 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf); 4425 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4290 4426
4291 if (retval) 4427 if (retval)
4292 dev_err(&adapter->pdev->dev, 4428 dev_err(&pdev->dev, "Error receiving message from VF\n");
4293 "Error receiving message from VF\n");
4294 4429
4295 /* this is a message we already processed, do nothing */ 4430 /* this is a message we already processed, do nothing */
4296 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 4431 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4297 return retval; 4432 return;
4298 4433
4299 /* 4434 /*
4300 * until the vf completes a reset it should not be 4435 * until the vf completes a reset it should not be
@@ -4303,20 +4438,25 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4303 4438
4304 if (msgbuf[0] == E1000_VF_RESET) { 4439 if (msgbuf[0] == E1000_VF_RESET) {
4305 igb_vf_reset_msg(adapter, vf); 4440 igb_vf_reset_msg(adapter, vf);
4306 4441 return;
4307 return retval;
4308 } 4442 }
4309 4443
4310 if (!adapter->vf_data[vf].clear_to_send) { 4444 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
4311 msgbuf[0] |= E1000_VT_MSGTYPE_NACK; 4445 msgbuf[0] = E1000_VT_MSGTYPE_NACK;
4312 igb_write_mbx(hw, msgbuf, 1, vf); 4446 if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4313 return retval; 4447 igb_write_mbx(hw, msgbuf, 1, vf);
4448 vf_data->last_nack = jiffies;
4449 }
4450 return;
4314 } 4451 }
4315 4452
4316 switch ((msgbuf[0] & 0xFFFF)) { 4453 switch ((msgbuf[0] & 0xFFFF)) {
4317 case E1000_VF_SET_MAC_ADDR: 4454 case E1000_VF_SET_MAC_ADDR:
4318 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 4455 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4319 break; 4456 break;
4457 case E1000_VF_SET_PROMISC:
4458 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4459 break;
4320 case E1000_VF_SET_MULTICAST: 4460 case E1000_VF_SET_MULTICAST:
4321 retval = igb_set_vf_multicasts(adapter, msgbuf, vf); 4461 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4322 break; 4462 break;
@@ -4327,7 +4467,7 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4327 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 4467 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4328 break; 4468 break;
4329 default: 4469 default:
4330 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); 4470 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4331 retval = -1; 4471 retval = -1;
4332 break; 4472 break;
4333 } 4473 }
@@ -4341,8 +4481,26 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4341 msgbuf[0] |= E1000_VT_MSGTYPE_CTS; 4481 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4342 4482
4343 igb_write_mbx(hw, msgbuf, 1, vf); 4483 igb_write_mbx(hw, msgbuf, 1, vf);
4484}
4344 4485
4345 return retval; 4486static void igb_msg_task(struct igb_adapter *adapter)
4487{
4488 struct e1000_hw *hw = &adapter->hw;
4489 u32 vf;
4490
4491 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4492 /* process any reset requests */
4493 if (!igb_check_for_rst(hw, vf))
4494 igb_vf_reset_event(adapter, vf);
4495
4496 /* process any messages pending */
4497 if (!igb_check_for_msg(hw, vf))
4498 igb_rcv_msg_from_vf(adapter, vf);
4499
4500 /* process any acks */
4501 if (!igb_check_for_ack(hw, vf))
4502 igb_rcv_ack_from_vf(adapter, vf);
4503 }
4346} 4504}
4347 4505
4348/** 4506/**
@@ -4379,15 +4537,15 @@ static void igb_set_uta(struct igb_adapter *adapter)
4379 **/ 4537 **/
4380static irqreturn_t igb_intr_msi(int irq, void *data) 4538static irqreturn_t igb_intr_msi(int irq, void *data)
4381{ 4539{
4382 struct net_device *netdev = data; 4540 struct igb_adapter *adapter = data;
4383 struct igb_adapter *adapter = netdev_priv(netdev); 4541 struct igb_q_vector *q_vector = adapter->q_vector[0];
4384 struct e1000_hw *hw = &adapter->hw; 4542 struct e1000_hw *hw = &adapter->hw;
4385 /* read ICR disables interrupts using IAM */ 4543 /* read ICR disables interrupts using IAM */
4386 u32 icr = rd32(E1000_ICR); 4544 u32 icr = rd32(E1000_ICR);
4387 4545
4388 igb_write_itr(adapter->rx_ring); 4546 igb_write_itr(q_vector);
4389 4547
4390 if(icr & E1000_ICR_DOUTSYNC) { 4548 if (icr & E1000_ICR_DOUTSYNC) {
4391 /* HW is reporting DMA is out of sync */ 4549 /* HW is reporting DMA is out of sync */
4392 adapter->stats.doosync++; 4550 adapter->stats.doosync++;
4393 } 4551 }
@@ -4398,7 +4556,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4398 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4556 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4399 } 4557 }
4400 4558
4401 napi_schedule(&adapter->rx_ring[0].napi); 4559 napi_schedule(&q_vector->napi);
4402 4560
4403 return IRQ_HANDLED; 4561 return IRQ_HANDLED;
4404} 4562}
@@ -4410,8 +4568,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4410 **/ 4568 **/
4411static irqreturn_t igb_intr(int irq, void *data) 4569static irqreturn_t igb_intr(int irq, void *data)
4412{ 4570{
4413 struct net_device *netdev = data; 4571 struct igb_adapter *adapter = data;
4414 struct igb_adapter *adapter = netdev_priv(netdev); 4572 struct igb_q_vector *q_vector = adapter->q_vector[0];
4415 struct e1000_hw *hw = &adapter->hw; 4573 struct e1000_hw *hw = &adapter->hw;
4416 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 4574 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4417 * need for the IMC write */ 4575 * need for the IMC write */
@@ -4419,14 +4577,14 @@ static irqreturn_t igb_intr(int irq, void *data)
4419 if (!icr) 4577 if (!icr)
4420 return IRQ_NONE; /* Not our interrupt */ 4578 return IRQ_NONE; /* Not our interrupt */
4421 4579
4422 igb_write_itr(adapter->rx_ring); 4580 igb_write_itr(q_vector);
4423 4581
4424 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 4582 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4425 * not set, then the adapter didn't send an interrupt */ 4583 * not set, then the adapter didn't send an interrupt */
4426 if (!(icr & E1000_ICR_INT_ASSERTED)) 4584 if (!(icr & E1000_ICR_INT_ASSERTED))
4427 return IRQ_NONE; 4585 return IRQ_NONE;
4428 4586
4429 if(icr & E1000_ICR_DOUTSYNC) { 4587 if (icr & E1000_ICR_DOUTSYNC) {
4430 /* HW is reporting DMA is out of sync */ 4588 /* HW is reporting DMA is out of sync */
4431 adapter->stats.doosync++; 4589 adapter->stats.doosync++;
4432 } 4590 }
@@ -4438,26 +4596,27 @@ static irqreturn_t igb_intr(int irq, void *data)
4438 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4596 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4439 } 4597 }
4440 4598
4441 napi_schedule(&adapter->rx_ring[0].napi); 4599 napi_schedule(&q_vector->napi);
4442 4600
4443 return IRQ_HANDLED; 4601 return IRQ_HANDLED;
4444} 4602}
4445 4603
4446static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) 4604static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
4447{ 4605{
4448 struct igb_adapter *adapter = rx_ring->adapter; 4606 struct igb_adapter *adapter = q_vector->adapter;
4449 struct e1000_hw *hw = &adapter->hw; 4607 struct e1000_hw *hw = &adapter->hw;
4450 4608
4451 if (adapter->itr_setting & 3) { 4609 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4452 if (adapter->num_rx_queues == 1) 4610 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
4611 if (!adapter->msix_entries)
4453 igb_set_itr(adapter); 4612 igb_set_itr(adapter);
4454 else 4613 else
4455 igb_update_ring_itr(rx_ring); 4614 igb_update_ring_itr(q_vector);
4456 } 4615 }
4457 4616
4458 if (!test_bit(__IGB_DOWN, &adapter->state)) { 4617 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4459 if (adapter->msix_entries) 4618 if (adapter->msix_entries)
4460 wr32(E1000_EIMS, rx_ring->eims_value); 4619 wr32(E1000_EIMS, q_vector->eims_value);
4461 else 4620 else
4462 igb_irq_enable(adapter); 4621 igb_irq_enable(adapter);
4463 } 4622 }
@@ -4470,76 +4629,94 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
4470 **/ 4629 **/
4471static int igb_poll(struct napi_struct *napi, int budget) 4630static int igb_poll(struct napi_struct *napi, int budget)
4472{ 4631{
4473 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); 4632 struct igb_q_vector *q_vector = container_of(napi,
4474 int work_done = 0; 4633 struct igb_q_vector,
4634 napi);
4635 int tx_clean_complete = 1, work_done = 0;
4475 4636
4476#ifdef CONFIG_IGB_DCA 4637#ifdef CONFIG_IGB_DCA
4477 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4638 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4478 igb_update_rx_dca(rx_ring); 4639 igb_update_dca(q_vector);
4479#endif 4640#endif
4480 igb_clean_rx_irq_adv(rx_ring, &work_done, budget); 4641 if (q_vector->tx_ring)
4642 tx_clean_complete = igb_clean_tx_irq(q_vector);
4481 4643
4482 if (rx_ring->buddy) { 4644 if (q_vector->rx_ring)
4483#ifdef CONFIG_IGB_DCA 4645 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4484 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4646
4485 igb_update_tx_dca(rx_ring->buddy); 4647 if (!tx_clean_complete)
4486#endif 4648 work_done = budget;
4487 if (!igb_clean_tx_irq(rx_ring->buddy))
4488 work_done = budget;
4489 }
4490 4649
4491 /* If not enough Rx work done, exit the polling mode */ 4650 /* If not enough Rx work done, exit the polling mode */
4492 if (work_done < budget) { 4651 if (work_done < budget) {
4493 napi_complete(napi); 4652 napi_complete(napi);
4494 igb_rx_irq_enable(rx_ring); 4653 igb_ring_irq_enable(q_vector);
4495 } 4654 }
4496 4655
4497 return work_done; 4656 return work_done;
4498} 4657}
4499 4658
4500/** 4659/**
4501 * igb_hwtstamp - utility function which checks for TX time stamp 4660 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
4502 * @adapter: board private structure 4661 * @adapter: board private structure
4662 * @shhwtstamps: timestamp structure to update
4663 * @regval: unsigned 64bit system time value.
4664 *
4665 * We need to convert the system time value stored in the RX/TXSTMP registers
4666 * into a hwtstamp which can be used by the upper level timestamping functions
4667 */
4668static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4669 struct skb_shared_hwtstamps *shhwtstamps,
4670 u64 regval)
4671{
4672 u64 ns;
4673
4674 ns = timecounter_cyc2time(&adapter->clock, regval);
4675 timecompare_update(&adapter->compare, ns);
4676 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4677 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4678 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4679}
4680
4681/**
4682 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4683 * @q_vector: pointer to q_vector containing needed info
4503 * @skb: packet that was just sent 4684 * @skb: packet that was just sent
4504 * 4685 *
4505 * If we were asked to do hardware stamping and such a time stamp is 4686 * If we were asked to do hardware stamping and such a time stamp is
4506 * available, then it must have been for this skb here because we only 4687 * available, then it must have been for this skb here because we only
4507 * allow only one such packet into the queue. 4688 * allow only one such packet into the queue.
4508 */ 4689 */
4509static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb) 4690static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
4510{ 4691{
4692 struct igb_adapter *adapter = q_vector->adapter;
4511 union skb_shared_tx *shtx = skb_tx(skb); 4693 union skb_shared_tx *shtx = skb_tx(skb);
4512 struct e1000_hw *hw = &adapter->hw; 4694 struct e1000_hw *hw = &adapter->hw;
4695 struct skb_shared_hwtstamps shhwtstamps;
4696 u64 regval;
4513 4697
4514 if (unlikely(shtx->hardware)) { 4698 /* if skb does not support hw timestamp or TX stamp not valid exit */
4515 u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID; 4699 if (likely(!shtx->hardware) ||
4516 if (valid) { 4700 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4517 u64 regval = rd32(E1000_TXSTMPL); 4701 return;
4518 u64 ns; 4702
4519 struct skb_shared_hwtstamps shhwtstamps; 4703 regval = rd32(E1000_TXSTMPL);
4520 4704 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4521 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 4705
4522 regval |= (u64)rd32(E1000_TXSTMPH) << 32; 4706 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4523 ns = timecounter_cyc2time(&adapter->clock, 4707 skb_tstamp_tx(skb, &shhwtstamps);
4524 regval);
4525 timecompare_update(&adapter->compare, ns);
4526 shhwtstamps.hwtstamp = ns_to_ktime(ns);
4527 shhwtstamps.syststamp =
4528 timecompare_transform(&adapter->compare, ns);
4529 skb_tstamp_tx(skb, &shhwtstamps);
4530 }
4531 }
4532} 4708}
4533 4709
4534/** 4710/**
4535 * igb_clean_tx_irq - Reclaim resources after transmit completes 4711 * igb_clean_tx_irq - Reclaim resources after transmit completes
4536 * @adapter: board private structure 4712 * @q_vector: pointer to q_vector containing needed info
4537 * returns true if ring is completely cleaned 4713 * returns true if ring is completely cleaned
4538 **/ 4714 **/
4539static bool igb_clean_tx_irq(struct igb_ring *tx_ring) 4715static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4540{ 4716{
4541 struct igb_adapter *adapter = tx_ring->adapter; 4717 struct igb_adapter *adapter = q_vector->adapter;
4542 struct net_device *netdev = adapter->netdev; 4718 struct igb_ring *tx_ring = q_vector->tx_ring;
4719 struct net_device *netdev = tx_ring->netdev;
4543 struct e1000_hw *hw = &adapter->hw; 4720 struct e1000_hw *hw = &adapter->hw;
4544 struct igb_buffer *buffer_info; 4721 struct igb_buffer *buffer_info;
4545 struct sk_buff *skb; 4722 struct sk_buff *skb;
@@ -4570,10 +4747,10 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4570 total_packets += segs; 4747 total_packets += segs;
4571 total_bytes += bytecount; 4748 total_bytes += bytecount;
4572 4749
4573 igb_tx_hwtstamp(adapter, skb); 4750 igb_tx_hwtstamp(q_vector, skb);
4574 } 4751 }
4575 4752
4576 igb_unmap_and_free_tx_resource(adapter, buffer_info); 4753 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
4577 tx_desc->wb.status = 0; 4754 tx_desc->wb.status = 0;
4578 4755
4579 i++; 4756 i++;
@@ -4596,7 +4773,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4596 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 4773 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
4597 !(test_bit(__IGB_DOWN, &adapter->state))) { 4774 !(test_bit(__IGB_DOWN, &adapter->state))) {
4598 netif_wake_subqueue(netdev, tx_ring->queue_index); 4775 netif_wake_subqueue(netdev, tx_ring->queue_index);
4599 ++adapter->restart_queue; 4776 tx_ring->tx_stats.restart_queue++;
4600 } 4777 }
4601 } 4778 }
4602 4779
@@ -4611,7 +4788,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4611 E1000_STATUS_TXOFF)) { 4788 E1000_STATUS_TXOFF)) {
4612 4789
4613 /* detected Tx unit hang */ 4790 /* detected Tx unit hang */
4614 dev_err(&adapter->pdev->dev, 4791 dev_err(&tx_ring->pdev->dev,
4615 "Detected Tx Unit Hang\n" 4792 "Detected Tx Unit Hang\n"
4616 " Tx Queue <%d>\n" 4793 " Tx Queue <%d>\n"
4617 " TDH <%x>\n" 4794 " TDH <%x>\n"
@@ -4624,11 +4801,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4624 " jiffies <%lx>\n" 4801 " jiffies <%lx>\n"
4625 " desc.status <%x>\n", 4802 " desc.status <%x>\n",
4626 tx_ring->queue_index, 4803 tx_ring->queue_index,
4627 readl(adapter->hw.hw_addr + tx_ring->head), 4804 readl(tx_ring->head),
4628 readl(adapter->hw.hw_addr + tx_ring->tail), 4805 readl(tx_ring->tail),
4629 tx_ring->next_to_use, 4806 tx_ring->next_to_use,
4630 tx_ring->next_to_clean, 4807 tx_ring->next_to_clean,
4631 tx_ring->buffer_info[i].time_stamp, 4808 tx_ring->buffer_info[eop].time_stamp,
4632 eop, 4809 eop,
4633 jiffies, 4810 jiffies,
4634 eop_desc->wb.status); 4811 eop_desc->wb.status);
@@ -4639,43 +4816,38 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4639 tx_ring->total_packets += total_packets; 4816 tx_ring->total_packets += total_packets;
4640 tx_ring->tx_stats.bytes += total_bytes; 4817 tx_ring->tx_stats.bytes += total_bytes;
4641 tx_ring->tx_stats.packets += total_packets; 4818 tx_ring->tx_stats.packets += total_packets;
4642 netdev->stats.tx_bytes += total_bytes;
4643 netdev->stats.tx_packets += total_packets;
4644 return (count < tx_ring->count); 4819 return (count < tx_ring->count);
4645} 4820}
4646 4821
4647/** 4822/**
4648 * igb_receive_skb - helper function to handle rx indications 4823 * igb_receive_skb - helper function to handle rx indications
4649 * @ring: pointer to receive ring receving this packet 4824 * @q_vector: structure containing interrupt and ring information
4650 * @status: descriptor status field as written by hardware 4825 * @skb: packet to send up
4651 * @rx_desc: receive descriptor containing vlan and type information. 4826 * @vlan_tag: vlan tag for packet
4652 * @skb: pointer to sk_buff to be indicated to stack
4653 **/ 4827 **/
4654static void igb_receive_skb(struct igb_ring *ring, u8 status, 4828static void igb_receive_skb(struct igb_q_vector *q_vector,
4655 union e1000_adv_rx_desc * rx_desc, 4829 struct sk_buff *skb,
4656 struct sk_buff *skb) 4830 u16 vlan_tag)
4657{ 4831{
4658 struct igb_adapter * adapter = ring->adapter; 4832 struct igb_adapter *adapter = q_vector->adapter;
4659 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); 4833
4660 4834 if (vlan_tag)
4661 skb_record_rx_queue(skb, ring->queue_index); 4835 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
4662 if (vlan_extracted) 4836 vlan_tag, skb);
4663 vlan_gro_receive(&ring->napi, adapter->vlgrp,
4664 le16_to_cpu(rx_desc->wb.upper.vlan),
4665 skb);
4666 else 4837 else
4667 napi_gro_receive(&ring->napi, skb); 4838 napi_gro_receive(&q_vector->napi, skb);
4668} 4839}
4669 4840
4670static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, 4841static inline void igb_rx_checksum_adv(struct igb_ring *ring,
4671 u32 status_err, struct sk_buff *skb) 4842 u32 status_err, struct sk_buff *skb)
4672{ 4843{
4673 skb->ip_summed = CHECKSUM_NONE; 4844 skb->ip_summed = CHECKSUM_NONE;
4674 4845
4675 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 4846 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
4676 if ((status_err & E1000_RXD_STAT_IXSM) || 4847 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
4677 (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED)) 4848 (status_err & E1000_RXD_STAT_IXSM))
4678 return; 4849 return;
4850
4679 /* TCP/UDP checksum error bit is set */ 4851 /* TCP/UDP checksum error bit is set */
4680 if (status_err & 4852 if (status_err &
4681 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { 4853 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4684,9 +4856,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4684 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 4856 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
4685 * packets, (aka let the stack check the crc32c) 4857 * packets, (aka let the stack check the crc32c)
4686 */ 4858 */
4687 if (!((adapter->hw.mac.type == e1000_82576) && 4859 if ((skb->len == 60) &&
4688 (skb->len == 60))) 4860 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
4689 adapter->hw_csum_err++; 4861 ring->rx_stats.csum_err++;
4862
4690 /* let the stack verify checksum errors */ 4863 /* let the stack verify checksum errors */
4691 return; 4864 return;
4692 } 4865 }
@@ -4694,11 +4867,38 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4694 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) 4867 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
4695 skb->ip_summed = CHECKSUM_UNNECESSARY; 4868 skb->ip_summed = CHECKSUM_UNNECESSARY;
4696 4869
4697 dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err); 4870 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
4698 adapter->hw_csum_good++;
4699} 4871}
4700 4872
4701static inline u16 igb_get_hlen(struct igb_adapter *adapter, 4873static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
4874 struct sk_buff *skb)
4875{
4876 struct igb_adapter *adapter = q_vector->adapter;
4877 struct e1000_hw *hw = &adapter->hw;
4878 u64 regval;
4879
4880 /*
4881 * If this bit is set, then the RX registers contain the time stamp. No
4882 * other packet will be time stamped until we read these registers, so
4883 * read the registers to make them available again. Because only one
4884 * packet can be time stamped at a time, we know that the register
4885 * values must belong to this one here and therefore we don't need to
4886 * compare any of the additional attributes stored for it.
4887 *
4888 * If nothing went wrong, then it should have a skb_shared_tx that we
4889 * can turn into a skb_shared_hwtstamps.
4890 */
4891 if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
4892 return;
4893 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
4894 return;
4895
4896 regval = rd32(E1000_RXSTMPL);
4897 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4898
4899 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
4900}
4901static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
4702 union e1000_adv_rx_desc *rx_desc) 4902 union e1000_adv_rx_desc *rx_desc)
4703{ 4903{
4704 /* HW will not DMA in data larger than the given buffer, even if it 4904 /* HW will not DMA in data larger than the given buffer, even if it
@@ -4707,18 +4907,17 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
4707 */ 4907 */
4708 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & 4908 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4709 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 4909 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4710 if (hlen > adapter->rx_ps_hdr_size) 4910 if (hlen > rx_ring->rx_buffer_len)
4711 hlen = adapter->rx_ps_hdr_size; 4911 hlen = rx_ring->rx_buffer_len;
4712 return hlen; 4912 return hlen;
4713} 4913}
4714 4914
4715static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, 4915static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4716 int *work_done, int budget) 4916 int *work_done, int budget)
4717{ 4917{
4718 struct igb_adapter *adapter = rx_ring->adapter; 4918 struct igb_ring *rx_ring = q_vector->rx_ring;
4719 struct net_device *netdev = adapter->netdev; 4919 struct net_device *netdev = rx_ring->netdev;
4720 struct e1000_hw *hw = &adapter->hw; 4920 struct pci_dev *pdev = rx_ring->pdev;
4721 struct pci_dev *pdev = adapter->pdev;
4722 union e1000_adv_rx_desc *rx_desc , *next_rxd; 4921 union e1000_adv_rx_desc *rx_desc , *next_rxd;
4723 struct igb_buffer *buffer_info , *next_buffer; 4922 struct igb_buffer *buffer_info , *next_buffer;
4724 struct sk_buff *skb; 4923 struct sk_buff *skb;
@@ -4728,6 +4927,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4728 unsigned int i; 4927 unsigned int i;
4729 u32 staterr; 4928 u32 staterr;
4730 u16 length; 4929 u16 length;
4930 u16 vlan_tag;
4731 4931
4732 i = rx_ring->next_to_clean; 4932 i = rx_ring->next_to_clean;
4733 buffer_info = &rx_ring->buffer_info[i]; 4933 buffer_info = &rx_ring->buffer_info[i];
@@ -4746,6 +4946,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4746 i++; 4946 i++;
4747 if (i == rx_ring->count) 4947 if (i == rx_ring->count)
4748 i = 0; 4948 i = 0;
4949
4749 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); 4950 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4750 prefetch(next_rxd); 4951 prefetch(next_rxd);
4751 next_buffer = &rx_ring->buffer_info[i]; 4952 next_buffer = &rx_ring->buffer_info[i];
@@ -4754,23 +4955,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4754 cleaned = true; 4955 cleaned = true;
4755 cleaned_count++; 4956 cleaned_count++;
4756 4957
4757 /* this is the fast path for the non-packet split case */
4758 if (!adapter->rx_ps_hdr_size) {
4759 pci_unmap_single(pdev, buffer_info->dma,
4760 adapter->rx_buffer_len,
4761 PCI_DMA_FROMDEVICE);
4762 buffer_info->dma = 0;
4763 skb_put(skb, length);
4764 goto send_up;
4765 }
4766
4767 if (buffer_info->dma) { 4958 if (buffer_info->dma) {
4768 u16 hlen = igb_get_hlen(adapter, rx_desc);
4769 pci_unmap_single(pdev, buffer_info->dma, 4959 pci_unmap_single(pdev, buffer_info->dma,
4770 adapter->rx_ps_hdr_size, 4960 rx_ring->rx_buffer_len,
4771 PCI_DMA_FROMDEVICE); 4961 PCI_DMA_FROMDEVICE);
4772 buffer_info->dma = 0; 4962 buffer_info->dma = 0;
4773 skb_put(skb, hlen); 4963 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
4964 skb_put(skb, length);
4965 goto send_up;
4966 }
4967 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
4774 } 4968 }
4775 4969
4776 if (length) { 4970 if (length) {
@@ -4783,15 +4977,13 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4783 buffer_info->page_offset, 4977 buffer_info->page_offset,
4784 length); 4978 length);
4785 4979
4786 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || 4980 if (page_count(buffer_info->page) != 1)
4787 (page_count(buffer_info->page) != 1))
4788 buffer_info->page = NULL; 4981 buffer_info->page = NULL;
4789 else 4982 else
4790 get_page(buffer_info->page); 4983 get_page(buffer_info->page);
4791 4984
4792 skb->len += length; 4985 skb->len += length;
4793 skb->data_len += length; 4986 skb->data_len += length;
4794
4795 skb->truesize += length; 4987 skb->truesize += length;
4796 } 4988 }
4797 4989
@@ -4803,60 +4995,24 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4803 goto next_desc; 4995 goto next_desc;
4804 } 4996 }
4805send_up: 4997send_up:
4806 /*
4807 * If this bit is set, then the RX registers contain
4808 * the time stamp. No other packet will be time
4809 * stamped until we read these registers, so read the
4810 * registers to make them available again. Because
4811 * only one packet can be time stamped at a time, we
4812 * know that the register values must belong to this
4813 * one here and therefore we don't need to compare
4814 * any of the additional attributes stored for it.
4815 *
4816 * If nothing went wrong, then it should have a
4817 * skb_shared_tx that we can turn into a
4818 * skb_shared_hwtstamps.
4819 *
4820 * TODO: can time stamping be triggered (thus locking
4821 * the registers) without the packet reaching this point
4822 * here? In that case RX time stamping would get stuck.
4823 *
4824 * TODO: in "time stamp all packets" mode this bit is
4825 * not set. Need a global flag for this mode and then
4826 * always read the registers. Cannot be done without
4827 * a race condition.
4828 */
4829 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4830 u64 regval;
4831 u64 ns;
4832 struct skb_shared_hwtstamps *shhwtstamps =
4833 skb_hwtstamps(skb);
4834
4835 WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4836 "igb: no RX time stamp available for time stamped packet");
4837 regval = rd32(E1000_RXSTMPL);
4838 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4839 ns = timecounter_cyc2time(&adapter->clock, regval);
4840 timecompare_update(&adapter->compare, ns);
4841 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4842 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4843 shhwtstamps->syststamp =
4844 timecompare_transform(&adapter->compare, ns);
4845 }
4846
4847 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 4998 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
4848 dev_kfree_skb_irq(skb); 4999 dev_kfree_skb_irq(skb);
4849 goto next_desc; 5000 goto next_desc;
4850 } 5001 }
4851 5002
5003 igb_rx_hwtstamp(q_vector, staterr, skb);
4852 total_bytes += skb->len; 5004 total_bytes += skb->len;
4853 total_packets++; 5005 total_packets++;
4854 5006
4855 igb_rx_checksum_adv(adapter, staterr, skb); 5007 igb_rx_checksum_adv(rx_ring, staterr, skb);
4856 5008
4857 skb->protocol = eth_type_trans(skb, netdev); 5009 skb->protocol = eth_type_trans(skb, netdev);
5010 skb_record_rx_queue(skb, rx_ring->queue_index);
4858 5011
4859 igb_receive_skb(rx_ring, staterr, rx_desc, skb); 5012 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5013 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
5014
5015 igb_receive_skb(q_vector, skb, vlan_tag);
4860 5016
4861next_desc: 5017next_desc:
4862 rx_desc->wb.upper.status_error = 0; 5018 rx_desc->wb.upper.status_error = 0;
@@ -4883,8 +5039,6 @@ next_desc:
4883 rx_ring->total_bytes += total_bytes; 5039 rx_ring->total_bytes += total_bytes;
4884 rx_ring->rx_stats.packets += total_packets; 5040 rx_ring->rx_stats.packets += total_packets;
4885 rx_ring->rx_stats.bytes += total_bytes; 5041 rx_ring->rx_stats.bytes += total_bytes;
4886 netdev->stats.rx_bytes += total_bytes;
4887 netdev->stats.rx_packets += total_packets;
4888 return cleaned; 5042 return cleaned;
4889} 5043}
4890 5044
@@ -4892,12 +5046,9 @@ next_desc:
4892 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split 5046 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
4893 * @adapter: address of board private structure 5047 * @adapter: address of board private structure
4894 **/ 5048 **/
4895static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, 5049void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
4896 int cleaned_count)
4897{ 5050{
4898 struct igb_adapter *adapter = rx_ring->adapter; 5051 struct net_device *netdev = rx_ring->netdev;
4899 struct net_device *netdev = adapter->netdev;
4900 struct pci_dev *pdev = adapter->pdev;
4901 union e1000_adv_rx_desc *rx_desc; 5052 union e1000_adv_rx_desc *rx_desc;
4902 struct igb_buffer *buffer_info; 5053 struct igb_buffer *buffer_info;
4903 struct sk_buff *skb; 5054 struct sk_buff *skb;
@@ -4907,19 +5058,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4907 i = rx_ring->next_to_use; 5058 i = rx_ring->next_to_use;
4908 buffer_info = &rx_ring->buffer_info[i]; 5059 buffer_info = &rx_ring->buffer_info[i];
4909 5060
4910 if (adapter->rx_ps_hdr_size) 5061 bufsz = rx_ring->rx_buffer_len;
4911 bufsz = adapter->rx_ps_hdr_size;
4912 else
4913 bufsz = adapter->rx_buffer_len;
4914 5062
4915 while (cleaned_count--) { 5063 while (cleaned_count--) {
4916 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 5064 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4917 5065
4918 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { 5066 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
4919 if (!buffer_info->page) { 5067 if (!buffer_info->page) {
4920 buffer_info->page = alloc_page(GFP_ATOMIC); 5068 buffer_info->page = netdev_alloc_page(netdev);
4921 if (!buffer_info->page) { 5069 if (!buffer_info->page) {
4922 adapter->alloc_rx_buff_failed++; 5070 rx_ring->rx_stats.alloc_failed++;
4923 goto no_buffers; 5071 goto no_buffers;
4924 } 5072 }
4925 buffer_info->page_offset = 0; 5073 buffer_info->page_offset = 0;
@@ -4927,33 +5075,48 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4927 buffer_info->page_offset ^= PAGE_SIZE / 2; 5075 buffer_info->page_offset ^= PAGE_SIZE / 2;
4928 } 5076 }
4929 buffer_info->page_dma = 5077 buffer_info->page_dma =
4930 pci_map_page(pdev, buffer_info->page, 5078 pci_map_page(rx_ring->pdev, buffer_info->page,
4931 buffer_info->page_offset, 5079 buffer_info->page_offset,
4932 PAGE_SIZE / 2, 5080 PAGE_SIZE / 2,
4933 PCI_DMA_FROMDEVICE); 5081 PCI_DMA_FROMDEVICE);
5082 if (pci_dma_mapping_error(rx_ring->pdev,
5083 buffer_info->page_dma)) {
5084 buffer_info->page_dma = 0;
5085 rx_ring->rx_stats.alloc_failed++;
5086 goto no_buffers;
5087 }
4934 } 5088 }
4935 5089
4936 if (!buffer_info->skb) { 5090 skb = buffer_info->skb;
5091 if (!skb) {
4937 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 5092 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4938 if (!skb) { 5093 if (!skb) {
4939 adapter->alloc_rx_buff_failed++; 5094 rx_ring->rx_stats.alloc_failed++;
4940 goto no_buffers; 5095 goto no_buffers;
4941 } 5096 }
4942 5097
4943 buffer_info->skb = skb; 5098 buffer_info->skb = skb;
4944 buffer_info->dma = pci_map_single(pdev, skb->data, 5099 }
5100 if (!buffer_info->dma) {
5101 buffer_info->dma = pci_map_single(rx_ring->pdev,
5102 skb->data,
4945 bufsz, 5103 bufsz,
4946 PCI_DMA_FROMDEVICE); 5104 PCI_DMA_FROMDEVICE);
5105 if (pci_dma_mapping_error(rx_ring->pdev,
5106 buffer_info->dma)) {
5107 buffer_info->dma = 0;
5108 rx_ring->rx_stats.alloc_failed++;
5109 goto no_buffers;
5110 }
4947 } 5111 }
4948 /* Refresh the desc even if buffer_addrs didn't change because 5112 /* Refresh the desc even if buffer_addrs didn't change because
4949 * each write-back erases this info. */ 5113 * each write-back erases this info. */
4950 if (adapter->rx_ps_hdr_size) { 5114 if (bufsz < IGB_RXBUFFER_1024) {
4951 rx_desc->read.pkt_addr = 5115 rx_desc->read.pkt_addr =
4952 cpu_to_le64(buffer_info->page_dma); 5116 cpu_to_le64(buffer_info->page_dma);
4953 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); 5117 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4954 } else { 5118 } else {
4955 rx_desc->read.pkt_addr = 5119 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
4956 cpu_to_le64(buffer_info->dma);
4957 rx_desc->read.hdr_addr = 0; 5120 rx_desc->read.hdr_addr = 0;
4958 } 5121 }
4959 5122
@@ -4976,7 +5139,7 @@ no_buffers:
4976 * applicable for weak-ordered memory model archs, 5139 * applicable for weak-ordered memory model archs,
4977 * such as IA-64). */ 5140 * such as IA-64). */
4978 wmb(); 5141 wmb();
4979 writel(i, adapter->hw.hw_addr + rx_ring->tail); 5142 writel(i, rx_ring->tail);
4980 } 5143 }
4981} 5144}
4982 5145
@@ -5035,13 +5198,11 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
5035 struct igb_adapter *adapter = netdev_priv(netdev); 5198 struct igb_adapter *adapter = netdev_priv(netdev);
5036 struct e1000_hw *hw = &adapter->hw; 5199 struct e1000_hw *hw = &adapter->hw;
5037 struct hwtstamp_config config; 5200 struct hwtstamp_config config;
5038 u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED; 5201 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
5039 u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED; 5202 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
5040 u32 tsync_rx_ctl_type = 0;
5041 u32 tsync_rx_cfg = 0; 5203 u32 tsync_rx_cfg = 0;
5042 int is_l4 = 0; 5204 bool is_l4 = false;
5043 int is_l2 = 0; 5205 bool is_l2 = false;
5044 short port = 319; /* PTP */
5045 u32 regval; 5206 u32 regval;
5046 5207
5047 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 5208 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -5053,10 +5214,8 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
5053 5214
5054 switch (config.tx_type) { 5215 switch (config.tx_type) {
5055 case HWTSTAMP_TX_OFF: 5216 case HWTSTAMP_TX_OFF:
5056 tsync_tx_ctl_bit = 0; 5217 tsync_tx_ctl = 0;
5057 break;
5058 case HWTSTAMP_TX_ON: 5218 case HWTSTAMP_TX_ON:
5059 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
5060 break; 5219 break;
5061 default: 5220 default:
5062 return -ERANGE; 5221 return -ERANGE;
@@ -5064,7 +5223,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
5064 5223
5065 switch (config.rx_filter) { 5224 switch (config.rx_filter) {
5066 case HWTSTAMP_FILTER_NONE: 5225 case HWTSTAMP_FILTER_NONE:
5067 tsync_rx_ctl_bit = 0; 5226 tsync_rx_ctl = 0;
5068 break; 5227 break;
5069 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 5228 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
5070 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 5229 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -5075,86 +5234,97 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
5075 * possible to time stamp both Sync and Delay_Req messages 5234 * possible to time stamp both Sync and Delay_Req messages
5076 * => fall back to time stamping all packets 5235 * => fall back to time stamping all packets
5077 */ 5236 */
5078 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL; 5237 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
5079 config.rx_filter = HWTSTAMP_FILTER_ALL; 5238 config.rx_filter = HWTSTAMP_FILTER_ALL;
5080 break; 5239 break;
5081 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 5240 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
5082 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5241 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
5083 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; 5242 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
5084 is_l4 = 1; 5243 is_l4 = true;
5085 break; 5244 break;
5086 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 5245 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
5087 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5246 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
5088 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; 5247 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
5089 is_l4 = 1; 5248 is_l4 = true;
5090 break; 5249 break;
5091 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 5250 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5092 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 5251 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5093 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5252 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5094 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; 5253 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
5095 is_l2 = 1; 5254 is_l2 = true;
5096 is_l4 = 1; 5255 is_l4 = true;
5097 config.rx_filter = HWTSTAMP_FILTER_SOME; 5256 config.rx_filter = HWTSTAMP_FILTER_SOME;
5098 break; 5257 break;
5099 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 5258 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5100 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 5259 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5101 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5260 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5102 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; 5261 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
5103 is_l2 = 1; 5262 is_l2 = true;
5104 is_l4 = 1; 5263 is_l4 = true;
5105 config.rx_filter = HWTSTAMP_FILTER_SOME; 5264 config.rx_filter = HWTSTAMP_FILTER_SOME;
5106 break; 5265 break;
5107 case HWTSTAMP_FILTER_PTP_V2_EVENT: 5266 case HWTSTAMP_FILTER_PTP_V2_EVENT:
5108 case HWTSTAMP_FILTER_PTP_V2_SYNC: 5267 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5109 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 5268 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5110 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2; 5269 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
5111 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 5270 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
5112 is_l2 = 1; 5271 is_l2 = true;
5113 break; 5272 break;
5114 default: 5273 default:
5115 return -ERANGE; 5274 return -ERANGE;
5116 } 5275 }
5117 5276
5277 if (hw->mac.type == e1000_82575) {
5278 if (tsync_rx_ctl | tsync_tx_ctl)
5279 return -EINVAL;
5280 return 0;
5281 }
5282
5118 /* enable/disable TX */ 5283 /* enable/disable TX */
5119 regval = rd32(E1000_TSYNCTXCTL); 5284 regval = rd32(E1000_TSYNCTXCTL);
5120 regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit; 5285 regval &= ~E1000_TSYNCTXCTL_ENABLED;
5286 regval |= tsync_tx_ctl;
5121 wr32(E1000_TSYNCTXCTL, regval); 5287 wr32(E1000_TSYNCTXCTL, regval);
5122 5288
5123 /* enable/disable RX, define which PTP packets are time stamped */ 5289 /* enable/disable RX */
5124 regval = rd32(E1000_TSYNCRXCTL); 5290 regval = rd32(E1000_TSYNCRXCTL);
5125 regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit; 5291 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
5126 regval = (regval & ~0xE) | tsync_rx_ctl_type; 5292 regval |= tsync_rx_ctl;
5127 wr32(E1000_TSYNCRXCTL, regval); 5293 wr32(E1000_TSYNCRXCTL, regval);
5128 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5129 5294
5130 /* 5295 /* define which PTP packets are time stamped */
5131 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 5296 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5132 * (Ethertype to filter on)
5133 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
5134 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
5135 */
5136 wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
5137
5138 /* L4 Queue Filter[0]: only filter by source and destination port */
5139 wr32(E1000_SPQF0, htons(port));
5140 wr32(E1000_IMIREXT(0), is_l4 ?
5141 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
5142 wr32(E1000_IMIR(0), is_l4 ?
5143 (htons(port)
5144 | (0<<16) /* immediate interrupt disabled */
5145 | 0 /* (1<<17) bit cleared: do not bypass
5146 destination port check */)
5147 : 0);
5148 wr32(E1000_FTQF0, is_l4 ?
5149 (0x11 /* UDP */
5150 | (1<<15) /* VF not compared */
5151 | (1<<27) /* Enable Timestamping */
5152 | (7<<28) /* only source port filter enabled,
5153 source/target address and protocol
5154 masked */)
5155 : ((1<<15) | (15<<28) /* all mask bits set = filter not
5156 enabled */));
5157 5297
5298 /* define ethertype filter for timestamped packets */
5299 if (is_l2)
5300 wr32(E1000_ETQF(3),
5301 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
5302 E1000_ETQF_1588 | /* enable timestamping */
5303 ETH_P_1588)); /* 1588 eth protocol type */
5304 else
5305 wr32(E1000_ETQF(3), 0);
5306
5307#define PTP_PORT 319
5308 /* L4 Queue Filter[3]: filter by destination port and protocol */
5309 if (is_l4) {
5310 u32 ftqf = (IPPROTO_UDP /* UDP */
5311 | E1000_FTQF_VF_BP /* VF not compared */
5312 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
5313 | E1000_FTQF_MASK); /* mask all inputs */
5314 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
5315
5316 wr32(E1000_IMIR(3), htons(PTP_PORT));
5317 wr32(E1000_IMIREXT(3),
5318 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
5319 if (hw->mac.type == e1000_82576) {
5320 /* enable source port check */
5321 wr32(E1000_SPQF(3), htons(PTP_PORT));
5322 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
5323 }
5324 wr32(E1000_FTQF(3), ftqf);
5325 } else {
5326 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
5327 }
5158 wrfl(); 5328 wrfl();
5159 5329
5160 adapter->hwtstamp_config = config; 5330 adapter->hwtstamp_config = config;
@@ -5231,21 +5401,15 @@ static void igb_vlan_rx_register(struct net_device *netdev,
5231 ctrl |= E1000_CTRL_VME; 5401 ctrl |= E1000_CTRL_VME;
5232 wr32(E1000_CTRL, ctrl); 5402 wr32(E1000_CTRL, ctrl);
5233 5403
5234 /* enable VLAN receive filtering */ 5404 /* Disable CFI check */
5235 rctl = rd32(E1000_RCTL); 5405 rctl = rd32(E1000_RCTL);
5236 rctl &= ~E1000_RCTL_CFIEN; 5406 rctl &= ~E1000_RCTL_CFIEN;
5237 wr32(E1000_RCTL, rctl); 5407 wr32(E1000_RCTL, rctl);
5238 igb_update_mng_vlan(adapter);
5239 } else { 5408 } else {
5240 /* disable VLAN tag insert/strip */ 5409 /* disable VLAN tag insert/strip */
5241 ctrl = rd32(E1000_CTRL); 5410 ctrl = rd32(E1000_CTRL);
5242 ctrl &= ~E1000_CTRL_VME; 5411 ctrl &= ~E1000_CTRL_VME;
5243 wr32(E1000_CTRL, ctrl); 5412 wr32(E1000_CTRL, ctrl);
5244
5245 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
5246 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
5247 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
5248 }
5249 } 5413 }
5250 5414
5251 igb_rlpml_set(adapter); 5415 igb_rlpml_set(adapter);
@@ -5260,16 +5424,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5260 struct e1000_hw *hw = &adapter->hw; 5424 struct e1000_hw *hw = &adapter->hw;
5261 int pf_id = adapter->vfs_allocated_count; 5425 int pf_id = adapter->vfs_allocated_count;
5262 5426
5263 if ((hw->mng_cookie.status & 5427 /* attempt to add filter to vlvf array */
5264 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5428 igb_vlvf_set(adapter, vid, true, pf_id);
5265 (vid == adapter->mng_vlan_id))
5266 return;
5267
5268 /* add vid to vlvf if sr-iov is enabled,
5269 * if that fails add directly to filter table */
5270 if (igb_vlvf_set(adapter, vid, true, pf_id))
5271 igb_vfta_set(hw, vid, true);
5272 5429
5430 /* add the filter since PF can receive vlans w/o entry in vlvf */
5431 igb_vfta_set(hw, vid, true);
5273} 5432}
5274 5433
5275static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 5434static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -5277,6 +5436,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5277 struct igb_adapter *adapter = netdev_priv(netdev); 5436 struct igb_adapter *adapter = netdev_priv(netdev);
5278 struct e1000_hw *hw = &adapter->hw; 5437 struct e1000_hw *hw = &adapter->hw;
5279 int pf_id = adapter->vfs_allocated_count; 5438 int pf_id = adapter->vfs_allocated_count;
5439 s32 err;
5280 5440
5281 igb_irq_disable(adapter); 5441 igb_irq_disable(adapter);
5282 vlan_group_set_device(adapter->vlgrp, vid, NULL); 5442 vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -5284,17 +5444,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5284 if (!test_bit(__IGB_DOWN, &adapter->state)) 5444 if (!test_bit(__IGB_DOWN, &adapter->state))
5285 igb_irq_enable(adapter); 5445 igb_irq_enable(adapter);
5286 5446
5287 if ((adapter->hw.mng_cookie.status & 5447 /* remove vlan from VLVF table array */
5288 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5448 err = igb_vlvf_set(adapter, vid, false, pf_id);
5289 (vid == adapter->mng_vlan_id)) {
5290 /* release control to f/w */
5291 igb_release_hw_control(adapter);
5292 return;
5293 }
5294 5449
5295 /* remove vid from vlvf if sr-iov is enabled, 5450 /* if vid was not present in VLVF just remove it from table */
5296 * if not in vlvf remove from vfta */ 5451 if (err)
5297 if (igb_vlvf_set(adapter, vid, false, pf_id))
5298 igb_vfta_set(hw, vid, false); 5452 igb_vfta_set(hw, vid, false);
5299} 5453}
5300 5454
@@ -5314,6 +5468,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
5314 5468
5315int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) 5469int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5316{ 5470{
5471 struct pci_dev *pdev = adapter->pdev;
5317 struct e1000_mac_info *mac = &adapter->hw.mac; 5472 struct e1000_mac_info *mac = &adapter->hw.mac;
5318 5473
5319 mac->autoneg = 0; 5474 mac->autoneg = 0;
@@ -5337,8 +5492,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5337 break; 5492 break;
5338 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5493 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5339 default: 5494 default:
5340 dev_err(&adapter->pdev->dev, 5495 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
5341 "Unsupported Speed/Duplex configuration\n");
5342 return -EINVAL; 5496 return -EINVAL;
5343 } 5497 }
5344 return 0; 5498 return 0;
@@ -5360,9 +5514,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5360 if (netif_running(netdev)) 5514 if (netif_running(netdev))
5361 igb_close(netdev); 5515 igb_close(netdev);
5362 5516
5363 igb_reset_interrupt_capability(adapter); 5517 igb_clear_interrupt_scheme(adapter);
5364
5365 igb_free_queues(adapter);
5366 5518
5367#ifdef CONFIG_PM 5519#ifdef CONFIG_PM
5368 retval = pci_save_state(pdev); 5520 retval = pci_save_state(pdev);
@@ -5394,7 +5546,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5394 wr32(E1000_CTRL, ctrl); 5546 wr32(E1000_CTRL, ctrl);
5395 5547
5396 /* Allow time for pending master requests to run */ 5548 /* Allow time for pending master requests to run */
5397 igb_disable_pcie_master(&adapter->hw); 5549 igb_disable_pcie_master(hw);
5398 5550
5399 wr32(E1000_WUC, E1000_WUC_PME_EN); 5551 wr32(E1000_WUC, E1000_WUC_PME_EN);
5400 wr32(E1000_WUFC, wufc); 5552 wr32(E1000_WUFC, wufc);
@@ -5457,9 +5609,7 @@ static int igb_resume(struct pci_dev *pdev)
5457 pci_enable_wake(pdev, PCI_D3hot, 0); 5609 pci_enable_wake(pdev, PCI_D3hot, 0);
5458 pci_enable_wake(pdev, PCI_D3cold, 0); 5610 pci_enable_wake(pdev, PCI_D3cold, 0);
5459 5611
5460 igb_set_interrupt_capability(adapter); 5612 if (igb_init_interrupt_scheme(adapter)) {
5461
5462 if (igb_alloc_queues(adapter)) {
5463 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 5613 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5464 return -ENOMEM; 5614 return -ENOMEM;
5465 } 5615 }
@@ -5511,22 +5661,16 @@ static void igb_netpoll(struct net_device *netdev)
5511 int i; 5661 int i;
5512 5662
5513 if (!adapter->msix_entries) { 5663 if (!adapter->msix_entries) {
5664 struct igb_q_vector *q_vector = adapter->q_vector[0];
5514 igb_irq_disable(adapter); 5665 igb_irq_disable(adapter);
5515 napi_schedule(&adapter->rx_ring[0].napi); 5666 napi_schedule(&q_vector->napi);
5516 return; 5667 return;
5517 } 5668 }
5518 5669
5519 for (i = 0; i < adapter->num_tx_queues; i++) { 5670 for (i = 0; i < adapter->num_q_vectors; i++) {
5520 struct igb_ring *tx_ring = &adapter->tx_ring[i]; 5671 struct igb_q_vector *q_vector = adapter->q_vector[i];
5521 wr32(E1000_EIMC, tx_ring->eims_value); 5672 wr32(E1000_EIMC, q_vector->eims_value);
5522 igb_clean_tx_irq(tx_ring); 5673 napi_schedule(&q_vector->napi);
5523 wr32(E1000_EIMS, tx_ring->eims_value);
5524 }
5525
5526 for (i = 0; i < adapter->num_rx_queues; i++) {
5527 struct igb_ring *rx_ring = &adapter->rx_ring[i];
5528 wr32(E1000_EIMC, rx_ring->eims_value);
5529 napi_schedule(&rx_ring->napi);
5530 } 5674 }
5531} 5675}
5532#endif /* CONFIG_NET_POLL_CONTROLLER */ 5676#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -5671,19 +5815,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5671static void igb_vmm_control(struct igb_adapter *adapter) 5815static void igb_vmm_control(struct igb_adapter *adapter)
5672{ 5816{
5673 struct e1000_hw *hw = &adapter->hw; 5817 struct e1000_hw *hw = &adapter->hw;
5674 u32 reg_data; 5818 u32 reg;
5675 5819
5676 if (!adapter->vfs_allocated_count) 5820 /* replication is not supported for 82575 */
5821 if (hw->mac.type == e1000_82575)
5677 return; 5822 return;
5678 5823
5679 /* VF's need PF reset indication before they 5824 /* enable replication vlan tag stripping */
5680 * can send/receive mail */ 5825 reg = rd32(E1000_RPLOLR);
5681 reg_data = rd32(E1000_CTRL_EXT); 5826 reg |= E1000_RPLOLR_STRVLAN;
5682 reg_data |= E1000_CTRL_EXT_PFRSTD; 5827 wr32(E1000_RPLOLR, reg);
5683 wr32(E1000_CTRL_EXT, reg_data); 5828
5829 /* notify HW that the MAC is adding vlan tags */
5830 reg = rd32(E1000_DTXCTL);
5831 reg |= E1000_DTXCTL_VLAN_ADDED;
5832 wr32(E1000_DTXCTL, reg);
5684 5833
5685 igb_vmdq_set_loopback_pf(hw, true); 5834 if (adapter->vfs_allocated_count) {
5686 igb_vmdq_set_replication_pf(hw, true); 5835 igb_vmdq_set_loopback_pf(hw, true);
5836 igb_vmdq_set_replication_pf(hw, true);
5837 } else {
5838 igb_vmdq_set_loopback_pf(hw, false);
5839 igb_vmdq_set_replication_pf(hw, false);
5840 }
5687} 5841}
5688 5842
5689/* igb_main.c */ 5843/* igb_main.c */
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index bc606f8b61aa..8afff07ff559 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -279,7 +279,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
279{ 279{
280 struct igbvf_adapter *adapter = netdev_priv(netdev); 280 struct igbvf_adapter *adapter = netdev_priv(netdev);
281 struct igbvf_ring *temp_ring; 281 struct igbvf_ring *temp_ring;
282 int err; 282 int err = 0;
283 u32 new_rx_count, new_tx_count; 283 u32 new_rx_count, new_tx_count;
284 284
285 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 285 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -299,15 +299,22 @@ static int igbvf_set_ringparam(struct net_device *netdev,
299 return 0; 299 return 0;
300 } 300 }
301 301
302 temp_ring = vmalloc(sizeof(struct igbvf_ring));
303 if (!temp_ring)
304 return -ENOMEM;
305
306 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 302 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
307 msleep(1); 303 msleep(1);
308 304
309 if (netif_running(adapter->netdev)) 305 if (!netif_running(adapter->netdev)) {
310 igbvf_down(adapter); 306 adapter->tx_ring->count = new_tx_count;
307 adapter->rx_ring->count = new_rx_count;
308 goto clear_reset;
309 }
310
311 temp_ring = vmalloc(sizeof(struct igbvf_ring));
312 if (!temp_ring) {
313 err = -ENOMEM;
314 goto clear_reset;
315 }
316
317 igbvf_down(adapter);
311 318
312 /* 319 /*
313 * We can't just free everything and then setup again, 320 * We can't just free everything and then setup again,
@@ -339,14 +346,11 @@ static int igbvf_set_ringparam(struct net_device *netdev,
339 346
340 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); 347 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
341 } 348 }
342
343 err = 0;
344err_setup: 349err_setup:
345 if (netif_running(adapter->netdev)) 350 igbvf_up(adapter);
346 igbvf_up(adapter);
347
348 clear_bit(__IGBVF_RESETTING, &adapter->state);
349 vfree(temp_ring); 351 vfree(temp_ring);
352clear_reset:
353 clear_bit(__IGBVF_RESETTING, &adapter->state);
350 return err; 354 return err;
351} 355}
352 356
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 2b854161c61b..7eb08a6d3f99 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -457,6 +457,7 @@ extern int ixgbe_fcoe_disable(struct net_device *netdev);
457extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); 457extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
458extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); 458extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
459#endif /* CONFIG_IXGBE_DCB */ 459#endif /* CONFIG_IXGBE_DCB */
460extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
460#endif /* IXGBE_FCOE */ 461#endif /* IXGBE_FCOE */
461 462
462#endif /* _IXGBE_H_ */ 463#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index ae27c41222e3..72106898a5cb 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1000,6 +1000,10 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1000 hw->mac.num_rar_entries--; 1000 hw->mac.num_rar_entries--;
1001 } 1001 }
1002 1002
1003 /* Store the alternative WWNN/WWPN prefix */
1004 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1005 &hw->mac.wwpn_prefix);
1006
1003reset_hw_out: 1007reset_hw_out:
1004 return status; 1008 return status;
1005} 1009}
@@ -2536,6 +2540,51 @@ fw_version_out:
2536 return status; 2540 return status;
2537} 2541}
2538 2542
2543/**
2544 * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
2545 * the EEPROM
2546 * @hw: pointer to hardware structure
2547 * @wwnn_prefix: the alternative WWNN prefix
2548 * @wwpn_prefix: the alternative WWPN prefix
2549 *
2550 * This function will read the EEPROM from the alternative SAN MAC address
2551 * block to check the support for the alternative WWNN/WWPN prefix support.
2552 **/
2553static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2554 u16 *wwpn_prefix)
2555{
2556 u16 offset, caps;
2557 u16 alt_san_mac_blk_offset;
2558
2559 /* clear output first */
2560 *wwnn_prefix = 0xFFFF;
2561 *wwpn_prefix = 0xFFFF;
2562
2563 /* check if alternative SAN MAC is supported */
2564 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2565 &alt_san_mac_blk_offset);
2566
2567 if ((alt_san_mac_blk_offset == 0) ||
2568 (alt_san_mac_blk_offset == 0xFFFF))
2569 goto wwn_prefix_out;
2570
2571 /* check capability in alternative san mac address block */
2572 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2573 hw->eeprom.ops.read(hw, offset, &caps);
2574 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2575 goto wwn_prefix_out;
2576
2577 /* get the corresponding prefix for WWNN/WWPN */
2578 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2579 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2580
2581 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2582 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2583
2584wwn_prefix_out:
2585 return 0;
2586}
2587
2539static struct ixgbe_mac_operations mac_ops_82599 = { 2588static struct ixgbe_mac_operations mac_ops_82599 = {
2540 .init_hw = &ixgbe_init_hw_generic, 2589 .init_hw = &ixgbe_init_hw_generic,
2541 .reset_hw = &ixgbe_reset_hw_82599, 2590 .reset_hw = &ixgbe_reset_hw_82599,
@@ -2547,6 +2596,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2547 .get_mac_addr = &ixgbe_get_mac_addr_generic, 2596 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2548 .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599, 2597 .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
2549 .get_device_caps = &ixgbe_get_device_caps_82599, 2598 .get_device_caps = &ixgbe_get_device_caps_82599,
2599 .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
2550 .stop_adapter = &ixgbe_stop_adapter_generic, 2600 .stop_adapter = &ixgbe_stop_adapter_generic,
2551 .get_bus_info = &ixgbe_get_bus_info_generic, 2601 .get_bus_info = &ixgbe_get_bus_info_generic,
2552 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 2602 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 08eccf418c67..9d2cc833691b 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -806,7 +806,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
806{ 806{
807 struct ixgbe_adapter *adapter = netdev_priv(netdev); 807 struct ixgbe_adapter *adapter = netdev_priv(netdev);
808 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 808 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
809 int i, err; 809 int i, err = 0;
810 u32 new_rx_count, new_tx_count; 810 u32 new_rx_count, new_tx_count;
811 bool need_update = false; 811 bool need_update = false;
812 812
@@ -830,6 +830,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
830 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 830 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
831 msleep(1); 831 msleep(1);
832 832
833 if (!netif_running(adapter->netdev)) {
834 for (i = 0; i < adapter->num_tx_queues; i++)
835 adapter->tx_ring[i].count = new_tx_count;
836 for (i = 0; i < adapter->num_rx_queues; i++)
837 adapter->rx_ring[i].count = new_rx_count;
838 adapter->tx_ring_count = new_tx_count;
839 adapter->rx_ring_count = new_rx_count;
840 goto err_setup;
841 }
842
833 temp_tx_ring = kcalloc(adapter->num_tx_queues, 843 temp_tx_ring = kcalloc(adapter->num_tx_queues,
834 sizeof(struct ixgbe_ring), GFP_KERNEL); 844 sizeof(struct ixgbe_ring), GFP_KERNEL);
835 if (!temp_tx_ring) { 845 if (!temp_tx_ring) {
@@ -887,8 +897,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
887 897
888 /* if rings need to be updated, here's the place to do it in one shot */ 898 /* if rings need to be updated, here's the place to do it in one shot */
889 if (need_update) { 899 if (need_update) {
890 if (netif_running(netdev)) 900 ixgbe_down(adapter);
891 ixgbe_down(adapter);
892 901
893 /* tx */ 902 /* tx */
894 if (new_tx_count != adapter->tx_ring_count) { 903 if (new_tx_count != adapter->tx_ring_count) {
@@ -905,13 +914,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
905 temp_rx_ring = NULL; 914 temp_rx_ring = NULL;
906 adapter->rx_ring_count = new_rx_count; 915 adapter->rx_ring_count = new_rx_count;
907 } 916 }
908 }
909
910 /* success! */
911 err = 0;
912 if (netif_running(netdev))
913 ixgbe_up(adapter); 917 ixgbe_up(adapter);
914 918 }
915err_setup: 919err_setup:
916 clear_bit(__IXGBE_RESETTING, &adapter->state); 920 clear_bit(__IXGBE_RESETTING, &adapter->state);
917 return err; 921 return err;
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index a3c9f99515e2..edecdc853c14 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -718,3 +718,49 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
718 return 1; 718 return 1;
719} 719}
720#endif /* CONFIG_IXGBE_DCB */ 720#endif /* CONFIG_IXGBE_DCB */
721
722/**
723 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
724 * @netdev : ixgbe adapter
725 * @wwn : the world wide name
726 * @type: the type of world wide name
727 *
728 * Returns the node or port world wide name if both the prefix and the san
729 * mac address are valid, then the wwn is formed based on the NAA-2 for
730 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
731 *
732 * Returns : 0 on success
733 */
734int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
735{
736 int rc = -EINVAL;
737 u16 prefix = 0xffff;
738 struct ixgbe_adapter *adapter = netdev_priv(netdev);
739 struct ixgbe_mac_info *mac = &adapter->hw.mac;
740
741 switch (type) {
742 case NETDEV_FCOE_WWNN:
743 prefix = mac->wwnn_prefix;
744 break;
745 case NETDEV_FCOE_WWPN:
746 prefix = mac->wwpn_prefix;
747 break;
748 default:
749 break;
750 }
751
752 if ((prefix != 0xffff) &&
753 is_valid_ether_addr(mac->san_addr)) {
754 *wwn = ((u64) prefix << 48) |
755 ((u64) mac->san_addr[0] << 40) |
756 ((u64) mac->san_addr[1] << 32) |
757 ((u64) mac->san_addr[2] << 24) |
758 ((u64) mac->san_addr[3] << 16) |
759 ((u64) mac->san_addr[4] << 8) |
760 ((u64) mac->san_addr[5]);
761 rc = 0;
762 }
763 return rc;
764}
765
766
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 4c8a44919705..45c5faf0824a 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5449,6 +5449,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5449 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, 5449 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
5450 .ndo_fcoe_enable = ixgbe_fcoe_enable, 5450 .ndo_fcoe_enable = ixgbe_fcoe_enable,
5451 .ndo_fcoe_disable = ixgbe_fcoe_disable, 5451 .ndo_fcoe_disable = ixgbe_fcoe_disable,
5452 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
5452#endif /* IXGBE_FCOE */ 5453#endif /* IXGBE_FCOE */
5453}; 5454};
5454 5455
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 1cab53eb22f3..21b6633da578 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1539,6 +1539,16 @@
1539#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 1539#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
1540#define IXGBE_FW_PATCH_VERSION_4 0x7 1540#define IXGBE_FW_PATCH_VERSION_4 0x7
1541 1541
1542/* Alternative SAN MAC Address Block */
1543#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
1544#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
1545#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
1546#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
1547#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
1548#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
1549#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
1550#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
1551
1542/* PCI Bus Info */ 1552/* PCI Bus Info */
1543#define IXGBE_PCI_LINK_STATUS 0xB2 1553#define IXGBE_PCI_LINK_STATUS 0xB2
1544#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 1554#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
@@ -2345,6 +2355,7 @@ struct ixgbe_mac_operations {
2345 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); 2355 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
2346 s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); 2356 s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
2347 s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); 2357 s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
2358 s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
2348 s32 (*stop_adapter)(struct ixgbe_hw *); 2359 s32 (*stop_adapter)(struct ixgbe_hw *);
2349 s32 (*get_bus_info)(struct ixgbe_hw *); 2360 s32 (*get_bus_info)(struct ixgbe_hw *);
2350 void (*set_lan_id)(struct ixgbe_hw *); 2361 void (*set_lan_id)(struct ixgbe_hw *);
@@ -2416,6 +2427,10 @@ struct ixgbe_mac_info {
2416 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 2427 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
2417 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 2428 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
2418 u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 2429 u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
2430 /* prefix for World Wide Node Name (WWNN) */
2431 u16 wwnn_prefix;
2432 /* prefix for World Wide Port Name (WWPN) */
2433 u16 wwpn_prefix;
2419 s32 mc_filter_type; 2434 s32 mc_filter_type;
2420 u32 mcft_size; 2435 u32 mcft_size;
2421 u32 vft_size; 2436 u32 vft_size;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 237835864357..a23f739d222f 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -171,6 +171,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
171} 171}
172 172
173/** 173/**
174 * ks8851_wrreg8 - write 8bit register value to chip
175 * @ks: The chip state
176 * @reg: The register address
177 * @val: The value to write
178 *
179 * Issue a write to put the value @val into the register specified in @reg.
180 */
181static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
182{
183 struct spi_transfer *xfer = &ks->spi_xfer1;
184 struct spi_message *msg = &ks->spi_msg1;
185 __le16 txb[2];
186 int ret;
187 int bit;
188
189 bit = 1 << (reg & 3);
190
191 txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
192 txb[1] = val;
193
194 xfer->tx_buf = txb;
195 xfer->rx_buf = NULL;
196 xfer->len = 3;
197
198 ret = spi_sync(ks->spidev, msg);
199 if (ret < 0)
200 ks_err(ks, "spi_sync() failed\n");
201}
202
203/**
174 * ks8851_rx_1msg - select whether to use one or two messages for spi read 204 * ks8851_rx_1msg - select whether to use one or two messages for spi read
175 * @ks: The device structure 205 * @ks: The device structure
176 * 206 *
@@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
322static int ks8851_write_mac_addr(struct net_device *dev) 352static int ks8851_write_mac_addr(struct net_device *dev)
323{ 353{
324 struct ks8851_net *ks = netdev_priv(dev); 354 struct ks8851_net *ks = netdev_priv(dev);
325 u16 *mcp = (u16 *)dev->dev_addr; 355 int i;
326 356
327 mutex_lock(&ks->lock); 357 mutex_lock(&ks->lock);
328 358
329 ks8851_wrreg16(ks, KS_MARL, mcp[0]); 359 for (i = 0; i < ETH_ALEN; i++)
330 ks8851_wrreg16(ks, KS_MARM, mcp[1]); 360 ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
331 ks8851_wrreg16(ks, KS_MARH, mcp[2]);
332 361
333 mutex_unlock(&ks->lock); 362 mutex_unlock(&ks->lock);
334 363
@@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev)
951 mcptr = mcptr->next; 980 mcptr = mcptr->next;
952 } 981 }
953 982
954 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA; 983 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
955 } else { 984 } else {
956 /* just accept broadcast / unicast */ 985 /* just accept broadcast / unicast */
957 rxctrl.rxcr1 = RXCR1_RXPAFMA; 986 rxctrl.rxcr1 = RXCR1_RXPAFMA;
@@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1239 ndev->netdev_ops = &ks8851_netdev_ops; 1268 ndev->netdev_ops = &ks8851_netdev_ops;
1240 ndev->irq = spi->irq; 1269 ndev->irq = spi->irq;
1241 1270
1271 /* issue a global soft reset to reset the device. */
1272 ks8851_soft_reset(ks, GRR_GSR);
1273
1242 /* simple check for a valid chip being connected to the bus */ 1274 /* simple check for a valid chip being connected to the bus */
1243 1275
1244 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1276 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index 85abe147afbf..f52c312cc356 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -16,6 +16,7 @@
16#define CCR_32PIN (1 << 0) 16#define CCR_32PIN (1 << 0)
17 17
18/* MAC address registers */ 18/* MAC address registers */
19#define KS_MAR(_m) 0x15 - (_m)
19#define KS_MARL 0x10 20#define KS_MARL 0x10
20#define KS_MARM 0x12 21#define KS_MARM 0x12
21#define KS_MARH 0x14 22#define KS_MARH 0x14
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 3aabfd9dd212..20b7707f38ef 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -555,13 +555,13 @@ static int macvlan_newlink(struct net_device *dev,
555 return 0; 555 return 0;
556} 556}
557 557
558static void macvlan_dellink(struct net_device *dev) 558static void macvlan_dellink(struct net_device *dev, struct list_head *head)
559{ 559{
560 struct macvlan_dev *vlan = netdev_priv(dev); 560 struct macvlan_dev *vlan = netdev_priv(dev);
561 struct macvlan_port *port = vlan->port; 561 struct macvlan_port *port = vlan->port;
562 562
563 list_del(&vlan->list); 563 list_del(&vlan->list);
564 unregister_netdevice(dev); 564 unregister_netdevice_queue(dev, head);
565 565
566 if (list_empty(&port->vlans)) 566 if (list_empty(&port->vlans))
567 macvlan_port_destroy(port->dev); 567 macvlan_port_destroy(port->dev);
@@ -601,7 +601,7 @@ static int macvlan_device_event(struct notifier_block *unused,
601 break; 601 break;
602 case NETDEV_UNREGISTER: 602 case NETDEV_UNREGISTER:
603 list_for_each_entry_safe(vlan, next, &port->vlans, list) 603 list_for_each_entry_safe(vlan, next, &port->vlans, list)
604 macvlan_dellink(vlan->dev); 604 macvlan_dellink(vlan->dev, NULL);
605 break; 605 break;
606 } 606 }
607 return NOTIFY_DONE; 607 return NOTIFY_DONE;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 29c9fe2951e0..5319db9901d8 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.5.0-1.432" 78#define MYRI10GE_VERSION_STR "1.5.1-1.451"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1623,10 +1623,21 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1623 return 0; 1623 return 0;
1624 } 1624 }
1625 } 1625 }
1626 if (*ptr == 'R' || *ptr == 'Q') { 1626 if (*ptr == '2')
1627 /* We've found either an XFP or quad ribbon fiber */ 1627 ptr++;
1628 if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
1629 /* We've found either an XFP, quad ribbon fiber, or SFP+ */
1628 cmd->port = PORT_FIBRE; 1630 cmd->port = PORT_FIBRE;
1631 cmd->supported |= SUPPORTED_FIBRE;
1632 cmd->advertising |= ADVERTISED_FIBRE;
1633 } else {
1634 cmd->port = PORT_OTHER;
1629 } 1635 }
1636 if (*ptr == 'R' || *ptr == 'S')
1637 cmd->transceiver = XCVR_EXTERNAL;
1638 else
1639 cmd->transceiver = XCVR_INTERNAL;
1640
1630 return 0; 1641 return 0;
1631} 1642}
1632 1643
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index e98cfa6baa8f..645450d93f4e 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 62 56#define _NETXEN_NIC_LINUX_SUBVERSION 65
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.62" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.65"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
@@ -74,8 +74,6 @@
74#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \ 74#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \
75 * NETXEN_FLASH_SECTOR_SIZE) 75 * NETXEN_FLASH_SECTOR_SIZE)
76 76
77#define PHAN_VENDOR_ID 0x4040
78
79#define RCV_DESC_RINGSIZE(rds_ring) \ 77#define RCV_DESC_RINGSIZE(rds_ring) \
80 (sizeof(struct rcv_desc) * (rds_ring)->num_desc) 78 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
81#define RCV_BUFF_RINGSIZE(rds_ring) \ 79#define RCV_BUFF_RINGSIZE(rds_ring) \
@@ -421,6 +419,34 @@ struct status_desc {
421 __le64 status_desc_data[2]; 419 __le64 status_desc_data[2];
422} __attribute__ ((aligned(16))); 420} __attribute__ ((aligned(16)));
423 421
422/* UNIFIED ROMIMAGE *************************/
423#define NX_UNI_FW_MIN_SIZE 0x3eb000
424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
425#define NX_UNI_DIR_SECT_BOOTLD 0x6
426#define NX_UNI_DIR_SECT_FW 0x7
427
428/*Offsets */
429#define NX_UNI_CHIP_REV_OFF 10
430#define NX_UNI_FLAGS_OFF 11
431#define NX_UNI_BIOS_VERSION_OFF 12
432#define NX_UNI_BOOTLD_IDX_OFF 27
433#define NX_UNI_FIRMWARE_IDX_OFF 29
434
435struct uni_table_desc{
436 uint32_t findex;
437 uint32_t num_entries;
438 uint32_t entry_size;
439 uint32_t reserved[5];
440};
441
442struct uni_data_desc{
443 uint32_t findex;
444 uint32_t size;
445 uint32_t reserved[5];
446};
447
448/* UNIFIED ROMIMAGE *************************/
449
424/* The version of the main data structure */ 450/* The version of the main data structure */
425#define NETXEN_BDINFO_VERSION 1 451#define NETXEN_BDINFO_VERSION 1
426 452
@@ -487,7 +513,15 @@ struct status_desc {
487#define NX_P2_MN_ROMIMAGE 0 513#define NX_P2_MN_ROMIMAGE 0
488#define NX_P3_CT_ROMIMAGE 1 514#define NX_P3_CT_ROMIMAGE 1
489#define NX_P3_MN_ROMIMAGE 2 515#define NX_P3_MN_ROMIMAGE 2
490#define NX_FLASH_ROMIMAGE 3 516#define NX_UNIFIED_ROMIMAGE 3
517#define NX_FLASH_ROMIMAGE 4
518#define NX_UNKNOWN_ROMIMAGE 0xff
519
520#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin"
521#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin"
522#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin"
523#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
524#define NX_FLASH_ROMIMAGE_NAME "flash"
491 525
492extern char netxen_nic_driver_name[]; 526extern char netxen_nic_driver_name[];
493 527
@@ -1210,7 +1244,7 @@ struct netxen_adapter {
1210 nx_nic_intr_coalesce_t coal; 1244 nx_nic_intr_coalesce_t coal;
1211 1245
1212 unsigned long state; 1246 unsigned long state;
1213 u32 resv5; 1247 __le32 file_prd_off; /*File fw product offset*/
1214 u32 fw_version; 1248 u32 fw_version;
1215 const struct firmware *fw; 1249 const struct firmware *fw;
1216}; 1250};
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index a3b18e0c9670..c86095eb5d9e 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -688,8 +688,8 @@ static int netxen_nic_reg_test(struct net_device *dev)
688 u32 data_read, data_written; 688 u32 data_read, data_written;
689 689
690 data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0)); 690 data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
691 if ((data_read & 0xffff) != PHAN_VENDOR_ID) 691 if ((data_read & 0xffff) != adapter->pdev->vendor)
692 return 1; 692 return 1;
693 693
694 data_written = (u32)0xa5a5a5a5; 694 data_written = (u32)0xa5a5a5a5;
695 695
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 7386a7cce2ba..a39155d61bad 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -419,6 +419,7 @@ enum {
419#define NETXEN_CRB_ROMUSB \ 419#define NETXEN_CRB_ROMUSB \
420 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) 420 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
421#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) 421#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
422#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
422#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) 423#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
423#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) 424#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
424 425
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index e43cbbd5bec1..b3054c6cc608 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -1090,39 +1090,33 @@ netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter,
1090 * In: 'off' is offset from base in 128M pci map 1090 * In: 'off' is offset from base in 128M pci map
1091 */ 1091 */
1092static int 1092static int
1093netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off) 1093netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
1094 ulong off, void __iomem **addr)
1094{ 1095{
1095 crb_128M_2M_sub_block_map_t *m; 1096 crb_128M_2M_sub_block_map_t *m;
1096 1097
1097 1098
1098 if (*off >= NETXEN_CRB_MAX) 1099 if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
1099 return -EINVAL; 1100 return -EINVAL;
1100 1101
1101 if (*off >= NETXEN_PCI_CAMQM && (*off < NETXEN_PCI_CAMQM_2M_END)) { 1102 off -= NETXEN_PCI_CRBSPACE;
1102 *off = (*off - NETXEN_PCI_CAMQM) + NETXEN_PCI_CAMQM_2M_BASE +
1103 (ulong)adapter->ahw.pci_base0;
1104 return 0;
1105 }
1106
1107 if (*off < NETXEN_PCI_CRBSPACE)
1108 return -EINVAL;
1109
1110 *off -= NETXEN_PCI_CRBSPACE;
1111 1103
1112 /* 1104 /*
1113 * Try direct map 1105 * Try direct map
1114 */ 1106 */
1115 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; 1107 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
1116 1108
1117 if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { 1109 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
1118 *off = *off + m->start_2M - m->start_128M + 1110 *addr = adapter->ahw.pci_base0 + m->start_2M +
1119 (ulong)adapter->ahw.pci_base0; 1111 (off - m->start_128M);
1120 return 0; 1112 return 0;
1121 } 1113 }
1122 1114
1123 /* 1115 /*
1124 * Not in direct map, use crb window 1116 * Not in direct map, use crb window
1125 */ 1117 */
1118 *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
1119 (off & MASK(16));
1126 return 1; 1120 return 1;
1127} 1121}
1128 1122
@@ -1132,28 +1126,26 @@ netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off)
1132 * side effect: lock crb window 1126 * side effect: lock crb window
1133 */ 1127 */
1134static void 1128static void
1135netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong *off) 1129netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
1136{ 1130{
1137 u32 window; 1131 u32 window;
1138 void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; 1132 void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
1139 1133
1140 window = CRB_HI(*off); 1134 off -= NETXEN_PCI_CRBSPACE;
1135
1136 window = CRB_HI(off);
1141 1137
1142 if (adapter->ahw.crb_win == window) 1138 if (adapter->ahw.crb_win == window)
1143 goto done; 1139 return;
1144 1140
1145 writel(window, addr); 1141 writel(window, addr);
1146 if (readl(addr) != window) { 1142 if (readl(addr) != window) {
1147 if (printk_ratelimit()) 1143 if (printk_ratelimit())
1148 dev_warn(&adapter->pdev->dev, 1144 dev_warn(&adapter->pdev->dev,
1149 "failed to set CRB window to %d off 0x%lx\n", 1145 "failed to set CRB window to %d off 0x%lx\n",
1150 window, *off); 1146 window, off);
1151 } 1147 }
1152 adapter->ahw.crb_win = window; 1148 adapter->ahw.crb_win = window;
1153
1154done:
1155 *off = (*off & MASK(16)) + CRB_INDIRECT_2M +
1156 (ulong)adapter->ahw.pci_base0;
1157} 1149}
1158 1150
1159static int 1151static int
@@ -1217,11 +1209,12 @@ netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
1217{ 1209{
1218 unsigned long flags; 1210 unsigned long flags;
1219 int rv; 1211 int rv;
1212 void __iomem *addr = NULL;
1220 1213
1221 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off); 1214 rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
1222 1215
1223 if (rv == 0) { 1216 if (rv == 0) {
1224 writel(data, (void __iomem *)off); 1217 writel(data, addr);
1225 return 0; 1218 return 0;
1226 } 1219 }
1227 1220
@@ -1229,8 +1222,8 @@ netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
1229 /* indirect access */ 1222 /* indirect access */
1230 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 1223 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1231 crb_win_lock(adapter); 1224 crb_win_lock(adapter);
1232 netxen_nic_pci_set_crbwindow_2M(adapter, &off); 1225 netxen_nic_pci_set_crbwindow_2M(adapter, off);
1233 writel(data, (void __iomem *)off); 1226 writel(data, addr);
1234 crb_win_unlock(adapter); 1227 crb_win_unlock(adapter);
1235 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 1228 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1236 return 0; 1229 return 0;
@@ -1248,18 +1241,19 @@ netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
1248 unsigned long flags; 1241 unsigned long flags;
1249 int rv; 1242 int rv;
1250 u32 data; 1243 u32 data;
1244 void __iomem *addr = NULL;
1251 1245
1252 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off); 1246 rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
1253 1247
1254 if (rv == 0) 1248 if (rv == 0)
1255 return readl((void __iomem *)off); 1249 return readl(addr);
1256 1250
1257 if (rv > 0) { 1251 if (rv > 0) {
1258 /* indirect access */ 1252 /* indirect access */
1259 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 1253 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1260 crb_win_lock(adapter); 1254 crb_win_lock(adapter);
1261 netxen_nic_pci_set_crbwindow_2M(adapter, &off); 1255 netxen_nic_pci_set_crbwindow_2M(adapter, off);
1262 data = readl((void __iomem *)off); 1256 data = readl(addr);
1263 crb_win_unlock(adapter); 1257 crb_win_unlock(adapter);
1264 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 1258 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1265 return data; 1259 return data;
@@ -1307,17 +1301,20 @@ static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter,
1307void __iomem * 1301void __iomem *
1308netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) 1302netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
1309{ 1303{
1310 ulong off = offset; 1304 void __iomem *addr = NULL;
1311 1305
1312 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1306 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1313 if (offset < NETXEN_CRB_PCIX_HOST2 && 1307 if ((offset < NETXEN_CRB_PCIX_HOST2) &&
1314 offset > NETXEN_CRB_PCIX_HOST) 1308 (offset > NETXEN_CRB_PCIX_HOST))
1315 return PCI_OFFSET_SECOND_RANGE(adapter, offset); 1309 addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
1316 return NETXEN_CRB_NORMALIZE(adapter, offset); 1310 else
1311 addr = NETXEN_CRB_NORMALIZE(adapter, offset);
1312 } else {
1313 WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
1314 offset, &addr));
1317 } 1315 }
1318 1316
1319 BUG_ON(netxen_nic_pci_get_crb_addr_2M(adapter, &off)); 1317 return addr;
1320 return (void __iomem *)off;
1321} 1318}
1322 1319
1323static int 1320static int
@@ -1778,22 +1775,16 @@ netxen_setup_hwops(struct netxen_adapter *adapter)
1778 1775
1779int netxen_nic_get_board_info(struct netxen_adapter *adapter) 1776int netxen_nic_get_board_info(struct netxen_adapter *adapter)
1780{ 1777{
1781 int offset, board_type, magic, header_version; 1778 int offset, board_type, magic;
1782 struct pci_dev *pdev = adapter->pdev; 1779 struct pci_dev *pdev = adapter->pdev;
1783 1780
1784 offset = NX_FW_MAGIC_OFFSET; 1781 offset = NX_FW_MAGIC_OFFSET;
1785 if (netxen_rom_fast_read(adapter, offset, &magic)) 1782 if (netxen_rom_fast_read(adapter, offset, &magic))
1786 return -EIO; 1783 return -EIO;
1787 1784
1788 offset = NX_HDR_VERSION_OFFSET; 1785 if (magic != NETXEN_BDINFO_MAGIC) {
1789 if (netxen_rom_fast_read(adapter, offset, &header_version)) 1786 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1790 return -EIO; 1787 magic);
1791
1792 if (magic != NETXEN_BDINFO_MAGIC ||
1793 header_version != NETXEN_BDINFO_VERSION) {
1794 dev_err(&pdev->dev,
1795 "invalid board config, magic=%08x, version=%08x\n",
1796 magic, header_version);
1797 return -EIO; 1788 return -EIO;
1798 } 1789 }
1799 1790
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index d8c4b70e35ba..6ee27a630d89 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -46,6 +46,7 @@ static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
46static void 46static void
47netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, 47netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
48 struct nx_host_rds_ring *rds_ring); 48 struct nx_host_rds_ring *rds_ring);
49static int netxen_p3_has_mn(struct netxen_adapter *adapter);
49 50
50static void crb_addr_transform_setup(void) 51static void crb_addr_transform_setup(void)
51{ 52{
@@ -514,6 +515,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
514 continue; 515 continue;
515 516
516 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 517 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
518 if (off == (NETXEN_CRB_I2C0 + 0x1c))
519 continue;
517 /* do not reset PCI */ 520 /* do not reset PCI */
518 if (off == (ROMUSB_GLB + 0xbc)) 521 if (off == (ROMUSB_GLB + 0xbc))
519 continue; 522 continue;
@@ -537,12 +540,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
537 continue; 540 continue;
538 } 541 }
539 542
540 if (off == NETXEN_ADDR_ERROR) {
541 printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
542 netxen_nic_driver_name, buf[i].addr);
543 continue;
544 }
545
546 init_delay = 1; 543 init_delay = 1;
547 /* After writing this register, HW needs time for CRB */ 544 /* After writing this register, HW needs time for CRB */
548 /* to quiet down (else crb_window returns 0xffffffff) */ 545 /* to quiet down (else crb_window returns 0xffffffff) */
@@ -593,6 +590,172 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
593 return 0; 590 return 0;
594} 591}
595 592
593static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
594{
595 uint32_t i;
596 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
597 __le32 entries = cpu_to_le32(directory->num_entries);
598
599 for (i = 0; i < entries; i++) {
600
601 __le32 offs = cpu_to_le32(directory->findex) +
602 (i * cpu_to_le32(directory->entry_size));
603 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
604
605 if (tab_type == section)
606 return (struct uni_table_desc *) &unirom[offs];
607 }
608
609 return NULL;
610}
611
612static int
613nx_set_product_offs(struct netxen_adapter *adapter)
614{
615 struct uni_table_desc *ptab_descr;
616 const u8 *unirom = adapter->fw->data;
617 uint32_t i;
618 __le32 entries;
619
620 ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
621 if (ptab_descr == NULL)
622 return -1;
623
624 entries = cpu_to_le32(ptab_descr->num_entries);
625
626 for (i = 0; i < entries; i++) {
627
628 __le32 flags, file_chiprev, offs;
629 u8 chiprev = adapter->ahw.revision_id;
630 int mn_present = netxen_p3_has_mn(adapter);
631 uint32_t flagbit;
632
633 offs = cpu_to_le32(ptab_descr->findex) +
634 (i * cpu_to_le32(ptab_descr->entry_size));
635 flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
636 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
637 NX_UNI_CHIP_REV_OFF));
638
639 flagbit = mn_present ? 1 : 2;
640
641 if ((chiprev == file_chiprev) &&
642 ((1ULL << flagbit) & flags)) {
643 adapter->file_prd_off = offs;
644 return 0;
645 }
646 }
647
648 return -1;
649}
650
651
652static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
653 u32 section, u32 idx_offset)
654{
655 const u8 *unirom = adapter->fw->data;
656 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
657 idx_offset));
658 struct uni_table_desc *tab_desc;
659 __le32 offs;
660
661 tab_desc = nx_get_table_desc(unirom, section);
662
663 if (tab_desc == NULL)
664 return NULL;
665
666 offs = cpu_to_le32(tab_desc->findex) +
667 (cpu_to_le32(tab_desc->entry_size) * idx);
668
669 return (struct uni_data_desc *)&unirom[offs];
670}
671
672static u8 *
673nx_get_bootld_offs(struct netxen_adapter *adapter)
674{
675 u32 offs = NETXEN_BOOTLD_START;
676
677 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
678 offs = cpu_to_le32((nx_get_data_desc(adapter,
679 NX_UNI_DIR_SECT_BOOTLD,
680 NX_UNI_BOOTLD_IDX_OFF))->findex);
681
682 return (u8 *)&adapter->fw->data[offs];
683}
684
685static u8 *
686nx_get_fw_offs(struct netxen_adapter *adapter)
687{
688 u32 offs = NETXEN_IMAGE_START;
689
690 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
691 offs = cpu_to_le32((nx_get_data_desc(adapter,
692 NX_UNI_DIR_SECT_FW,
693 NX_UNI_FIRMWARE_IDX_OFF))->findex);
694
695 return (u8 *)&adapter->fw->data[offs];
696}
697
698static __le32
699nx_get_fw_size(struct netxen_adapter *adapter)
700{
701 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
702 return cpu_to_le32((nx_get_data_desc(adapter,
703 NX_UNI_DIR_SECT_FW,
704 NX_UNI_FIRMWARE_IDX_OFF))->size);
705 else
706 return cpu_to_le32(
707 *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
708}
709
710static __le32
711nx_get_fw_version(struct netxen_adapter *adapter)
712{
713 struct uni_data_desc *fw_data_desc;
714 const struct firmware *fw = adapter->fw;
715 __le32 major, minor, sub;
716 const u8 *ver_str;
717 int i, ret = 0;
718
719 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
720
721 fw_data_desc = nx_get_data_desc(adapter,
722 NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
723 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
724 cpu_to_le32(fw_data_desc->size) - 17;
725
726 for (i = 0; i < 12; i++) {
727 if (!strncmp(&ver_str[i], "REV=", 4)) {
728 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
729 &major, &minor, &sub);
730 break;
731 }
732 }
733
734 if (ret != 3)
735 return 0;
736
737 return major + (minor << 8) + (sub << 16);
738
739 } else
740 return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
741}
742
743static __le32
744nx_get_bios_version(struct netxen_adapter *adapter)
745{
746 const struct firmware *fw = adapter->fw;
747 __le32 bios_ver, prd_off = adapter->file_prd_off;
748
749 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
750 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
751 + NX_UNI_BIOS_VERSION_OFF));
752 return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
753 (bios_ver >> 24);
754 } else
755 return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
756
757}
758
596int 759int
597netxen_need_fw_reset(struct netxen_adapter *adapter) 760netxen_need_fw_reset(struct netxen_adapter *adapter)
598{ 761{
@@ -632,9 +795,8 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
632 /* check if we have got newer or different file firmware */ 795 /* check if we have got newer or different file firmware */
633 if (adapter->fw) { 796 if (adapter->fw) {
634 797
635 const struct firmware *fw = adapter->fw; 798 val = nx_get_fw_version(adapter);
636 799
637 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
638 version = NETXEN_DECODE_VERSION(val); 800 version = NETXEN_DECODE_VERSION(val);
639 801
640 major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); 802 major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
@@ -644,7 +806,8 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
644 if (version > NETXEN_VERSION_CODE(major, minor, build)) 806 if (version > NETXEN_VERSION_CODE(major, minor, build))
645 return 1; 807 return 1;
646 808
647 if (version == NETXEN_VERSION_CODE(major, minor, build)) { 809 if (version == NETXEN_VERSION_CODE(major, minor, build) &&
810 adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
648 811
649 val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); 812 val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
650 fw_type = (val & 0x4) ? 813 fw_type = (val & 0x4) ?
@@ -659,7 +822,11 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
659} 822}
660 823
661static char *fw_name[] = { 824static char *fw_name[] = {
662 "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash", 825 NX_P2_MN_ROMIMAGE_NAME,
826 NX_P3_CT_ROMIMAGE_NAME,
827 NX_P3_MN_ROMIMAGE_NAME,
828 NX_UNIFIED_ROMIMAGE_NAME,
829 NX_FLASH_ROMIMAGE_NAME,
663}; 830};
664 831
665int 832int
@@ -681,22 +848,21 @@ netxen_load_firmware(struct netxen_adapter *adapter)
681 848
682 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; 849 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
683 850
684 ptr64 = (u64 *)&fw->data[NETXEN_BOOTLD_START]; 851 ptr64 = (u64 *)nx_get_bootld_offs(adapter);
685 flashaddr = NETXEN_BOOTLD_START; 852 flashaddr = NETXEN_BOOTLD_START;
686 853
687 for (i = 0; i < size; i++) { 854 for (i = 0; i < size; i++) {
688 data = cpu_to_le64(ptr64[i]); 855 data = cpu_to_le64(ptr64[i]);
689 if (adapter->pci_mem_write(adapter, 856
690 flashaddr, data)) 857 if (adapter->pci_mem_write(adapter, flashaddr, data))
691 return -EIO; 858 return -EIO;
692 859
693 flashaddr += 8; 860 flashaddr += 8;
694 } 861 }
695 862
696 size = *(u32 *)&fw->data[NX_FW_SIZE_OFFSET]; 863 size = (__force u32)nx_get_fw_size(adapter) / 8;
697 size = (__force u32)cpu_to_le32(size) / 8;
698 864
699 ptr64 = (u64 *)&fw->data[NETXEN_IMAGE_START]; 865 ptr64 = (u64 *)nx_get_fw_offs(adapter);
700 flashaddr = NETXEN_IMAGE_START; 866 flashaddr = NETXEN_IMAGE_START;
701 867
702 for (i = 0; i < size; i++) { 868 for (i = 0; i < size; i++) {
@@ -749,21 +915,31 @@ netxen_load_firmware(struct netxen_adapter *adapter)
749} 915}
750 916
751static int 917static int
752netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname) 918netxen_validate_firmware(struct netxen_adapter *adapter)
753{ 919{
754 __le32 val; 920 __le32 val;
755 u32 ver, min_ver, bios; 921 u32 ver, min_ver, bios, min_size;
756 struct pci_dev *pdev = adapter->pdev; 922 struct pci_dev *pdev = adapter->pdev;
757 const struct firmware *fw = adapter->fw; 923 const struct firmware *fw = adapter->fw;
924 u8 fw_type = adapter->fw_type;
758 925
759 if (fw->size < NX_FW_MIN_SIZE) 926 if (fw_type == NX_UNIFIED_ROMIMAGE) {
760 return -EINVAL; 927 if (nx_set_product_offs(adapter))
928 return -EINVAL;
761 929
762 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); 930 min_size = NX_UNI_FW_MIN_SIZE;
763 if ((__force u32)val != NETXEN_BDINFO_MAGIC) 931 } else {
932 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
933 if ((__force u32)val != NETXEN_BDINFO_MAGIC)
934 return -EINVAL;
935
936 min_size = NX_FW_MIN_SIZE;
937 }
938
939 if (fw->size < min_size)
764 return -EINVAL; 940 return -EINVAL;
765 941
766 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); 942 val = nx_get_fw_version(adapter);
767 943
768 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 944 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
769 min_ver = NETXEN_VERSION_CODE(4, 0, 216); 945 min_ver = NETXEN_VERSION_CODE(4, 0, 216);
@@ -775,15 +951,15 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
775 if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) { 951 if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
776 dev_err(&pdev->dev, 952 dev_err(&pdev->dev,
777 "%s: firmware version %d.%d.%d unsupported\n", 953 "%s: firmware version %d.%d.%d unsupported\n",
778 fwname, _major(ver), _minor(ver), _build(ver)); 954 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
779 return -EINVAL; 955 return -EINVAL;
780 } 956 }
781 957
782 val = cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); 958 val = nx_get_bios_version(adapter);
783 netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); 959 netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
784 if ((__force u32)val != bios) { 960 if ((__force u32)val != bios) {
785 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", 961 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
786 fwname); 962 fw_name[fw_type]);
787 return -EINVAL; 963 return -EINVAL;
788 } 964 }
789 965
@@ -794,7 +970,7 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
794 val = NETXEN_DECODE_VERSION(val); 970 val = NETXEN_DECODE_VERSION(val);
795 if (val > ver) { 971 if (val > ver) {
796 dev_info(&pdev->dev, "%s: firmware is older than flash\n", 972 dev_info(&pdev->dev, "%s: firmware is older than flash\n",
797 fwname); 973 fw_name[fw_type]);
798 return -EINVAL; 974 return -EINVAL;
799 } 975 }
800 976
@@ -802,6 +978,41 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
802 return 0; 978 return 0;
803} 979}
804 980
981static void
982nx_get_next_fwtype(struct netxen_adapter *adapter)
983{
984 u8 fw_type;
985
986 switch (adapter->fw_type) {
987 case NX_UNKNOWN_ROMIMAGE:
988 fw_type = NX_UNIFIED_ROMIMAGE;
989 break;
990
991 case NX_UNIFIED_ROMIMAGE:
992 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
993 fw_type = NX_FLASH_ROMIMAGE;
994 else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
995 fw_type = NX_P2_MN_ROMIMAGE;
996 else if (netxen_p3_has_mn(adapter))
997 fw_type = NX_P3_MN_ROMIMAGE;
998 else
999 fw_type = NX_P3_CT_ROMIMAGE;
1000 break;
1001
1002 case NX_P3_MN_ROMIMAGE:
1003 fw_type = NX_P3_CT_ROMIMAGE;
1004 break;
1005
1006 case NX_P2_MN_ROMIMAGE:
1007 case NX_P3_CT_ROMIMAGE:
1008 default:
1009 fw_type = NX_FLASH_ROMIMAGE;
1010 break;
1011 }
1012
1013 adapter->fw_type = fw_type;
1014}
1015
805static int 1016static int
806netxen_p3_has_mn(struct netxen_adapter *adapter) 1017netxen_p3_has_mn(struct netxen_adapter *adapter)
807{ 1018{
@@ -823,55 +1034,29 @@ netxen_p3_has_mn(struct netxen_adapter *adapter)
823 1034
824void netxen_request_firmware(struct netxen_adapter *adapter) 1035void netxen_request_firmware(struct netxen_adapter *adapter)
825{ 1036{
826 u8 fw_type;
827 struct pci_dev *pdev = adapter->pdev; 1037 struct pci_dev *pdev = adapter->pdev;
828 int rc = 0; 1038 int rc = 0;
829 1039
830 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1040 adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
831 fw_type = NX_P2_MN_ROMIMAGE;
832 goto request_fw;
833 }
834
835 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
836 /* No file firmware for the time being */
837 fw_type = NX_FLASH_ROMIMAGE;
838 goto done;
839 }
840
841 fw_type = netxen_p3_has_mn(adapter) ?
842 NX_P3_MN_ROMIMAGE : NX_P3_CT_ROMIMAGE;
843 1041
844request_fw: 1042next:
845 rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev); 1043 nx_get_next_fwtype(adapter);
846 if (rc != 0) {
847 if (fw_type == NX_P3_MN_ROMIMAGE) {
848 msleep(1);
849 fw_type = NX_P3_CT_ROMIMAGE;
850 goto request_fw;
851 }
852 1044
853 fw_type = NX_FLASH_ROMIMAGE; 1045 if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
854 adapter->fw = NULL; 1046 adapter->fw = NULL;
855 goto done; 1047 } else {
856 } 1048 rc = request_firmware(&adapter->fw,
857 1049 fw_name[adapter->fw_type], &pdev->dev);
858 rc = netxen_validate_firmware(adapter, fw_name[fw_type]); 1050 if (rc != 0)
859 if (rc != 0) { 1051 goto next;
860 release_firmware(adapter->fw); 1052
861 1053 rc = netxen_validate_firmware(adapter);
862 if (fw_type == NX_P3_MN_ROMIMAGE) { 1054 if (rc != 0) {
1055 release_firmware(adapter->fw);
863 msleep(1); 1056 msleep(1);
864 fw_type = NX_P3_CT_ROMIMAGE; 1057 goto next;
865 goto request_fw;
866 } 1058 }
867
868 fw_type = NX_FLASH_ROMIMAGE;
869 adapter->fw = NULL;
870 goto done;
871 } 1059 }
872
873done:
874 adapter->fw_type = fw_type;
875} 1060}
876 1061
877 1062
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 1071f090a124..12d1037cd81b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -34,13 +34,18 @@
34#include <net/ip.h> 34#include <net/ip.h>
35#include <linux/ipv6.h> 35#include <linux/ipv6.h>
36#include <linux/inetdevice.h> 36#include <linux/inetdevice.h>
37#include <linux/sysfs.h>
37 38
38MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); 39MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
39MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
40MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); 41MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
43MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
44MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
45MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
41 46
42char netxen_nic_driver_name[] = "netxen_nic"; 47char netxen_nic_driver_name[] = "netxen_nic";
43static char netxen_nic_driver_string[] = "NetXen Network Driver version " 48static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
44 NETXEN_NIC_LINUX_VERSIONID; 49 NETXEN_NIC_LINUX_VERSIONID;
45 50
46static int port_mode = NETXEN_PORT_MODE_AUTO_NEG; 51static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
@@ -54,7 +59,6 @@ static int use_msi_x = 1;
54 59
55static unsigned long auto_fw_reset = AUTO_FW_RESET_ENABLED; 60static unsigned long auto_fw_reset = AUTO_FW_RESET_ENABLED;
56 61
57/* Local functions to NetXen NIC driver */
58static int __devinit netxen_nic_probe(struct pci_dev *pdev, 62static int __devinit netxen_nic_probe(struct pci_dev *pdev,
59 const struct pci_device_id *ent); 63 const struct pci_device_id *ent);
60static void __devexit netxen_nic_remove(struct pci_dev *pdev); 64static void __devexit netxen_nic_remove(struct pci_dev *pdev);
@@ -91,6 +95,11 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
91#define ENTRY(device) \ 95#define ENTRY(device) \
92 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \ 96 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
93 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 97 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
98#define ENTRY2(device) \
99 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
100 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
101
102#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
94 103
95static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 104static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
96 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), 105 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
@@ -101,6 +110,7 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
101 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), 110 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
102 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), 111 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
103 ENTRY(PCI_DEVICE_ID_NX3031), 112 ENTRY(PCI_DEVICE_ID_NX3031),
113 ENTRY2(PCI_DEVICE_ID_QLOGIC_QLE824X),
104 {0,} 114 {0,}
105}; 115};
106 116
@@ -724,7 +734,8 @@ netxen_check_options(struct netxen_adapter *adapter)
724 if (adapter->portnum == 0) { 734 if (adapter->portnum == 0) {
725 get_brd_name_by_type(adapter->ahw.board_type, brd_name); 735 get_brd_name_by_type(adapter->ahw.board_type, brd_name);
726 736
727 printk(KERN_INFO "NetXen %s Board S/N %s Chip rev 0x%x\n", 737 pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
738 module_name(THIS_MODULE),
728 brd_name, serial_num, adapter->ahw.revision_id); 739 brd_name, serial_num, adapter->ahw.revision_id);
729 } 740 }
730 741
@@ -1206,16 +1217,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1206 int pci_func_id = PCI_FUNC(pdev->devfn); 1217 int pci_func_id = PCI_FUNC(pdev->devfn);
1207 uint8_t revision_id; 1218 uint8_t revision_id;
1208 1219
1209 if (pdev->class != 0x020000) {
1210 printk(KERN_DEBUG "NetXen function %d, class %x will not "
1211 "be enabled.\n",pci_func_id, pdev->class);
1212 return -ENODEV;
1213 }
1214
1215 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { 1220 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
1216 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x" 1221 pr_warning("%s: chip revisions between 0x%x-0x%x"
1217 "will not be enabled.\n", 1222 "will not be enabled.\n",
1218 NX_P3_A0, NX_P3_B1); 1223 module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
1219 return -ENODEV; 1224 return -ENODEV;
1220 } 1225 }
1221 1226
@@ -1925,6 +1930,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1925 1930
1926request_reset: 1931request_reset:
1927 adapter->need_fw_reset = 1; 1932 adapter->need_fw_reset = 1;
1933 clear_bit(__NX_RESETTING, &adapter->state);
1928} 1934}
1929 1935
1930struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) 1936struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
@@ -2499,6 +2505,7 @@ static struct bin_attribute bin_attr_mem = {
2499 .write = netxen_sysfs_write_mem, 2505 .write = netxen_sysfs_write_mem,
2500}; 2506};
2501 2507
2508#ifdef CONFIG_MODULES
2502static ssize_t 2509static ssize_t
2503netxen_store_auto_fw_reset(struct module_attribute *mattr, 2510netxen_store_auto_fw_reset(struct module_attribute *mattr,
2504 struct module *mod, const char *buf, size_t count) 2511 struct module *mod, const char *buf, size_t count)
@@ -2533,6 +2540,7 @@ static struct module_attribute mod_attr_fw_reset = {
2533 .show = netxen_show_auto_fw_reset, 2540 .show = netxen_show_auto_fw_reset,
2534 .store = netxen_store_auto_fw_reset, 2541 .store = netxen_store_auto_fw_reset,
2535}; 2542};
2543#endif
2536 2544
2537static void 2545static void
2538netxen_create_sysfs_entries(struct netxen_adapter *adapter) 2546netxen_create_sysfs_entries(struct netxen_adapter *adapter)
@@ -2738,7 +2746,9 @@ static struct pci_driver netxen_driver = {
2738 2746
2739static int __init netxen_init_module(void) 2747static int __init netxen_init_module(void)
2740{ 2748{
2749#ifdef CONFIG_MODULES
2741 struct module *mod = THIS_MODULE; 2750 struct module *mod = THIS_MODULE;
2751#endif
2742 2752
2743 printk(KERN_INFO "%s\n", netxen_nic_driver_string); 2753 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
2744 2754
@@ -2747,9 +2757,11 @@ static int __init netxen_init_module(void)
2747 register_inetaddr_notifier(&netxen_inetaddr_cb); 2757 register_inetaddr_notifier(&netxen_inetaddr_cb);
2748#endif 2758#endif
2749 2759
2760#ifdef CONFIG_MODULES
2750 if (sysfs_create_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr)) 2761 if (sysfs_create_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr))
2751 printk(KERN_ERR "%s: Failed to create auto_fw_reset " 2762 printk(KERN_ERR "%s: Failed to create auto_fw_reset "
2752 "sysfs entry.", netxen_nic_driver_name); 2763 "sysfs entry.", netxen_nic_driver_name);
2764#endif
2753 2765
2754 return pci_register_driver(&netxen_driver); 2766 return pci_register_driver(&netxen_driver);
2755} 2767}
@@ -2758,9 +2770,11 @@ module_init(netxen_init_module);
2758 2770
2759static void __exit netxen_exit_module(void) 2771static void __exit netxen_exit_module(void)
2760{ 2772{
2773#ifdef CONFIG_MODULES
2761 struct module *mod = THIS_MODULE; 2774 struct module *mod = THIS_MODULE;
2762 2775
2763 sysfs_remove_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr); 2776 sysfs_remove_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr);
2777#endif
2764 2778
2765 pci_unregister_driver(&netxen_driver); 2779 pci_unregister_driver(&netxen_driver);
2766 2780
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 1d1e657991d2..5506f870037f 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3545 rp->rcr_index = index; 3545 rp->rcr_index = index;
3546 3546
3547 skb_reserve(skb, NET_IP_ALIGN); 3547 skb_reserve(skb, NET_IP_ALIGN);
3548 __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); 3548 __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
3549 3549
3550 rp->rx_packets++; 3550 rp->rx_packets++;
3551 rp->rx_bytes += skb->len; 3551 rp->rx_bytes += skb->len;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index bd3447f04902..94c9ad2746bc 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1760,7 +1760,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), 1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), 1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), 1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), 1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), 1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", 1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f81e53222230..f63c96a4ecb4 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/phy.h> 18#include <linux/phy.h>
19#include <linux/brcmphy.h>
19 20
20#define PHY_ID_BCM50610 0x0143bd60 21#define PHY_ID_BCM50610 0x0143bd60
21#define PHY_ID_BCM50610M 0x0143bd70 22#define PHY_ID_BCM50610M 0x0143bd70
@@ -24,6 +25,9 @@
24#define BRCM_PHY_MODEL(phydev) \ 25#define BRCM_PHY_MODEL(phydev) \
25 ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask) 26 ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
26 27
28#define BRCM_PHY_REV(phydev) \
29 ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
30
27 31
28#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ 32#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
29#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ 33#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
@@ -94,22 +98,35 @@
94#define BCM_LED_SRC_OFF 0xe /* Tied high */ 98#define BCM_LED_SRC_OFF 0xe /* Tied high */
95#define BCM_LED_SRC_ON 0xf /* Tied low */ 99#define BCM_LED_SRC_ON 0xf /* Tied low */
96 100
101
97/* 102/*
98 * BCM5482: Shadow registers 103 * BCM5482: Shadow registers
99 * Shadow values go into bits [14:10] of register 0x1c to select a shadow 104 * Shadow values go into bits [14:10] of register 0x1c to select a shadow
100 * register to access. 105 * register to access.
101 */ 106 */
107/* 00101: Spare Control Register 3 */
108#define BCM54XX_SHD_SCR3 0x05
109#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
110#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
111#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
112
113/* 01010: Auto Power-Down */
114#define BCM54XX_SHD_APD 0x0a
115#define BCM54XX_SHD_APD_EN 0x0020
116
102#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ 117#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
103 /* LED3 / ~LINKSPD[2] selector */ 118 /* LED3 / ~LINKSPD[2] selector */
104#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) 119#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
105 /* LED1 / ~LINKSPD[1] selector */ 120 /* LED1 / ~LINKSPD[1] selector */
106#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) 121#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
122#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
107#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ 123#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
108#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ 124#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
109#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ 125#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
110#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ 126#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
111#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ 127#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
112 128
129
113/* 130/*
114 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) 131 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
115 */ 132 */
@@ -138,16 +155,6 @@
138#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ 155#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
139#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ 156#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
140 157
141/*
142 * Device flags for PHYs that can be configured for different operating
143 * modes.
144 */
145#define PHY_BCM_FLAGS_VALID 0x80000000
146#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
147#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
148#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
149#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
150
151 158
152/*****************************************************************************/ 159/*****************************************************************************/
153/* Fast Ethernet Transceiver definitions. */ 160/* Fast Ethernet Transceiver definitions. */
@@ -237,53 +244,145 @@ static int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
237 return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val); 244 return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
238} 245}
239 246
247/* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */
240static int bcm50610_a0_workaround(struct phy_device *phydev) 248static int bcm50610_a0_workaround(struct phy_device *phydev)
241{ 249{
242 int err; 250 int err;
243 251
244 err = bcm54xx_auxctl_write(phydev,
245 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
246 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
247 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
248 if (err < 0)
249 return err;
250
251 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
252 MII_BCM54XX_EXP_EXP08_RJCT_2MHZ |
253 MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE);
254 if (err < 0)
255 goto error;
256
257 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0, 252 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0,
258 MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN | 253 MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN |
259 MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF); 254 MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF);
260 if (err < 0) 255 if (err < 0)
261 goto error; 256 return err;
262 257
263 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3, 258 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3,
264 MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ); 259 MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
265 if (err < 0) 260 if (err < 0)
266 goto error; 261 return err;
267 262
268 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, 263 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75,
269 MII_BCM54XX_EXP_EXP75_VDACCTRL); 264 MII_BCM54XX_EXP_EXP75_VDACCTRL);
270 if (err < 0) 265 if (err < 0)
271 goto error; 266 return err;
272 267
273 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96, 268 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96,
274 MII_BCM54XX_EXP_EXP96_MYST); 269 MII_BCM54XX_EXP_EXP96_MYST);
275 if (err < 0) 270 if (err < 0)
276 goto error; 271 return err;
277 272
278 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97, 273 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97,
279 MII_BCM54XX_EXP_EXP97_MYST); 274 MII_BCM54XX_EXP_EXP97_MYST);
280 275
276 return err;
277}
278
279static int bcm54xx_phydsp_config(struct phy_device *phydev)
280{
281 int err, err2;
282
283 /* Enable the SMDSP clock */
284 err = bcm54xx_auxctl_write(phydev,
285 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
286 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
287 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
288 if (err < 0)
289 return err;
290
291 if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
292 BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
293 /* Clear bit 9 to fix a phy interop issue. */
294 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
295 MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
296 if (err < 0)
297 goto error;
298
299 if (phydev->drv->phy_id == PHY_ID_BCM50610) {
300 err = bcm50610_a0_workaround(phydev);
301 if (err < 0)
302 goto error;
303 }
304 }
305
306 if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
307 int val;
308
309 val = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
310 if (val < 0)
311 goto error;
312
313 val |= MII_BCM54XX_EXP_EXP75_CM_OSC;
314 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, val);
315 }
316
281error: 317error:
282 bcm54xx_auxctl_write(phydev, 318 /* Disable the SMDSP clock */
283 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL, 319 err2 = bcm54xx_auxctl_write(phydev,
284 MII_BCM54XX_AUXCTL_ACTL_TX_6DB); 320 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
321 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
285 322
286 return err; 323 /* Return the first error reported. */
324 return err ? err : err2;
325}
326
327static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
328{
329 u32 val, orig;
330 bool clk125en = true;
331
332 /* Abort if we are using an untested phy. */
333 if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 ||
334 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 ||
335 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
336 return;
337
338 val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
339 if (val < 0)
340 return;
341
342 orig = val;
343
344 if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
345 BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
346 BRCM_PHY_REV(phydev) >= 0x3) {
347 /*
348 * Here, bit 0 _disables_ CLK125 when set.
349 * This bit is set by default.
350 */
351 clk125en = false;
352 } else {
353 if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
354 /* Here, bit 0 _enables_ CLK125 when set */
355 val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
356 clk125en = false;
357 }
358 }
359
360 if (clk125en == false ||
361 (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
362 val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS;
363 else
364 val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
365
366 if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY)
367 val |= BCM54XX_SHD_SCR3_TRDDAPD;
368
369 if (orig != val)
370 bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
371
372 val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
373 if (val < 0)
374 return;
375
376 orig = val;
377
378 if (clk125en == false ||
379 (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
380 val |= BCM54XX_SHD_APD_EN;
381 else
382 val &= ~BCM54XX_SHD_APD_EN;
383
384 if (orig != val)
385 bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
287} 386}
288 387
289static int bcm54xx_config_init(struct phy_device *phydev) 388static int bcm54xx_config_init(struct phy_device *phydev)
@@ -308,38 +407,17 @@ static int bcm54xx_config_init(struct phy_device *phydev)
308 if (err < 0) 407 if (err < 0)
309 return err; 408 return err;
310 409
311 if (phydev->drv->phy_id == PHY_ID_BCM50610) { 410 if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
312 err = bcm50610_a0_workaround(phydev); 411 BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
313 if (err < 0) 412 (phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
314 return err; 413 bcm54xx_shadow_write(phydev, BCM54XX_SHD_RGMII_MODE, 0);
315 }
316
317 if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
318 int err2;
319
320 err = bcm54xx_auxctl_write(phydev,
321 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
322 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
323 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
324 if (err < 0)
325 return err;
326
327 reg = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
328 if (reg < 0)
329 goto error;
330 414
331 reg |= MII_BCM54XX_EXP_EXP75_CM_OSC; 415 if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
332 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, reg); 416 (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
417 (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
418 bcm54xx_adjust_rxrefclk(phydev);
333 419
334error: 420 bcm54xx_phydsp_config(phydev);
335 err2 = bcm54xx_auxctl_write(phydev,
336 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
337 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
338 if (err)
339 return err;
340 if (err2)
341 return err2;
342 }
343 421
344 return 0; 422 return 0;
345} 423}
@@ -564,9 +642,11 @@ static int brcm_fet_config_init(struct phy_device *phydev)
564 if (err < 0) 642 if (err < 0)
565 goto done; 643 goto done;
566 644
567 /* Enable auto power down */ 645 if (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE) {
568 err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2, 646 /* Enable auto power down */
569 MII_BRCM_FET_SHDW_AS2_APDE); 647 err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
648 MII_BRCM_FET_SHDW_AS2_APDE);
649 }
570 650
571done: 651done:
572 /* Disable shadow register access */ 652 /* Disable shadow register access */
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 250e10f2c35b..8659d341e769 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
238 }, 238 },
239 {}, 239 {},
240}; 240};
241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
241 242
242static struct of_platform_driver mdio_ofgpio_driver = { 243static struct of_platform_driver mdio_ofgpio_driver = {
243 .name = "mdio-gpio", 244 .name = "mdio-gpio",
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 7cbf6f9b51de..60c8d233209f 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -111,9 +111,6 @@ struct pppoe_net {
111 rwlock_t hash_lock; 111 rwlock_t hash_lock;
112}; 112};
113 113
114/* to eliminate a race btw pppoe_flush_dev and pppoe_release */
115static DEFINE_SPINLOCK(flush_lock);
116
117/* 114/*
118 * PPPoE could be in the following stages: 115 * PPPoE could be in the following stages:
119 * 1) Discovery stage (to obtain remote MAC and Session ID) 116 * 1) Discovery stage (to obtain remote MAC and Session ID)
@@ -253,20 +250,19 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
253{ 250{
254 struct net_device *dev; 251 struct net_device *dev;
255 struct pppoe_net *pn; 252 struct pppoe_net *pn;
256 struct pppox_sock *pppox_sock; 253 struct pppox_sock *pppox_sock = NULL;
257 254
258 int ifindex; 255 int ifindex;
259 256
260 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev); 257 rcu_read_lock();
261 if (!dev) 258 dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
262 return NULL; 259 if (dev) {
263 260 ifindex = dev->ifindex;
264 ifindex = dev->ifindex; 261 pn = net_generic(net, pppoe_net_id);
265 pn = net_generic(net, pppoe_net_id); 262 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
266 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
267 sp->sa_addr.pppoe.remote, ifindex); 263 sp->sa_addr.pppoe.remote, ifindex);
268 dev_put(dev); 264 }
269 265 rcu_read_unlock();
270 return pppox_sock; 266 return pppox_sock;
271} 267}
272 268
@@ -303,45 +299,48 @@ static void pppoe_flush_dev(struct net_device *dev)
303 write_lock_bh(&pn->hash_lock); 299 write_lock_bh(&pn->hash_lock);
304 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 300 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
305 struct pppox_sock *po = pn->hash_table[i]; 301 struct pppox_sock *po = pn->hash_table[i];
302 struct sock *sk;
306 303
307 while (po != NULL) { 304 while (po) {
308 struct sock *sk; 305 while (po && po->pppoe_dev != dev) {
309 if (po->pppoe_dev != dev) {
310 po = po->next; 306 po = po->next;
311 continue;
312 } 307 }
308
309 if (!po)
310 break;
311
313 sk = sk_pppox(po); 312 sk = sk_pppox(po);
314 spin_lock(&flush_lock);
315 po->pppoe_dev = NULL;
316 spin_unlock(&flush_lock);
317 dev_put(dev);
318 313
319 /* We always grab the socket lock, followed by the 314 /* We always grab the socket lock, followed by the
320 * hash_lock, in that order. Since we should 315 * hash_lock, in that order. Since we should hold the
321 * hold the sock lock while doing any unbinding, 316 * sock lock while doing any unbinding, we need to
322 * we need to release the lock we're holding. 317 * release the lock we're holding. Hold a reference to
323 * Hold a reference to the sock so it doesn't disappear 318 * the sock so it doesn't disappear as we're jumping
324 * as we're jumping between locks. 319 * between locks.
325 */ 320 */
326 321
327 sock_hold(sk); 322 sock_hold(sk);
328
329 write_unlock_bh(&pn->hash_lock); 323 write_unlock_bh(&pn->hash_lock);
330 lock_sock(sk); 324 lock_sock(sk);
331 325
332 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { 326 if (po->pppoe_dev == dev
327 && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
333 pppox_unbind_sock(sk); 328 pppox_unbind_sock(sk);
334 sk->sk_state = PPPOX_ZOMBIE; 329 sk->sk_state = PPPOX_ZOMBIE;
335 sk->sk_state_change(sk); 330 sk->sk_state_change(sk);
331 po->pppoe_dev = NULL;
332 dev_put(dev);
336 } 333 }
337 334
338 release_sock(sk); 335 release_sock(sk);
339 sock_put(sk); 336 sock_put(sk);
340 337
341 /* Restart scan at the beginning of this hash chain. 338 /* Restart the process from the start of the current
342 * While the lock was dropped the chain contents may 339 * hash chain. We dropped locks so the world may have
343 * have changed. 340 * change from underneath us.
344 */ 341 */
342
343 BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
345 write_lock_bh(&pn->hash_lock); 344 write_lock_bh(&pn->hash_lock);
346 po = pn->hash_table[i]; 345 po = pn->hash_table[i];
347 } 346 }
@@ -388,11 +387,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
388 struct pppox_sock *po = pppox_sk(sk); 387 struct pppox_sock *po = pppox_sk(sk);
389 struct pppox_sock *relay_po; 388 struct pppox_sock *relay_po;
390 389
390 /* Backlog receive. Semantics of backlog rcv preclude any code from
391 * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state
392 * can't change.
393 */
394
391 if (sk->sk_state & PPPOX_BOUND) { 395 if (sk->sk_state & PPPOX_BOUND) {
392 ppp_input(&po->chan, skb); 396 ppp_input(&po->chan, skb);
393 } else if (sk->sk_state & PPPOX_RELAY) { 397 } else if (sk->sk_state & PPPOX_RELAY) {
394 relay_po = get_item_by_addr(dev_net(po->pppoe_dev), 398 relay_po = get_item_by_addr(sock_net(sk),
395 &po->pppoe_relay); 399 &po->pppoe_relay);
396 if (relay_po == NULL) 400 if (relay_po == NULL)
397 goto abort_kfree; 401 goto abort_kfree;
398 402
@@ -447,6 +451,10 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
447 goto drop; 451 goto drop;
448 452
449 pn = pppoe_pernet(dev_net(dev)); 453 pn = pppoe_pernet(dev_net(dev));
454
455 /* Note that get_item does a sock_hold(), so sk_pppox(po)
456 * is known to be safe.
457 */
450 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 458 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
451 if (!po) 459 if (!po)
452 goto drop; 460 goto drop;
@@ -561,6 +569,7 @@ static int pppoe_release(struct socket *sock)
561 struct sock *sk = sock->sk; 569 struct sock *sk = sock->sk;
562 struct pppox_sock *po; 570 struct pppox_sock *po;
563 struct pppoe_net *pn; 571 struct pppoe_net *pn;
572 struct net *net = NULL;
564 573
565 if (!sk) 574 if (!sk)
566 return 0; 575 return 0;
@@ -571,44 +580,28 @@ static int pppoe_release(struct socket *sock)
571 return -EBADF; 580 return -EBADF;
572 } 581 }
573 582
583 po = pppox_sk(sk);
584
585 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
586 dev_put(po->pppoe_dev);
587 po->pppoe_dev = NULL;
588 }
589
574 pppox_unbind_sock(sk); 590 pppox_unbind_sock(sk);
575 591
576 /* Signal the death of the socket. */ 592 /* Signal the death of the socket. */
577 sk->sk_state = PPPOX_DEAD; 593 sk->sk_state = PPPOX_DEAD;
578 594
579 /* 595 net = sock_net(sk);
580 * pppoe_flush_dev could lead to a race with 596 pn = pppoe_pernet(net);
581 * this routine so we use flush_lock to eliminate
582 * such a case (we only need per-net specific data)
583 */
584 spin_lock(&flush_lock);
585 po = pppox_sk(sk);
586 if (!po->pppoe_dev) {
587 spin_unlock(&flush_lock);
588 goto out;
589 }
590 pn = pppoe_pernet(dev_net(po->pppoe_dev));
591 spin_unlock(&flush_lock);
592 597
593 /* 598 /*
594 * protect "po" from concurrent updates 599 * protect "po" from concurrent updates
595 * on pppoe_flush_dev 600 * on pppoe_flush_dev
596 */ 601 */
597 write_lock_bh(&pn->hash_lock); 602 delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
603 po->pppoe_ifindex);
598 604
599 po = pppox_sk(sk);
600 if (stage_session(po->pppoe_pa.sid))
601 __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
602 po->pppoe_ifindex);
603
604 if (po->pppoe_dev) {
605 dev_put(po->pppoe_dev);
606 po->pppoe_dev = NULL;
607 }
608
609 write_unlock_bh(&pn->hash_lock);
610
611out:
612 sock_orphan(sk); 605 sock_orphan(sk);
613 sock->sk = NULL; 606 sock->sk = NULL;
614 607
@@ -625,8 +618,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
625 struct sock *sk = sock->sk; 618 struct sock *sk = sock->sk;
626 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; 619 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
627 struct pppox_sock *po = pppox_sk(sk); 620 struct pppox_sock *po = pppox_sk(sk);
628 struct net_device *dev; 621 struct net_device *dev = NULL;
629 struct pppoe_net *pn; 622 struct pppoe_net *pn;
623 struct net *net = NULL;
630 int error; 624 int error;
631 625
632 lock_sock(sk); 626 lock_sock(sk);
@@ -652,12 +646,14 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
652 /* Delete the old binding */ 646 /* Delete the old binding */
653 if (stage_session(po->pppoe_pa.sid)) { 647 if (stage_session(po->pppoe_pa.sid)) {
654 pppox_unbind_sock(sk); 648 pppox_unbind_sock(sk);
649 pn = pppoe_pernet(sock_net(sk));
650 delete_item(pn, po->pppoe_pa.sid,
651 po->pppoe_pa.remote, po->pppoe_ifindex);
655 if (po->pppoe_dev) { 652 if (po->pppoe_dev) {
656 pn = pppoe_pernet(dev_net(po->pppoe_dev));
657 delete_item(pn, po->pppoe_pa.sid,
658 po->pppoe_pa.remote, po->pppoe_ifindex);
659 dev_put(po->pppoe_dev); 653 dev_put(po->pppoe_dev);
654 po->pppoe_dev = NULL;
660 } 655 }
656
661 memset(sk_pppox(po) + 1, 0, 657 memset(sk_pppox(po) + 1, 0,
662 sizeof(struct pppox_sock) - sizeof(struct sock)); 658 sizeof(struct pppox_sock) - sizeof(struct sock));
663 sk->sk_state = PPPOX_NONE; 659 sk->sk_state = PPPOX_NONE;
@@ -666,16 +662,15 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
666 /* Re-bind in session stage only */ 662 /* Re-bind in session stage only */
667 if (stage_session(sp->sa_addr.pppoe.sid)) { 663 if (stage_session(sp->sa_addr.pppoe.sid)) {
668 error = -ENODEV; 664 error = -ENODEV;
669 dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev); 665 net = sock_net(sk);
666 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
670 if (!dev) 667 if (!dev)
671 goto end; 668 goto err_put;
672 669
673 po->pppoe_dev = dev; 670 po->pppoe_dev = dev;
674 po->pppoe_ifindex = dev->ifindex; 671 po->pppoe_ifindex = dev->ifindex;
675 pn = pppoe_pernet(dev_net(dev)); 672 pn = pppoe_pernet(net);
676 write_lock_bh(&pn->hash_lock);
677 if (!(dev->flags & IFF_UP)) { 673 if (!(dev->flags & IFF_UP)) {
678 write_unlock_bh(&pn->hash_lock);
679 goto err_put; 674 goto err_put;
680 } 675 }
681 676
@@ -683,6 +678,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
683 &sp->sa_addr.pppoe, 678 &sp->sa_addr.pppoe,
684 sizeof(struct pppoe_addr)); 679 sizeof(struct pppoe_addr));
685 680
681 write_lock_bh(&pn->hash_lock);
686 error = __set_item(pn, po); 682 error = __set_item(pn, po);
687 write_unlock_bh(&pn->hash_lock); 683 write_unlock_bh(&pn->hash_lock);
688 if (error < 0) 684 if (error < 0)
@@ -696,8 +692,11 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
696 po->chan.ops = &pppoe_chan_ops; 692 po->chan.ops = &pppoe_chan_ops;
697 693
698 error = ppp_register_net_channel(dev_net(dev), &po->chan); 694 error = ppp_register_net_channel(dev_net(dev), &po->chan);
699 if (error) 695 if (error) {
696 delete_item(pn, po->pppoe_pa.sid,
697 po->pppoe_pa.remote, po->pppoe_ifindex);
700 goto err_put; 698 goto err_put;
699 }
701 700
702 sk->sk_state = PPPOX_CONNECTED; 701 sk->sk_state = PPPOX_CONNECTED;
703 } 702 }
@@ -915,6 +914,14 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
915 struct pppoe_hdr *ph; 914 struct pppoe_hdr *ph;
916 int data_len = skb->len; 915 int data_len = skb->len;
917 916
917 /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
918 * xmit operations conclude prior to an unregistration call. Thus
919 * sk->sk_state cannot change, so we don't need to do lock_sock().
920 * But, we also can't do a lock_sock since that introduces a potential
921 * deadlock as we'd reverse the lock ordering used when calling
922 * ppp_unregister_channel().
923 */
924
918 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 925 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
919 goto abort; 926 goto abort;
920 927
@@ -944,7 +951,6 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
944 po->pppoe_pa.remote, NULL, data_len); 951 po->pppoe_pa.remote, NULL, data_len);
945 952
946 dev_queue_xmit(skb); 953 dev_queue_xmit(skb);
947
948 return 1; 954 return 1;
949 955
950abort: 956abort:
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index c14ee24c05a8..ac806b27c658 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -104,7 +104,8 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
104 104
105EXPORT_SYMBOL(pppox_ioctl); 105EXPORT_SYMBOL(pppox_ioctl);
106 106
107static int pppox_create(struct net *net, struct socket *sock, int protocol) 107static int pppox_create(struct net *net, struct socket *sock, int protocol,
108 int kern)
108{ 109{
109 int rc = -EPROTOTYPE; 110 int rc = -EPROTOTYPE;
110 111
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 421471790601..1f59f054452d 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -56,7 +56,8 @@
56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) 56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
57#define SMALL_BUFFER_SIZE 512 57#define SMALL_BUFFER_SIZE 512
58#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2) 58#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
59#define LARGE_BUFFER_SIZE PAGE_SIZE 59#define LARGE_BUFFER_MAX_SIZE 8192
60#define LARGE_BUFFER_MIN_SIZE 2048
60#define MAX_SPLIT_SIZE 1023 61#define MAX_SPLIT_SIZE 1023
61#define QLGE_SB_PAD 32 62#define QLGE_SB_PAD 32
62 63
@@ -96,6 +97,7 @@ enum {
96 97
97 /* Misc. stuff */ 98 /* Misc. stuff */
98 MAILBOX_COUNT = 16, 99 MAILBOX_COUNT = 16,
100 MAILBOX_TIMEOUT = 5,
99 101
100 PROC_ADDR_RDY = (1 << 31), 102 PROC_ADDR_RDY = (1 << 31),
101 PROC_ADDR_R = (1 << 30), 103 PROC_ADDR_R = (1 << 30),
@@ -795,6 +797,7 @@ enum {
795 MB_WOL_BCAST = (1 << 5), 797 MB_WOL_BCAST = (1 << 5),
796 MB_WOL_LINK_UP = (1 << 6), 798 MB_WOL_LINK_UP = (1 << 6),
797 MB_WOL_LINK_DOWN = (1 << 7), 799 MB_WOL_LINK_DOWN = (1 << 7),
800 MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
798 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ 801 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
799 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ 802 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
800 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ 803 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
@@ -804,12 +807,27 @@ enum {
804 MB_CMD_SET_PORT_CFG = 0x00000122, 807 MB_CMD_SET_PORT_CFG = 0x00000122,
805 MB_CMD_GET_PORT_CFG = 0x00000123, 808 MB_CMD_GET_PORT_CFG = 0x00000123,
806 MB_CMD_GET_LINK_STS = 0x00000124, 809 MB_CMD_GET_LINK_STS = 0x00000124,
810 MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
811 QL_LED_BLINK = 0x03e803e8,
812 MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
807 MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ 813 MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
808 MB_SET_MPI_TFK_STOP = (1 << 0), 814 MB_SET_MPI_TFK_STOP = (1 << 0),
809 MB_SET_MPI_TFK_RESUME = (1 << 1), 815 MB_SET_MPI_TFK_RESUME = (1 << 1),
810 MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ 816 MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
811 MB_GET_MPI_TFK_STOPPED = (1 << 0), 817 MB_GET_MPI_TFK_STOPPED = (1 << 0),
812 MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), 818 MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
819 /* Sub-commands for IDC request.
820 * This describes the reason for the
821 * IDC request.
822 */
823 MB_CMD_IOP_NONE = 0x0000,
824 MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
825 MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
826 MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
827 MB_CMD_IOP_DVR_START = 0x0100,
828 MB_CMD_IOP_FLASH_ACC = 0x0101,
829 MB_CMD_IOP_RESTART_MPI = 0x0102,
830 MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
813 831
814 /* Mailbox Command Status. */ 832 /* Mailbox Command Status. */
815 MB_CMD_STS_GOOD = 0x00004000, /* Success. */ 833 MB_CMD_STS_GOOD = 0x00004000, /* Success. */
@@ -1201,9 +1219,17 @@ struct tx_ring_desc {
1201 struct tx_ring_desc *next; 1219 struct tx_ring_desc *next;
1202}; 1220};
1203 1221
1222struct page_chunk {
1223 struct page *page; /* master page */
1224 char *va; /* virt addr for this chunk */
1225 u64 map; /* mapping for master */
1226 unsigned int offset; /* offset for this chunk */
1227 unsigned int last_flag; /* flag set for last chunk in page */
1228};
1229
1204struct bq_desc { 1230struct bq_desc {
1205 union { 1231 union {
1206 struct page *lbq_page; 1232 struct page_chunk pg_chunk;
1207 struct sk_buff *skb; 1233 struct sk_buff *skb;
1208 } p; 1234 } p;
1209 __le64 *addr; 1235 __le64 *addr;
@@ -1237,6 +1263,9 @@ struct tx_ring {
1237 atomic_t queue_stopped; /* Turns queue off when full. */ 1263 atomic_t queue_stopped; /* Turns queue off when full. */
1238 struct delayed_work tx_work; 1264 struct delayed_work tx_work;
1239 struct ql_adapter *qdev; 1265 struct ql_adapter *qdev;
1266 u64 tx_packets;
1267 u64 tx_bytes;
1268 u64 tx_errors;
1240}; 1269};
1241 1270
1242/* 1271/*
@@ -1272,6 +1301,7 @@ struct rx_ring {
1272 dma_addr_t lbq_base_dma; 1301 dma_addr_t lbq_base_dma;
1273 void *lbq_base_indirect; 1302 void *lbq_base_indirect;
1274 dma_addr_t lbq_base_indirect_dma; 1303 dma_addr_t lbq_base_indirect_dma;
1304 struct page_chunk pg_chunk; /* current page for chunks */
1275 struct bq_desc *lbq; /* array of control blocks */ 1305 struct bq_desc *lbq; /* array of control blocks */
1276 void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ 1306 void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
1277 u32 lbq_prod_idx; /* current sw prod idx */ 1307 u32 lbq_prod_idx; /* current sw prod idx */
@@ -1302,6 +1332,11 @@ struct rx_ring {
1302 struct napi_struct napi; 1332 struct napi_struct napi;
1303 u8 reserved; 1333 u8 reserved;
1304 struct ql_adapter *qdev; 1334 struct ql_adapter *qdev;
1335 u64 rx_packets;
1336 u64 rx_multicast;
1337 u64 rx_bytes;
1338 u64 rx_dropped;
1339 u64 rx_errors;
1305}; 1340};
1306 1341
1307/* 1342/*
@@ -1386,6 +1421,153 @@ struct nic_stats {
1386 u64 rx_nic_fifo_drop; 1421 u64 rx_nic_fifo_drop;
1387}; 1422};
1388 1423
1424/* Address/Length pairs for the coredump. */
1425enum {
1426 MPI_CORE_REGS_ADDR = 0x00030000,
1427 MPI_CORE_REGS_CNT = 127,
1428 MPI_CORE_SH_REGS_CNT = 16,
1429 TEST_REGS_ADDR = 0x00001000,
1430 TEST_REGS_CNT = 23,
1431 RMII_REGS_ADDR = 0x00001040,
1432 RMII_REGS_CNT = 64,
1433 FCMAC1_REGS_ADDR = 0x00001080,
1434 FCMAC2_REGS_ADDR = 0x000010c0,
1435 FCMAC_REGS_CNT = 64,
1436 FC1_MBX_REGS_ADDR = 0x00001100,
1437 FC2_MBX_REGS_ADDR = 0x00001240,
1438 FC_MBX_REGS_CNT = 64,
1439 IDE_REGS_ADDR = 0x00001140,
1440 IDE_REGS_CNT = 64,
1441 NIC1_MBX_REGS_ADDR = 0x00001180,
1442 NIC2_MBX_REGS_ADDR = 0x00001280,
1443 NIC_MBX_REGS_CNT = 64,
1444 SMBUS_REGS_ADDR = 0x00001200,
1445 SMBUS_REGS_CNT = 64,
1446 I2C_REGS_ADDR = 0x00001fc0,
1447 I2C_REGS_CNT = 64,
1448 MEMC_REGS_ADDR = 0x00003000,
1449 MEMC_REGS_CNT = 256,
1450 PBUS_REGS_ADDR = 0x00007c00,
1451 PBUS_REGS_CNT = 256,
1452 MDE_REGS_ADDR = 0x00010000,
1453 MDE_REGS_CNT = 6,
1454 CODE_RAM_ADDR = 0x00020000,
1455 CODE_RAM_CNT = 0x2000,
1456 MEMC_RAM_ADDR = 0x00100000,
1457 MEMC_RAM_CNT = 0x2000,
1458};
1459
1460#define MPI_COREDUMP_COOKIE 0x5555aaaa
1461struct mpi_coredump_global_header {
1462 u32 cookie;
1463 u8 idString[16];
1464 u32 timeLo;
1465 u32 timeHi;
1466 u32 imageSize;
1467 u32 headerSize;
1468 u8 info[220];
1469};
1470
1471struct mpi_coredump_segment_header {
1472 u32 cookie;
1473 u32 segNum;
1474 u32 segSize;
1475 u32 extra;
1476 u8 description[16];
1477};
1478
1479/* Reg dump segment numbers. */
1480enum {
1481 CORE_SEG_NUM = 1,
1482 TEST_LOGIC_SEG_NUM = 2,
1483 RMII_SEG_NUM = 3,
1484 FCMAC1_SEG_NUM = 4,
1485 FCMAC2_SEG_NUM = 5,
1486 FC1_MBOX_SEG_NUM = 6,
1487 IDE_SEG_NUM = 7,
1488 NIC1_MBOX_SEG_NUM = 8,
1489 SMBUS_SEG_NUM = 9,
1490 FC2_MBOX_SEG_NUM = 10,
1491 NIC2_MBOX_SEG_NUM = 11,
1492 I2C_SEG_NUM = 12,
1493 MEMC_SEG_NUM = 13,
1494 PBUS_SEG_NUM = 14,
1495 MDE_SEG_NUM = 15,
1496 NIC1_CONTROL_SEG_NUM = 16,
1497 NIC2_CONTROL_SEG_NUM = 17,
1498 NIC1_XGMAC_SEG_NUM = 18,
1499 NIC2_XGMAC_SEG_NUM = 19,
1500 WCS_RAM_SEG_NUM = 20,
1501 MEMC_RAM_SEG_NUM = 21,
1502 XAUI_AN_SEG_NUM = 22,
1503 XAUI_HSS_PCS_SEG_NUM = 23,
1504 XFI_AN_SEG_NUM = 24,
1505 XFI_TRAIN_SEG_NUM = 25,
1506 XFI_HSS_PCS_SEG_NUM = 26,
1507 XFI_HSS_TX_SEG_NUM = 27,
1508 XFI_HSS_RX_SEG_NUM = 28,
1509 XFI_HSS_PLL_SEG_NUM = 29,
1510 MISC_NIC_INFO_SEG_NUM = 30,
1511 INTR_STATES_SEG_NUM = 31,
1512 CAM_ENTRIES_SEG_NUM = 32,
1513 ROUTING_WORDS_SEG_NUM = 33,
1514 ETS_SEG_NUM = 34,
1515 PROBE_DUMP_SEG_NUM = 35,
1516 ROUTING_INDEX_SEG_NUM = 36,
1517 MAC_PROTOCOL_SEG_NUM = 37,
1518 XAUI2_AN_SEG_NUM = 38,
1519 XAUI2_HSS_PCS_SEG_NUM = 39,
1520 XFI2_AN_SEG_NUM = 40,
1521 XFI2_TRAIN_SEG_NUM = 41,
1522 XFI2_HSS_PCS_SEG_NUM = 42,
1523 XFI2_HSS_TX_SEG_NUM = 43,
1524 XFI2_HSS_RX_SEG_NUM = 44,
1525 XFI2_HSS_PLL_SEG_NUM = 45,
1526 SEM_REGS_SEG_NUM = 50
1527
1528};
1529
1530struct ql_nic_misc {
1531 u32 rx_ring_count;
1532 u32 tx_ring_count;
1533 u32 intr_count;
1534 u32 function;
1535};
1536
1537struct ql_reg_dump {
1538
1539 /* segment 0 */
1540 struct mpi_coredump_global_header mpi_global_header;
1541
1542 /* segment 16 */
1543 struct mpi_coredump_segment_header nic_regs_seg_hdr;
1544 u32 nic_regs[64];
1545
1546 /* segment 30 */
1547 struct mpi_coredump_segment_header misc_nic_seg_hdr;
1548 struct ql_nic_misc misc_nic_info;
1549
1550 /* segment 31 */
1551 /* one interrupt state for each CQ */
1552 struct mpi_coredump_segment_header intr_states_seg_hdr;
1553 u32 intr_states[MAX_CPUS];
1554
1555 /* segment 32 */
1556 /* 3 cam words each for 16 unicast,
1557 * 2 cam words for each of 32 multicast.
1558 */
1559 struct mpi_coredump_segment_header cam_entries_seg_hdr;
1560 u32 cam_entries[(16 * 3) + (32 * 3)];
1561
1562 /* segment 33 */
1563 struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
1564 u32 nic_routing_words[16];
1565
1566 /* segment 34 */
1567 struct mpi_coredump_segment_header ets_seg_hdr;
1568 u32 ets[8+2];
1569};
1570
1389/* 1571/*
1390 * intr_context structure is used during initialization 1572 * intr_context structure is used during initialization
1391 * to hook the interrupts. It is also used in a single 1573 * to hook the interrupts. It is also used in a single
@@ -1419,6 +1601,8 @@ enum {
1419 QL_ALLMULTI = 6, 1601 QL_ALLMULTI = 6,
1420 QL_PORT_CFG = 7, 1602 QL_PORT_CFG = 7,
1421 QL_CAM_RT_SET = 8, 1603 QL_CAM_RT_SET = 8,
1604 QL_SELFTEST = 9,
1605 QL_LB_LINK_UP = 10,
1422}; 1606};
1423 1607
1424/* link_status bit definitions */ 1608/* link_status bit definitions */
@@ -1526,6 +1710,7 @@ struct ql_adapter {
1526 1710
1527 struct rx_ring rx_ring[MAX_RX_RINGS]; 1711 struct rx_ring rx_ring[MAX_RX_RINGS];
1528 struct tx_ring tx_ring[MAX_TX_RINGS]; 1712 struct tx_ring tx_ring[MAX_TX_RINGS];
1713 unsigned int lbq_buf_order;
1529 1714
1530 int rx_csum; 1715 int rx_csum;
1531 u32 default_rx_queue; 1716 u32 default_rx_queue;
@@ -1540,6 +1725,7 @@ struct ql_adapter {
1540 u32 port_init; 1725 u32 port_init;
1541 u32 link_status; 1726 u32 link_status;
1542 u32 link_config; 1727 u32 link_config;
1728 u32 led_config;
1543 u32 max_frame_size; 1729 u32 max_frame_size;
1544 1730
1545 union flash_params flash; 1731 union flash_params flash;
@@ -1553,6 +1739,7 @@ struct ql_adapter {
1553 struct completion ide_completion; 1739 struct completion ide_completion;
1554 struct nic_operations *nic_ops; 1740 struct nic_operations *nic_ops;
1555 u16 device_id; 1741 u16 device_id;
1742 atomic_t lb_count;
1556}; 1743};
1557 1744
1558/* 1745/*
@@ -1631,10 +1818,22 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev);
1631int ql_cam_route_initialize(struct ql_adapter *qdev); 1818int ql_cam_route_initialize(struct ql_adapter *qdev);
1632int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 1819int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
1633int ql_mb_about_fw(struct ql_adapter *qdev); 1820int ql_mb_about_fw(struct ql_adapter *qdev);
1821int ql_wol(struct ql_adapter *qdev);
1822int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
1823int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
1824int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
1825int ql_mb_get_led_cfg(struct ql_adapter *qdev);
1634void ql_link_on(struct ql_adapter *qdev); 1826void ql_link_on(struct ql_adapter *qdev);
1635void ql_link_off(struct ql_adapter *qdev); 1827void ql_link_off(struct ql_adapter *qdev);
1636int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); 1828int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
1829int ql_mb_get_port_cfg(struct ql_adapter *qdev);
1830int ql_mb_set_port_cfg(struct ql_adapter *qdev);
1637int ql_wait_fifo_empty(struct ql_adapter *qdev); 1831int ql_wait_fifo_empty(struct ql_adapter *qdev);
1832void ql_gen_reg_dump(struct ql_adapter *qdev,
1833 struct ql_reg_dump *mpi_coredump);
1834netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
1835void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
1836int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
1638 1837
1639#if 1 1838#if 1
1640#define QL_ALL_DUMP 1839#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index aa88cb3f41c7..9f58c4710761 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,185 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3
4static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
5{
6 int status = 0;
7 int i;
8
9 for (i = 0; i < 8; i++, buf++) {
10 ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
11 *buf = ql_read32(qdev, NIC_ETS);
12 }
13
14 for (i = 0; i < 2; i++, buf++) {
15 ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
16 *buf = ql_read32(qdev, CNA_ETS);
17 }
18
19 return status;
20}
21
22static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
23{
24 int i;
25
26 for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
27 ql_write32(qdev, INTR_EN,
28 qdev->intr_context[i].intr_read_mask);
29 *buf = ql_read32(qdev, INTR_EN);
30 }
31}
32
33static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
34{
35 int i, status;
36 u32 value[3];
37
38 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
39 if (status)
40 return status;
41
42 for (i = 0; i < 16; i++) {
43 status = ql_get_mac_addr_reg(qdev,
44 MAC_ADDR_TYPE_CAM_MAC, i, value);
45 if (status) {
46 QPRINTK(qdev, DRV, ERR,
47 "Failed read of mac index register.\n");
48 goto err;
49 }
50 *buf++ = value[0]; /* lower MAC address */
51 *buf++ = value[1]; /* upper MAC address */
52 *buf++ = value[2]; /* output */
53 }
54 for (i = 0; i < 32; i++) {
55 status = ql_get_mac_addr_reg(qdev,
56 MAC_ADDR_TYPE_MULTI_MAC, i, value);
57 if (status) {
58 QPRINTK(qdev, DRV, ERR,
59 "Failed read of mac index register.\n");
60 goto err;
61 }
62 *buf++ = value[0]; /* lower Mcast address */
63 *buf++ = value[1]; /* upper Mcast address */
64 }
65err:
66 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
67 return status;
68}
69
70static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
71{
72 int status;
73 u32 value, i;
74
75 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
76 if (status)
77 return status;
78
79 for (i = 0; i < 16; i++) {
80 status = ql_get_routing_reg(qdev, i, &value);
81 if (status) {
82 QPRINTK(qdev, DRV, ERR,
83 "Failed read of routing index register.\n");
84 goto err;
85 } else {
86 *buf++ = value;
87 }
88 }
89err:
90 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
91 return status;
92}
93
94/* Create a coredump segment header */
95static void ql_build_coredump_seg_header(
96 struct mpi_coredump_segment_header *seg_hdr,
97 u32 seg_number, u32 seg_size, u8 *desc)
98{
99 memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
100 seg_hdr->cookie = MPI_COREDUMP_COOKIE;
101 seg_hdr->segNum = seg_number;
102 seg_hdr->segSize = seg_size;
103 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
104}
105
106void ql_gen_reg_dump(struct ql_adapter *qdev,
107 struct ql_reg_dump *mpi_coredump)
108{
109 int i, status;
110
111
112 memset(&(mpi_coredump->mpi_global_header), 0,
113 sizeof(struct mpi_coredump_global_header));
114 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
115 mpi_coredump->mpi_global_header.headerSize =
116 sizeof(struct mpi_coredump_global_header);
117 mpi_coredump->mpi_global_header.imageSize =
118 sizeof(struct ql_reg_dump);
119 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
120 sizeof(mpi_coredump->mpi_global_header.idString));
121
122
123 /* segment 16 */
124 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
125 MISC_NIC_INFO_SEG_NUM,
126 sizeof(struct mpi_coredump_segment_header)
127 + sizeof(mpi_coredump->misc_nic_info),
128 "MISC NIC INFO");
129 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
130 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
131 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
132 mpi_coredump->misc_nic_info.function = qdev->func;
133
134 /* Segment 16, Rev C. Step 18 */
135 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
136 NIC1_CONTROL_SEG_NUM,
137 sizeof(struct mpi_coredump_segment_header)
138 + sizeof(mpi_coredump->nic_regs),
139 "NIC Registers");
140 /* Get generic reg dump */
141 for (i = 0; i < 64; i++)
142 mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
143
144 /* Segment 31 */
145 /* Get indexed register values. */
146 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
147 INTR_STATES_SEG_NUM,
148 sizeof(struct mpi_coredump_segment_header)
149 + sizeof(mpi_coredump->intr_states),
150 "INTR States");
151 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
152
153 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
154 CAM_ENTRIES_SEG_NUM,
155 sizeof(struct mpi_coredump_segment_header)
156 + sizeof(mpi_coredump->cam_entries),
157 "CAM Entries");
158 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
159 if (status)
160 return;
161
162 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
163 ROUTING_WORDS_SEG_NUM,
164 sizeof(struct mpi_coredump_segment_header)
165 + sizeof(mpi_coredump->nic_routing_words),
166 "Routing Words");
167 status = ql_get_routing_entries(qdev,
168 &mpi_coredump->nic_routing_words[0]);
169 if (status)
170 return;
171
172 /* Segment 34 (Rev C. step 23) */
173 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
174 ETS_SEG_NUM,
175 sizeof(struct mpi_coredump_segment_header)
176 + sizeof(mpi_coredump->ets),
177 "ETS Registers");
178 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
179 if (status)
180 return;
181}
182
3#ifdef QL_REG_DUMP 183#ifdef QL_REG_DUMP
4static void ql_dump_intr_states(struct ql_adapter *qdev) 184static void ql_dump_intr_states(struct ql_adapter *qdev)
5{ 185{
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index aac6c6f19a21..058fa0a48c6f 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -36,6 +36,11 @@
36 36
37#include "qlge.h" 37#include "qlge.h"
38 38
39static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
40 "Loopback test (offline)"
41};
42#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
43
39static int ql_update_ring_coalescing(struct ql_adapter *qdev) 44static int ql_update_ring_coalescing(struct ql_adapter *qdev)
40{ 45{
41 int i, status = 0; 46 int i, status = 0;
@@ -251,6 +256,8 @@ static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
251static int ql_get_sset_count(struct net_device *dev, int sset) 256static int ql_get_sset_count(struct net_device *dev, int sset)
252{ 257{
253 switch (sset) { 258 switch (sset) {
259 case ETH_SS_TEST:
260 return QLGE_TEST_LEN;
254 case ETH_SS_STATS: 261 case ETH_SS_STATS:
255 return ARRAY_SIZE(ql_stats_str_arr); 262 return ARRAY_SIZE(ql_stats_str_arr);
256 default: 263 default:
@@ -371,6 +378,181 @@ static void ql_get_drvinfo(struct net_device *ndev,
371 drvinfo->eedump_len = 0; 378 drvinfo->eedump_len = 0;
372} 379}
373 380
381static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
382{
383 struct ql_adapter *qdev = netdev_priv(ndev);
384 /* What we support. */
385 wol->supported = WAKE_MAGIC;
386 /* What we've currently got set. */
387 wol->wolopts = qdev->wol;
388}
389
390static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
391{
392 struct ql_adapter *qdev = netdev_priv(ndev);
393 int status;
394
395 if (wol->wolopts & ~WAKE_MAGIC)
396 return -EINVAL;
397 qdev->wol = wol->wolopts;
398
399 QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
400 qdev->wol, ndev->name);
401 if (!qdev->wol) {
402 u32 wol = 0;
403 status = ql_mb_wol_mode(qdev, wol);
404 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
405 (status == 0) ? "cleared sucessfully" : "clear failed",
406 wol, qdev->ndev->name);
407 }
408
409 return 0;
410}
411
412static int ql_phys_id(struct net_device *ndev, u32 data)
413{
414 struct ql_adapter *qdev = netdev_priv(ndev);
415 u32 led_reg, i;
416 int status;
417
418 /* Save the current LED settings */
419 status = ql_mb_get_led_cfg(qdev);
420 if (status)
421 return status;
422 led_reg = qdev->led_config;
423
424 /* Start blinking the led */
425 if (!data || data > 300)
426 data = 300;
427
428 for (i = 0; i < (data * 10); i++)
429 ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
430
431 /* Restore LED settings */
432 status = ql_mb_set_led_cfg(qdev, led_reg);
433 if (status)
434 return status;
435
436 return 0;
437}
438
439static int ql_start_loopback(struct ql_adapter *qdev)
440{
441 if (netif_carrier_ok(qdev->ndev)) {
442 set_bit(QL_LB_LINK_UP, &qdev->flags);
443 netif_carrier_off(qdev->ndev);
444 } else
445 clear_bit(QL_LB_LINK_UP, &qdev->flags);
446 qdev->link_config |= CFG_LOOPBACK_PCS;
447 return ql_mb_set_port_cfg(qdev);
448}
449
450static void ql_stop_loopback(struct ql_adapter *qdev)
451{
452 qdev->link_config &= ~CFG_LOOPBACK_PCS;
453 ql_mb_set_port_cfg(qdev);
454 if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
455 netif_carrier_on(qdev->ndev);
456 clear_bit(QL_LB_LINK_UP, &qdev->flags);
457 }
458}
459
460static void ql_create_lb_frame(struct sk_buff *skb,
461 unsigned int frame_size)
462{
463 memset(skb->data, 0xFF, frame_size);
464 frame_size &= ~1;
465 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
466 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
467 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
468}
469
470void ql_check_lb_frame(struct ql_adapter *qdev,
471 struct sk_buff *skb)
472{
473 unsigned int frame_size = skb->len;
474
475 if ((*(skb->data + 3) == 0xFF) &&
476 (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
477 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
478 atomic_dec(&qdev->lb_count);
479 return;
480 }
481}
482
483static int ql_run_loopback_test(struct ql_adapter *qdev)
484{
485 int i;
486 netdev_tx_t rc;
487 struct sk_buff *skb;
488 unsigned int size = SMALL_BUF_MAP_SIZE;
489
490 for (i = 0; i < 64; i++) {
491 skb = netdev_alloc_skb(qdev->ndev, size);
492 if (!skb)
493 return -ENOMEM;
494
495 skb->queue_mapping = 0;
496 skb_put(skb, size);
497 ql_create_lb_frame(skb, size);
498 rc = ql_lb_send(skb, qdev->ndev);
499 if (rc != NETDEV_TX_OK)
500 return -EPIPE;
501 atomic_inc(&qdev->lb_count);
502 }
503
504 ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
505 return atomic_read(&qdev->lb_count) ? -EIO : 0;
506}
507
508static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
509{
510 *data = ql_start_loopback(qdev);
511 if (*data)
512 goto out;
513 *data = ql_run_loopback_test(qdev);
514out:
515 ql_stop_loopback(qdev);
516 return *data;
517}
518
519static void ql_self_test(struct net_device *ndev,
520 struct ethtool_test *eth_test, u64 *data)
521{
522 struct ql_adapter *qdev = netdev_priv(ndev);
523
524 if (netif_running(ndev)) {
525 set_bit(QL_SELFTEST, &qdev->flags);
526 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
527 /* Offline tests */
528 if (ql_loopback_test(qdev, &data[0]))
529 eth_test->flags |= ETH_TEST_FL_FAILED;
530
531 } else {
532 /* Online tests */
533 data[0] = 0;
534 }
535 clear_bit(QL_SELFTEST, &qdev->flags);
536 } else {
537 QPRINTK(qdev, DRV, ERR,
538 "%s: is down, Loopback test will fail.\n", ndev->name);
539 eth_test->flags |= ETH_TEST_FL_FAILED;
540 }
541}
542
543static int ql_get_regs_len(struct net_device *ndev)
544{
545 return sizeof(struct ql_reg_dump);
546}
547
548static void ql_get_regs(struct net_device *ndev,
549 struct ethtool_regs *regs, void *p)
550{
551 struct ql_adapter *qdev = netdev_priv(ndev);
552
553 ql_gen_reg_dump(qdev, p);
554}
555
374static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 556static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
375{ 557{
376 struct ql_adapter *qdev = netdev_priv(dev); 558 struct ql_adapter *qdev = netdev_priv(dev);
@@ -424,6 +606,37 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
424 return ql_update_ring_coalescing(qdev); 606 return ql_update_ring_coalescing(qdev);
425} 607}
426 608
609static void ql_get_pauseparam(struct net_device *netdev,
610 struct ethtool_pauseparam *pause)
611{
612 struct ql_adapter *qdev = netdev_priv(netdev);
613
614 ql_mb_get_port_cfg(qdev);
615 if (qdev->link_config & CFG_PAUSE_STD) {
616 pause->rx_pause = 1;
617 pause->tx_pause = 1;
618 }
619}
620
621static int ql_set_pauseparam(struct net_device *netdev,
622 struct ethtool_pauseparam *pause)
623{
624 struct ql_adapter *qdev = netdev_priv(netdev);
625 int status = 0;
626
627 if ((pause->rx_pause) && (pause->tx_pause))
628 qdev->link_config |= CFG_PAUSE_STD;
629 else if (!pause->rx_pause && !pause->tx_pause)
630 qdev->link_config &= ~CFG_PAUSE_STD;
631 else
632 return -EINVAL;
633
634 status = ql_mb_set_port_cfg(qdev);
635 if (status)
636 return status;
637 return status;
638}
639
427static u32 ql_get_rx_csum(struct net_device *netdev) 640static u32 ql_get_rx_csum(struct net_device *netdev)
428{ 641{
429 struct ql_adapter *qdev = netdev_priv(netdev); 642 struct ql_adapter *qdev = netdev_priv(netdev);
@@ -465,9 +678,17 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
465const struct ethtool_ops qlge_ethtool_ops = { 678const struct ethtool_ops qlge_ethtool_ops = {
466 .get_settings = ql_get_settings, 679 .get_settings = ql_get_settings,
467 .get_drvinfo = ql_get_drvinfo, 680 .get_drvinfo = ql_get_drvinfo,
681 .get_wol = ql_get_wol,
682 .set_wol = ql_set_wol,
683 .get_regs_len = ql_get_regs_len,
684 .get_regs = ql_get_regs,
468 .get_msglevel = ql_get_msglevel, 685 .get_msglevel = ql_get_msglevel,
469 .set_msglevel = ql_set_msglevel, 686 .set_msglevel = ql_set_msglevel,
470 .get_link = ethtool_op_get_link, 687 .get_link = ethtool_op_get_link,
688 .phys_id = ql_phys_id,
689 .self_test = ql_self_test,
690 .get_pauseparam = ql_get_pauseparam,
691 .set_pauseparam = ql_set_pauseparam,
471 .get_rx_csum = ql_get_rx_csum, 692 .get_rx_csum = ql_get_rx_csum,
472 .set_rx_csum = ql_set_rx_csum, 693 .set_rx_csum = ql_set_rx_csum,
473 .get_tx_csum = ethtool_op_get_tx_csum, 694 .get_tx_csum = ethtool_op_get_tx_csum,
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 817613919b51..0de596ad8a7e 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1025,6 +1025,11 @@ end:
1025 return status; 1025 return status;
1026} 1026}
1027 1027
1028static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1029{
1030 return PAGE_SIZE << qdev->lbq_buf_order;
1031}
1032
1028/* Get the next large buffer. */ 1033/* Get the next large buffer. */
1029static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) 1034static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1030{ 1035{
@@ -1036,6 +1041,28 @@ static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1036 return lbq_desc; 1041 return lbq_desc;
1037} 1042}
1038 1043
1044static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1045 struct rx_ring *rx_ring)
1046{
1047 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1048
1049 pci_dma_sync_single_for_cpu(qdev->pdev,
1050 pci_unmap_addr(lbq_desc, mapaddr),
1051 rx_ring->lbq_buf_size,
1052 PCI_DMA_FROMDEVICE);
1053
1054 /* If it's the last chunk of our master page then
1055 * we unmap it.
1056 */
1057 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1058 == ql_lbq_block_size(qdev))
1059 pci_unmap_page(qdev->pdev,
1060 lbq_desc->p.pg_chunk.map,
1061 ql_lbq_block_size(qdev),
1062 PCI_DMA_FROMDEVICE);
1063 return lbq_desc;
1064}
1065
1039/* Get the next small buffer. */ 1066/* Get the next small buffer. */
1040static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) 1067static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1041{ 1068{
@@ -1063,6 +1090,53 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
1063 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); 1090 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1064} 1091}
1065 1092
1093static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1094 struct bq_desc *lbq_desc)
1095{
1096 if (!rx_ring->pg_chunk.page) {
1097 u64 map;
1098 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1099 GFP_ATOMIC,
1100 qdev->lbq_buf_order);
1101 if (unlikely(!rx_ring->pg_chunk.page)) {
1102 QPRINTK(qdev, DRV, ERR,
1103 "page allocation failed.\n");
1104 return -ENOMEM;
1105 }
1106 rx_ring->pg_chunk.offset = 0;
1107 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1108 0, ql_lbq_block_size(qdev),
1109 PCI_DMA_FROMDEVICE);
1110 if (pci_dma_mapping_error(qdev->pdev, map)) {
1111 __free_pages(rx_ring->pg_chunk.page,
1112 qdev->lbq_buf_order);
1113 QPRINTK(qdev, DRV, ERR,
1114 "PCI mapping failed.\n");
1115 return -ENOMEM;
1116 }
1117 rx_ring->pg_chunk.map = map;
1118 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1119 }
1120
1121 /* Copy the current master pg_chunk info
1122 * to the current descriptor.
1123 */
1124 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1125
1126 /* Adjust the master page chunk for next
1127 * buffer get.
1128 */
1129 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1130 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1131 rx_ring->pg_chunk.page = NULL;
1132 lbq_desc->p.pg_chunk.last_flag = 1;
1133 } else {
1134 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1135 get_page(rx_ring->pg_chunk.page);
1136 lbq_desc->p.pg_chunk.last_flag = 0;
1137 }
1138 return 0;
1139}
1066/* Process (refill) a large buffer queue. */ 1140/* Process (refill) a large buffer queue. */
1067static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) 1141static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1068{ 1142{
@@ -1072,39 +1146,28 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1072 u64 map; 1146 u64 map;
1073 int i; 1147 int i;
1074 1148
1075 while (rx_ring->lbq_free_cnt > 16) { 1149 while (rx_ring->lbq_free_cnt > 32) {
1076 for (i = 0; i < 16; i++) { 1150 for (i = 0; i < 16; i++) {
1077 QPRINTK(qdev, RX_STATUS, DEBUG, 1151 QPRINTK(qdev, RX_STATUS, DEBUG,
1078 "lbq: try cleaning clean_idx = %d.\n", 1152 "lbq: try cleaning clean_idx = %d.\n",
1079 clean_idx); 1153 clean_idx);
1080 lbq_desc = &rx_ring->lbq[clean_idx]; 1154 lbq_desc = &rx_ring->lbq[clean_idx];
1081 if (lbq_desc->p.lbq_page == NULL) { 1155 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1082 QPRINTK(qdev, RX_STATUS, DEBUG, 1156 QPRINTK(qdev, IFUP, ERR,
1083 "lbq: getting new page for index %d.\n", 1157 "Could not get a page chunk.\n");
1084 lbq_desc->index);
1085 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
1086 if (lbq_desc->p.lbq_page == NULL) {
1087 rx_ring->lbq_clean_idx = clean_idx;
1088 QPRINTK(qdev, RX_STATUS, ERR,
1089 "Couldn't get a page.\n");
1090 return;
1091 }
1092 map = pci_map_page(qdev->pdev,
1093 lbq_desc->p.lbq_page,
1094 0, PAGE_SIZE,
1095 PCI_DMA_FROMDEVICE);
1096 if (pci_dma_mapping_error(qdev->pdev, map)) {
1097 rx_ring->lbq_clean_idx = clean_idx;
1098 put_page(lbq_desc->p.lbq_page);
1099 lbq_desc->p.lbq_page = NULL;
1100 QPRINTK(qdev, RX_STATUS, ERR,
1101 "PCI mapping failed.\n");
1102 return; 1158 return;
1103 } 1159 }
1160
1161 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset;
1104 pci_unmap_addr_set(lbq_desc, mapaddr, map); 1163 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1105 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); 1164 pci_unmap_len_set(lbq_desc, maplen,
1165 rx_ring->lbq_buf_size);
1106 *lbq_desc->addr = cpu_to_le64(map); 1166 *lbq_desc->addr = cpu_to_le64(map);
1107 } 1167
1168 pci_dma_sync_single_for_device(qdev->pdev, map,
1169 rx_ring->lbq_buf_size,
1170 PCI_DMA_FROMDEVICE);
1108 clean_idx++; 1171 clean_idx++;
1109 if (clean_idx == rx_ring->lbq_len) 1172 if (clean_idx == rx_ring->lbq_len)
1110 clean_idx = 0; 1173 clean_idx = 0;
@@ -1480,27 +1543,24 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1480 * chain it to the header buffer's skb and let 1543 * chain it to the header buffer's skb and let
1481 * it rip. 1544 * it rip.
1482 */ 1545 */
1483 lbq_desc = ql_get_curr_lbuf(rx_ring); 1546 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1484 pci_unmap_page(qdev->pdev,
1485 pci_unmap_addr(lbq_desc,
1486 mapaddr),
1487 pci_unmap_len(lbq_desc, maplen),
1488 PCI_DMA_FROMDEVICE);
1489 QPRINTK(qdev, RX_STATUS, DEBUG, 1547 QPRINTK(qdev, RX_STATUS, DEBUG,
1490 "Chaining page to skb.\n"); 1548 "Chaining page at offset = %d,"
1491 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, 1549 "for %d bytes to skb.\n",
1492 0, length); 1550 lbq_desc->p.pg_chunk.offset, length);
1551 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1552 lbq_desc->p.pg_chunk.offset,
1553 length);
1493 skb->len += length; 1554 skb->len += length;
1494 skb->data_len += length; 1555 skb->data_len += length;
1495 skb->truesize += length; 1556 skb->truesize += length;
1496 lbq_desc->p.lbq_page = NULL;
1497 } else { 1557 } else {
1498 /* 1558 /*
1499 * The headers and data are in a single large buffer. We 1559 * The headers and data are in a single large buffer. We
1500 * copy it to a new skb and let it go. This can happen with 1560 * copy it to a new skb and let it go. This can happen with
1501 * jumbo mtu on a non-TCP/UDP frame. 1561 * jumbo mtu on a non-TCP/UDP frame.
1502 */ 1562 */
1503 lbq_desc = ql_get_curr_lbuf(rx_ring); 1563 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1504 skb = netdev_alloc_skb(qdev->ndev, length); 1564 skb = netdev_alloc_skb(qdev->ndev, length);
1505 if (skb == NULL) { 1565 if (skb == NULL) {
1506 QPRINTK(qdev, PROBE, DEBUG, 1566 QPRINTK(qdev, PROBE, DEBUG,
@@ -1515,13 +1575,14 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1515 skb_reserve(skb, NET_IP_ALIGN); 1575 skb_reserve(skb, NET_IP_ALIGN);
1516 QPRINTK(qdev, RX_STATUS, DEBUG, 1576 QPRINTK(qdev, RX_STATUS, DEBUG,
1517 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); 1577 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1518 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, 1578 skb_fill_page_desc(skb, 0,
1519 0, length); 1579 lbq_desc->p.pg_chunk.page,
1580 lbq_desc->p.pg_chunk.offset,
1581 length);
1520 skb->len += length; 1582 skb->len += length;
1521 skb->data_len += length; 1583 skb->data_len += length;
1522 skb->truesize += length; 1584 skb->truesize += length;
1523 length -= length; 1585 length -= length;
1524 lbq_desc->p.lbq_page = NULL;
1525 __pskb_pull_tail(skb, 1586 __pskb_pull_tail(skb,
1526 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? 1587 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1527 VLAN_ETH_HLEN : ETH_HLEN); 1588 VLAN_ETH_HLEN : ETH_HLEN);
@@ -1538,8 +1599,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1538 * frames. If the MTU goes up we could 1599 * frames. If the MTU goes up we could
1539 * eventually be in trouble. 1600 * eventually be in trouble.
1540 */ 1601 */
1541 int size, offset, i = 0; 1602 int size, i = 0;
1542 __le64 *bq, bq_array[8];
1543 sbq_desc = ql_get_curr_sbuf(rx_ring); 1603 sbq_desc = ql_get_curr_sbuf(rx_ring);
1544 pci_unmap_single(qdev->pdev, 1604 pci_unmap_single(qdev->pdev,
1545 pci_unmap_addr(sbq_desc, mapaddr), 1605 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1558,37 +1618,25 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1558 QPRINTK(qdev, RX_STATUS, DEBUG, 1618 QPRINTK(qdev, RX_STATUS, DEBUG,
1559 "%d bytes of headers & data in chain of large.\n", length); 1619 "%d bytes of headers & data in chain of large.\n", length);
1560 skb = sbq_desc->p.skb; 1620 skb = sbq_desc->p.skb;
1561 bq = &bq_array[0];
1562 memcpy(bq, skb->data, sizeof(bq_array));
1563 sbq_desc->p.skb = NULL; 1621 sbq_desc->p.skb = NULL;
1564 skb_reserve(skb, NET_IP_ALIGN); 1622 skb_reserve(skb, NET_IP_ALIGN);
1565 } else {
1566 QPRINTK(qdev, RX_STATUS, DEBUG,
1567 "Headers in small, %d bytes of data in chain of large.\n", length);
1568 bq = (__le64 *)sbq_desc->p.skb->data;
1569 } 1623 }
1570 while (length > 0) { 1624 while (length > 0) {
1571 lbq_desc = ql_get_curr_lbuf(rx_ring); 1625 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1572 pci_unmap_page(qdev->pdev, 1626 size = (length < rx_ring->lbq_buf_size) ? length :
1573 pci_unmap_addr(lbq_desc, 1627 rx_ring->lbq_buf_size;
1574 mapaddr),
1575 pci_unmap_len(lbq_desc,
1576 maplen),
1577 PCI_DMA_FROMDEVICE);
1578 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1579 offset = 0;
1580 1628
1581 QPRINTK(qdev, RX_STATUS, DEBUG, 1629 QPRINTK(qdev, RX_STATUS, DEBUG,
1582 "Adding page %d to skb for %d bytes.\n", 1630 "Adding page %d to skb for %d bytes.\n",
1583 i, size); 1631 i, size);
1584 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page, 1632 skb_fill_page_desc(skb, i,
1585 offset, size); 1633 lbq_desc->p.pg_chunk.page,
1634 lbq_desc->p.pg_chunk.offset,
1635 size);
1586 skb->len += size; 1636 skb->len += size;
1587 skb->data_len += size; 1637 skb->data_len += size;
1588 skb->truesize += size; 1638 skb->truesize += size;
1589 length -= size; 1639 length -= size;
1590 lbq_desc->p.lbq_page = NULL;
1591 bq++;
1592 i++; 1640 i++;
1593 } 1641 }
1594 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? 1642 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
@@ -1613,6 +1661,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1613 if (unlikely(!skb)) { 1661 if (unlikely(!skb)) {
1614 QPRINTK(qdev, RX_STATUS, DEBUG, 1662 QPRINTK(qdev, RX_STATUS, DEBUG,
1615 "No skb available, drop packet.\n"); 1663 "No skb available, drop packet.\n");
1664 rx_ring->rx_dropped++;
1616 return; 1665 return;
1617 } 1666 }
1618 1667
@@ -1621,6 +1670,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1621 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n", 1670 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1622 ib_mac_rsp->flags2); 1671 ib_mac_rsp->flags2);
1623 dev_kfree_skb_any(skb); 1672 dev_kfree_skb_any(skb);
1673 rx_ring->rx_errors++;
1624 return; 1674 return;
1625 } 1675 }
1626 1676
@@ -1629,6 +1679,14 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1629 */ 1679 */
1630 if (skb->len > ndev->mtu + ETH_HLEN) { 1680 if (skb->len > ndev->mtu + ETH_HLEN) {
1631 dev_kfree_skb_any(skb); 1681 dev_kfree_skb_any(skb);
1682 rx_ring->rx_dropped++;
1683 return;
1684 }
1685
1686 /* loopback self test for ethtool */
1687 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1688 ql_check_lb_frame(qdev, skb);
1689 dev_kfree_skb_any(skb);
1632 return; 1690 return;
1633 } 1691 }
1634 1692
@@ -1642,6 +1700,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1642 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", 1700 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1643 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1701 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); 1702 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1703 rx_ring->rx_multicast++;
1645 } 1704 }
1646 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { 1705 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1647 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n"); 1706 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
@@ -1673,8 +1732,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1673 } 1732 }
1674 } 1733 }
1675 1734
1676 ndev->stats.rx_packets++; 1735 rx_ring->rx_packets++;
1677 ndev->stats.rx_bytes += skb->len; 1736 rx_ring->rx_bytes += skb->len;
1678 skb_record_rx_queue(skb, rx_ring->cq_id); 1737 skb_record_rx_queue(skb, rx_ring->cq_id);
1679 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 1738 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1680 if (qdev->vlgrp && 1739 if (qdev->vlgrp &&
@@ -1698,7 +1757,6 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1698static void ql_process_mac_tx_intr(struct ql_adapter *qdev, 1757static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1699 struct ob_mac_iocb_rsp *mac_rsp) 1758 struct ob_mac_iocb_rsp *mac_rsp)
1700{ 1759{
1701 struct net_device *ndev = qdev->ndev;
1702 struct tx_ring *tx_ring; 1760 struct tx_ring *tx_ring;
1703 struct tx_ring_desc *tx_ring_desc; 1761 struct tx_ring_desc *tx_ring_desc;
1704 1762
@@ -1706,8 +1764,8 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1706 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; 1764 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1707 tx_ring_desc = &tx_ring->q[mac_rsp->tid]; 1765 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1708 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); 1766 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1709 ndev->stats.tx_bytes += (tx_ring_desc->skb)->len; 1767 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
1710 ndev->stats.tx_packets++; 1768 tx_ring->tx_packets++;
1711 dev_kfree_skb(tx_ring_desc->skb); 1769 dev_kfree_skb(tx_ring_desc->skb);
1712 tx_ring_desc->skb = NULL; 1770 tx_ring_desc->skb = NULL;
1713 1771
@@ -1930,7 +1988,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1930 return work_done; 1988 return work_done;
1931} 1989}
1932 1990
1933static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) 1991static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1934{ 1992{
1935 struct ql_adapter *qdev = netdev_priv(ndev); 1993 struct ql_adapter *qdev = netdev_priv(ndev);
1936 1994
@@ -1946,7 +2004,7 @@ static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1946 } 2004 }
1947} 2005}
1948 2006
1949static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) 2007static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1950{ 2008{
1951 struct ql_adapter *qdev = netdev_priv(ndev); 2009 struct ql_adapter *qdev = netdev_priv(ndev);
1952 u32 enable_bit = MAC_ADDR_E; 2010 u32 enable_bit = MAC_ADDR_E;
@@ -1962,7 +2020,7 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1962 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2020 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1963} 2021}
1964 2022
1965static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) 2023static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1966{ 2024{
1967 struct ql_adapter *qdev = netdev_priv(ndev); 2025 struct ql_adapter *qdev = netdev_priv(ndev);
1968 u32 enable_bit = 0; 2026 u32 enable_bit = 0;
@@ -2047,12 +2105,12 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2047 */ 2105 */
2048 var = ql_read32(qdev, ISR1); 2106 var = ql_read32(qdev, ISR1);
2049 if (var & intr_context->irq_mask) { 2107 if (var & intr_context->irq_mask) {
2050 QPRINTK(qdev, INTR, INFO, 2108 QPRINTK(qdev, INTR, INFO,
2051 "Waking handler for rx_ring[0].\n"); 2109 "Waking handler for rx_ring[0].\n");
2052 ql_disable_completion_interrupt(qdev, intr_context->intr); 2110 ql_disable_completion_interrupt(qdev, intr_context->intr);
2053 napi_schedule(&rx_ring->napi); 2111 napi_schedule(&rx_ring->napi);
2054 work_done++; 2112 work_done++;
2055 } 2113 }
2056 ql_enable_completion_interrupt(qdev, intr_context->intr); 2114 ql_enable_completion_interrupt(qdev, intr_context->intr);
2057 return work_done ? IRQ_HANDLED : IRQ_NONE; 2115 return work_done ? IRQ_HANDLED : IRQ_NONE;
2058} 2116}
@@ -2150,6 +2208,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2150 __func__, tx_ring_idx); 2208 __func__, tx_ring_idx);
2151 netif_stop_subqueue(ndev, tx_ring->wq_id); 2209 netif_stop_subqueue(ndev, tx_ring->wq_id);
2152 atomic_inc(&tx_ring->queue_stopped); 2210 atomic_inc(&tx_ring->queue_stopped);
2211 tx_ring->tx_errors++;
2153 return NETDEV_TX_BUSY; 2212 return NETDEV_TX_BUSY;
2154 } 2213 }
2155 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; 2214 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
@@ -2184,6 +2243,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2184 NETDEV_TX_OK) { 2243 NETDEV_TX_OK) {
2185 QPRINTK(qdev, TX_QUEUED, ERR, 2244 QPRINTK(qdev, TX_QUEUED, ERR,
2186 "Could not map the segments.\n"); 2245 "Could not map the segments.\n");
2246 tx_ring->tx_errors++;
2187 return NETDEV_TX_BUSY; 2247 return NETDEV_TX_BUSY;
2188 } 2248 }
2189 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); 2249 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
@@ -2200,6 +2260,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2200 return NETDEV_TX_OK; 2260 return NETDEV_TX_OK;
2201} 2261}
2202 2262
2263
2203static void ql_free_shadow_space(struct ql_adapter *qdev) 2264static void ql_free_shadow_space(struct ql_adapter *qdev)
2204{ 2265{
2205 if (qdev->rx_ring_shadow_reg_area) { 2266 if (qdev->rx_ring_shadow_reg_area) {
@@ -2305,20 +2366,29 @@ err:
2305 2366
2306static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2367static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2307{ 2368{
2308 int i;
2309 struct bq_desc *lbq_desc; 2369 struct bq_desc *lbq_desc;
2310 2370
2311 for (i = 0; i < rx_ring->lbq_len; i++) { 2371 uint32_t curr_idx, clean_idx;
2312 lbq_desc = &rx_ring->lbq[i]; 2372
2313 if (lbq_desc->p.lbq_page) { 2373 curr_idx = rx_ring->lbq_curr_idx;
2374 clean_idx = rx_ring->lbq_clean_idx;
2375 while (curr_idx != clean_idx) {
2376 lbq_desc = &rx_ring->lbq[curr_idx];
2377
2378 if (lbq_desc->p.pg_chunk.last_flag) {
2314 pci_unmap_page(qdev->pdev, 2379 pci_unmap_page(qdev->pdev,
2315 pci_unmap_addr(lbq_desc, mapaddr), 2380 lbq_desc->p.pg_chunk.map,
2316 pci_unmap_len(lbq_desc, maplen), 2381 ql_lbq_block_size(qdev),
2317 PCI_DMA_FROMDEVICE); 2382 PCI_DMA_FROMDEVICE);
2318 2383 lbq_desc->p.pg_chunk.last_flag = 0;
2319 put_page(lbq_desc->p.lbq_page);
2320 lbq_desc->p.lbq_page = NULL;
2321 } 2384 }
2385
2386 put_page(lbq_desc->p.pg_chunk.page);
2387 lbq_desc->p.pg_chunk.page = NULL;
2388
2389 if (++curr_idx == rx_ring->lbq_len)
2390 curr_idx = 0;
2391
2322 } 2392 }
2323} 2393}
2324 2394
@@ -2616,6 +2686,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2616 /* Set up the shadow registers for this ring. */ 2686 /* Set up the shadow registers for this ring. */
2617 rx_ring->prod_idx_sh_reg = shadow_reg; 2687 rx_ring->prod_idx_sh_reg = shadow_reg;
2618 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; 2688 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2689 *rx_ring->prod_idx_sh_reg = 0;
2619 shadow_reg += sizeof(u64); 2690 shadow_reg += sizeof(u64);
2620 shadow_reg_dma += sizeof(u64); 2691 shadow_reg_dma += sizeof(u64);
2621 rx_ring->lbq_base_indirect = shadow_reg; 2692 rx_ring->lbq_base_indirect = shadow_reg;
@@ -3277,6 +3348,22 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3277 * the same MAC address. 3348 * the same MAC address.
3278 */ 3349 */
3279 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); 3350 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3351 /* Reroute all packets to our Interface.
3352 * They may have been routed to MPI firmware
3353 * due to WOL.
3354 */
3355 value = ql_read32(qdev, MGMT_RCV_CFG);
3356 value &= ~MGMT_RCV_CFG_RM;
3357 mask = 0xffff0000;
3358
3359 /* Sticky reg needs clearing due to WOL. */
3360 ql_write32(qdev, MGMT_RCV_CFG, mask);
3361 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3362
3363 /* Default WOL is enabled on Mezz cards */
3364 if (qdev->pdev->subsystem_device == 0x0068 ||
3365 qdev->pdev->subsystem_device == 0x0180)
3366 qdev->wol = WAKE_MAGIC;
3280 3367
3281 /* Start up the rx queues. */ 3368 /* Start up the rx queues. */
3282 for (i = 0; i < qdev->rx_ring_count; i++) { 3369 for (i = 0; i < qdev->rx_ring_count; i++) {
@@ -3391,6 +3478,55 @@ static void ql_display_dev_info(struct net_device *ndev)
3391 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr); 3478 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3392} 3479}
3393 3480
3481int ql_wol(struct ql_adapter *qdev)
3482{
3483 int status = 0;
3484 u32 wol = MB_WOL_DISABLE;
3485
3486 /* The CAM is still intact after a reset, but if we
3487 * are doing WOL, then we may need to program the
3488 * routing regs. We would also need to issue the mailbox
3489 * commands to instruct the MPI what to do per the ethtool
3490 * settings.
3491 */
3492
3493 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3494 WAKE_MCAST | WAKE_BCAST)) {
3495 QPRINTK(qdev, IFDOWN, ERR,
3496 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3497 qdev->wol);
3498 return -EINVAL;
3499 }
3500
3501 if (qdev->wol & WAKE_MAGIC) {
3502 status = ql_mb_wol_set_magic(qdev, 1);
3503 if (status) {
3504 QPRINTK(qdev, IFDOWN, ERR,
3505 "Failed to set magic packet on %s.\n",
3506 qdev->ndev->name);
3507 return status;
3508 } else
3509 QPRINTK(qdev, DRV, INFO,
3510 "Enabled magic packet successfully on %s.\n",
3511 qdev->ndev->name);
3512
3513 wol |= MB_WOL_MAGIC_PKT;
3514 }
3515
3516 if (qdev->wol) {
3517 /* Reroute all packets to Management Interface */
3518 ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
3519 (MGMT_RCV_CFG_RM << 16)));
3520 wol |= MB_WOL_MODE_ON;
3521 status = ql_mb_wol_mode(qdev, wol);
3522 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3523 (status == 0) ? "Sucessfully set" : "Failed", wol,
3524 qdev->ndev->name);
3525 }
3526
3527 return status;
3528}
3529
3394static int ql_adapter_down(struct ql_adapter *qdev) 3530static int ql_adapter_down(struct ql_adapter *qdev)
3395{ 3531{
3396 int i, status = 0; 3532 int i, status = 0;
@@ -3496,6 +3632,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3496 struct rx_ring *rx_ring; 3632 struct rx_ring *rx_ring;
3497 struct tx_ring *tx_ring; 3633 struct tx_ring *tx_ring;
3498 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); 3634 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3635 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3636 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3637
3638 qdev->lbq_buf_order = get_order(lbq_buf_len);
3499 3639
3500 /* In a perfect world we have one RSS ring for each CPU 3640 /* In a perfect world we have one RSS ring for each CPU
3501 * and each has it's own vector. To do that we ask for 3641 * and each has it's own vector. To do that we ask for
@@ -3543,7 +3683,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3543 rx_ring->lbq_len = NUM_LARGE_BUFFERS; 3683 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3544 rx_ring->lbq_size = 3684 rx_ring->lbq_size =
3545 rx_ring->lbq_len * sizeof(__le64); 3685 rx_ring->lbq_len * sizeof(__le64);
3546 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; 3686 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3687 QPRINTK(qdev, IFUP, DEBUG,
3688 "lbq_buf_size %d, order = %d\n",
3689 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
3547 rx_ring->sbq_len = NUM_SMALL_BUFFERS; 3690 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3548 rx_ring->sbq_size = 3691 rx_ring->sbq_size =
3549 rx_ring->sbq_len * sizeof(__le64); 3692 rx_ring->sbq_len * sizeof(__le64);
@@ -3593,14 +3736,63 @@ error_up:
3593 return err; 3736 return err;
3594} 3737}
3595 3738
3739static int ql_change_rx_buffers(struct ql_adapter *qdev)
3740{
3741 struct rx_ring *rx_ring;
3742 int i, status;
3743 u32 lbq_buf_len;
3744
3745 /* Wait for an outstanding reset to complete. */
3746 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3747 int i = 3;
3748 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3749 QPRINTK(qdev, IFUP, ERR,
3750 "Waiting for adapter UP...\n");
3751 ssleep(1);
3752 }
3753
3754 if (!i) {
3755 QPRINTK(qdev, IFUP, ERR,
3756 "Timed out waiting for adapter UP\n");
3757 return -ETIMEDOUT;
3758 }
3759 }
3760
3761 status = ql_adapter_down(qdev);
3762 if (status)
3763 goto error;
3764
3765 /* Get the new rx buffer size. */
3766 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3767 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3768 qdev->lbq_buf_order = get_order(lbq_buf_len);
3769
3770 for (i = 0; i < qdev->rss_ring_count; i++) {
3771 rx_ring = &qdev->rx_ring[i];
3772 /* Set the new size. */
3773 rx_ring->lbq_buf_size = lbq_buf_len;
3774 }
3775
3776 status = ql_adapter_up(qdev);
3777 if (status)
3778 goto error;
3779
3780 return status;
3781error:
3782 QPRINTK(qdev, IFUP, ALERT,
3783 "Driver up/down cycle failed, closing device.\n");
3784 set_bit(QL_ADAPTER_UP, &qdev->flags);
3785 dev_close(qdev->ndev);
3786 return status;
3787}
3788
3596static int qlge_change_mtu(struct net_device *ndev, int new_mtu) 3789static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3597{ 3790{
3598 struct ql_adapter *qdev = netdev_priv(ndev); 3791 struct ql_adapter *qdev = netdev_priv(ndev);
3792 int status;
3599 3793
3600 if (ndev->mtu == 1500 && new_mtu == 9000) { 3794 if (ndev->mtu == 1500 && new_mtu == 9000) {
3601 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n"); 3795 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3602 queue_delayed_work(qdev->workqueue,
3603 &qdev->mpi_port_cfg_work, 0);
3604 } else if (ndev->mtu == 9000 && new_mtu == 1500) { 3796 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3605 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n"); 3797 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3606 } else if ((ndev->mtu == 1500 && new_mtu == 1500) || 3798 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3608,13 +3800,59 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3608 return 0; 3800 return 0;
3609 } else 3801 } else
3610 return -EINVAL; 3802 return -EINVAL;
3803
3804 queue_delayed_work(qdev->workqueue,
3805 &qdev->mpi_port_cfg_work, 3*HZ);
3806
3807 if (!netif_running(qdev->ndev)) {
3808 ndev->mtu = new_mtu;
3809 return 0;
3810 }
3811
3611 ndev->mtu = new_mtu; 3812 ndev->mtu = new_mtu;
3612 return 0; 3813 status = ql_change_rx_buffers(qdev);
3814 if (status) {
3815 QPRINTK(qdev, IFUP, ERR,
3816 "Changing MTU failed.\n");
3817 }
3818
3819 return status;
3613} 3820}
3614 3821
3615static struct net_device_stats *qlge_get_stats(struct net_device 3822static struct net_device_stats *qlge_get_stats(struct net_device
3616 *ndev) 3823 *ndev)
3617{ 3824{
3825 struct ql_adapter *qdev = netdev_priv(ndev);
3826 struct rx_ring *rx_ring = &qdev->rx_ring[0];
3827 struct tx_ring *tx_ring = &qdev->tx_ring[0];
3828 unsigned long pkts, mcast, dropped, errors, bytes;
3829 int i;
3830
3831 /* Get RX stats. */
3832 pkts = mcast = dropped = errors = bytes = 0;
3833 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
3834 pkts += rx_ring->rx_packets;
3835 bytes += rx_ring->rx_bytes;
3836 dropped += rx_ring->rx_dropped;
3837 errors += rx_ring->rx_errors;
3838 mcast += rx_ring->rx_multicast;
3839 }
3840 ndev->stats.rx_packets = pkts;
3841 ndev->stats.rx_bytes = bytes;
3842 ndev->stats.rx_dropped = dropped;
3843 ndev->stats.rx_errors = errors;
3844 ndev->stats.multicast = mcast;
3845
3846 /* Get TX stats. */
3847 pkts = errors = bytes = 0;
3848 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
3849 pkts += tx_ring->tx_packets;
3850 bytes += tx_ring->tx_bytes;
3851 errors += tx_ring->tx_errors;
3852 }
3853 ndev->stats.tx_packets = pkts;
3854 ndev->stats.tx_bytes = bytes;
3855 ndev->stats.tx_errors = errors;
3618 return &ndev->stats; 3856 return &ndev->stats;
3619} 3857}
3620 3858
@@ -3907,6 +4145,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3907 goto err_out; 4145 goto err_out;
3908 } 4146 }
3909 4147
4148 pci_save_state(pdev);
3910 qdev->reg_base = 4149 qdev->reg_base =
3911 ioremap_nocache(pci_resource_start(pdev, 1), 4150 ioremap_nocache(pci_resource_start(pdev, 1),
3912 pci_resource_len(pdev, 1)); 4151 pci_resource_len(pdev, 1));
@@ -3979,7 +4218,6 @@ err_out:
3979 return err; 4218 return err;
3980} 4219}
3981 4220
3982
3983static const struct net_device_ops qlge_netdev_ops = { 4221static const struct net_device_ops qlge_netdev_ops = {
3984 .ndo_open = qlge_open, 4222 .ndo_open = qlge_open,
3985 .ndo_stop = qlge_close, 4223 .ndo_stop = qlge_close,
@@ -3990,9 +4228,9 @@ static const struct net_device_ops qlge_netdev_ops = {
3990 .ndo_set_mac_address = qlge_set_mac_address, 4228 .ndo_set_mac_address = qlge_set_mac_address,
3991 .ndo_validate_addr = eth_validate_addr, 4229 .ndo_validate_addr = eth_validate_addr,
3992 .ndo_tx_timeout = qlge_tx_timeout, 4230 .ndo_tx_timeout = qlge_tx_timeout,
3993 .ndo_vlan_rx_register = ql_vlan_rx_register, 4231 .ndo_vlan_rx_register = qlge_vlan_rx_register,
3994 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid, 4232 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
3995 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid, 4233 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
3996}; 4234};
3997 4235
3998static int __devinit qlge_probe(struct pci_dev *pdev, 4236static int __devinit qlge_probe(struct pci_dev *pdev,
@@ -4048,10 +4286,21 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
4048 } 4286 }
4049 ql_link_off(qdev); 4287 ql_link_off(qdev);
4050 ql_display_dev_info(ndev); 4288 ql_display_dev_info(ndev);
4289 atomic_set(&qdev->lb_count, 0);
4051 cards_found++; 4290 cards_found++;
4052 return 0; 4291 return 0;
4053} 4292}
4054 4293
4294netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4295{
4296 return qlge_send(skb, ndev);
4297}
4298
4299int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4300{
4301 return ql_clean_inbound_rx_ring(rx_ring, budget);
4302}
4303
4055static void __devexit qlge_remove(struct pci_dev *pdev) 4304static void __devexit qlge_remove(struct pci_dev *pdev)
4056{ 4305{
4057 struct net_device *ndev = pci_get_drvdata(pdev); 4306 struct net_device *ndev = pci_get_drvdata(pdev);
@@ -4061,6 +4310,33 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
4061 free_netdev(ndev); 4310 free_netdev(ndev);
4062} 4311}
4063 4312
4313/* Clean up resources without touching hardware. */
4314static void ql_eeh_close(struct net_device *ndev)
4315{
4316 int i;
4317 struct ql_adapter *qdev = netdev_priv(ndev);
4318
4319 if (netif_carrier_ok(ndev)) {
4320 netif_carrier_off(ndev);
4321 netif_stop_queue(ndev);
4322 }
4323
4324 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4325 cancel_delayed_work_sync(&qdev->asic_reset_work);
4326 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4327 cancel_delayed_work_sync(&qdev->mpi_work);
4328 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4329 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4330
4331 for (i = 0; i < qdev->rss_ring_count; i++)
4332 netif_napi_del(&qdev->rx_ring[i].napi);
4333
4334 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4335 ql_tx_ring_clean(qdev);
4336 ql_free_rx_buffers(qdev);
4337 ql_release_adapter_resources(qdev);
4338}
4339
4064/* 4340/*
4065 * This callback is called by the PCI subsystem whenever 4341 * This callback is called by the PCI subsystem whenever
4066 * a PCI bus error is detected. 4342 * a PCI bus error is detected.
@@ -4069,17 +4345,21 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4069 enum pci_channel_state state) 4345 enum pci_channel_state state)
4070{ 4346{
4071 struct net_device *ndev = pci_get_drvdata(pdev); 4347 struct net_device *ndev = pci_get_drvdata(pdev);
4072 struct ql_adapter *qdev = netdev_priv(ndev);
4073 4348
4074 netif_device_detach(ndev); 4349 switch (state) {
4075 4350 case pci_channel_io_normal:
4076 if (state == pci_channel_io_perm_failure) 4351 return PCI_ERS_RESULT_CAN_RECOVER;
4352 case pci_channel_io_frozen:
4353 netif_device_detach(ndev);
4354 if (netif_running(ndev))
4355 ql_eeh_close(ndev);
4356 pci_disable_device(pdev);
4357 return PCI_ERS_RESULT_NEED_RESET;
4358 case pci_channel_io_perm_failure:
4359 dev_err(&pdev->dev,
4360 "%s: pci_channel_io_perm_failure.\n", __func__);
4077 return PCI_ERS_RESULT_DISCONNECT; 4361 return PCI_ERS_RESULT_DISCONNECT;
4078 4362 }
4079 if (netif_running(ndev))
4080 ql_adapter_down(qdev);
4081
4082 pci_disable_device(pdev);
4083 4363
4084 /* Request a slot reset. */ 4364 /* Request a slot reset. */
4085 return PCI_ERS_RESULT_NEED_RESET; 4365 return PCI_ERS_RESULT_NEED_RESET;
@@ -4096,25 +4376,15 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4096 struct net_device *ndev = pci_get_drvdata(pdev); 4376 struct net_device *ndev = pci_get_drvdata(pdev);
4097 struct ql_adapter *qdev = netdev_priv(ndev); 4377 struct ql_adapter *qdev = netdev_priv(ndev);
4098 4378
4379 pdev->error_state = pci_channel_io_normal;
4380
4381 pci_restore_state(pdev);
4099 if (pci_enable_device(pdev)) { 4382 if (pci_enable_device(pdev)) {
4100 QPRINTK(qdev, IFUP, ERR, 4383 QPRINTK(qdev, IFUP, ERR,
4101 "Cannot re-enable PCI device after reset.\n"); 4384 "Cannot re-enable PCI device after reset.\n");
4102 return PCI_ERS_RESULT_DISCONNECT; 4385 return PCI_ERS_RESULT_DISCONNECT;
4103 } 4386 }
4104
4105 pci_set_master(pdev); 4387 pci_set_master(pdev);
4106
4107 netif_carrier_off(ndev);
4108 ql_adapter_reset(qdev);
4109
4110 /* Make sure the EEPROM is good */
4111 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4112
4113 if (!is_valid_ether_addr(ndev->perm_addr)) {
4114 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4115 return PCI_ERS_RESULT_DISCONNECT;
4116 }
4117
4118 return PCI_ERS_RESULT_RECOVERED; 4388 return PCI_ERS_RESULT_RECOVERED;
4119} 4389}
4120 4390
@@ -4122,17 +4392,21 @@ static void qlge_io_resume(struct pci_dev *pdev)
4122{ 4392{
4123 struct net_device *ndev = pci_get_drvdata(pdev); 4393 struct net_device *ndev = pci_get_drvdata(pdev);
4124 struct ql_adapter *qdev = netdev_priv(ndev); 4394 struct ql_adapter *qdev = netdev_priv(ndev);
4395 int err = 0;
4125 4396
4126 pci_set_master(pdev); 4397 if (ql_adapter_reset(qdev))
4127 4398 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
4128 if (netif_running(ndev)) { 4399 if (netif_running(ndev)) {
4129 if (ql_adapter_up(qdev)) { 4400 err = qlge_open(ndev);
4401 if (err) {
4130 QPRINTK(qdev, IFUP, ERR, 4402 QPRINTK(qdev, IFUP, ERR,
4131 "Device initialization failed after reset.\n"); 4403 "Device initialization failed after reset.\n");
4132 return; 4404 return;
4133 } 4405 }
4406 } else {
4407 QPRINTK(qdev, IFUP, ERR,
4408 "Device was not running prior to EEH.\n");
4134 } 4409 }
4135
4136 netif_device_attach(ndev); 4410 netif_device_attach(ndev);
4137} 4411}
4138 4412
@@ -4156,6 +4430,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4156 return err; 4430 return err;
4157 } 4431 }
4158 4432
4433 ql_wol(qdev);
4159 err = pci_save_state(pdev); 4434 err = pci_save_state(pdev);
4160 if (err) 4435 if (err)
4161 return err; 4436 return err;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e497eac5eb45..f5619fe87bb2 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -454,7 +454,8 @@ end:
454 */ 454 */
455static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) 455static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
456{ 456{
457 int status, count; 457 int status;
458 unsigned long count;
458 459
459 460
460 /* Begin polled mode for MPI */ 461 /* Begin polled mode for MPI */
@@ -475,9 +476,9 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
475 /* Wait for the command to complete. We loop 476 /* Wait for the command to complete. We loop
476 * here because some AEN might arrive while 477 * here because some AEN might arrive while
477 * we're waiting for the mailbox command to 478 * we're waiting for the mailbox command to
478 * complete. If more than 5 arrive then we can 479 * complete. If more than 5 seconds expire we can
479 * assume something is wrong. */ 480 * assume something is wrong. */
480 count = 5; 481 count = jiffies + HZ * MAILBOX_TIMEOUT;
481 do { 482 do {
482 /* Wait for the interrupt to come in. */ 483 /* Wait for the interrupt to come in. */
483 status = ql_wait_mbx_cmd_cmplt(qdev); 484 status = ql_wait_mbx_cmd_cmplt(qdev);
@@ -501,15 +502,15 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
501 MB_CMD_STS_GOOD) || 502 MB_CMD_STS_GOOD) ||
502 ((mbcp->mbox_out[0] & 0x0000f000) == 503 ((mbcp->mbox_out[0] & 0x0000f000) ==
503 MB_CMD_STS_INTRMDT)) 504 MB_CMD_STS_INTRMDT))
504 break; 505 goto done;
505 } while (--count); 506 } while (time_before(jiffies, count));
506 507
507 if (!count) { 508 QPRINTK(qdev, DRV, ERR,
508 QPRINTK(qdev, DRV, ERR, 509 "Timed out waiting for mailbox complete.\n");
509 "Timed out waiting for mailbox complete.\n"); 510 status = -ETIMEDOUT;
510 status = -ETIMEDOUT; 511 goto end;
511 goto end; 512
512 } 513done:
513 514
514 /* Now we can clear the interrupt condition 515 /* Now we can clear the interrupt condition
515 * and look at our status. 516 * and look at our status.
@@ -637,7 +638,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
637 * for the current port. 638 * for the current port.
638 * Most likely will block. 639 * Most likely will block.
639 */ 640 */
640static int ql_mb_set_port_cfg(struct ql_adapter *qdev) 641int ql_mb_set_port_cfg(struct ql_adapter *qdev)
641{ 642{
642 struct mbox_params mbc; 643 struct mbox_params mbc;
643 struct mbox_params *mbcp = &mbc; 644 struct mbox_params *mbcp = &mbc;
@@ -672,7 +673,7 @@ static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
672 * for the current port. 673 * for the current port.
673 * Most likely will block. 674 * Most likely will block.
674 */ 675 */
675static int ql_mb_get_port_cfg(struct ql_adapter *qdev) 676int ql_mb_get_port_cfg(struct ql_adapter *qdev)
676{ 677{
677 struct mbox_params mbc; 678 struct mbox_params mbc;
678 struct mbox_params *mbcp = &mbc; 679 struct mbox_params *mbcp = &mbc;
@@ -702,6 +703,76 @@ static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
702 return status; 703 return status;
703} 704}
704 705
706int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
707{
708 struct mbox_params mbc;
709 struct mbox_params *mbcp = &mbc;
710 int status;
711
712 memset(mbcp, 0, sizeof(struct mbox_params));
713
714 mbcp->in_count = 2;
715 mbcp->out_count = 1;
716
717 mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
718 mbcp->mbox_in[1] = wol;
719
720
721 status = ql_mailbox_command(qdev, mbcp);
722 if (status)
723 return status;
724
725 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
726 QPRINTK(qdev, DRV, ERR,
727 "Failed to set WOL mode.\n");
728 status = -EIO;
729 }
730 return status;
731}
732
733int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
734{
735 struct mbox_params mbc;
736 struct mbox_params *mbcp = &mbc;
737 int status;
738 u8 *addr = qdev->ndev->dev_addr;
739
740 memset(mbcp, 0, sizeof(struct mbox_params));
741
742 mbcp->in_count = 8;
743 mbcp->out_count = 1;
744
745 mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
746 if (enable_wol) {
747 mbcp->mbox_in[1] = (u32)addr[0];
748 mbcp->mbox_in[2] = (u32)addr[1];
749 mbcp->mbox_in[3] = (u32)addr[2];
750 mbcp->mbox_in[4] = (u32)addr[3];
751 mbcp->mbox_in[5] = (u32)addr[4];
752 mbcp->mbox_in[6] = (u32)addr[5];
753 mbcp->mbox_in[7] = 0;
754 } else {
755 mbcp->mbox_in[1] = 0;
756 mbcp->mbox_in[2] = 1;
757 mbcp->mbox_in[3] = 1;
758 mbcp->mbox_in[4] = 1;
759 mbcp->mbox_in[5] = 1;
760 mbcp->mbox_in[6] = 1;
761 mbcp->mbox_in[7] = 0;
762 }
763
764 status = ql_mailbox_command(qdev, mbcp);
765 if (status)
766 return status;
767
768 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
769 QPRINTK(qdev, DRV, ERR,
770 "Failed to set WOL mode.\n");
771 status = -EIO;
772 }
773 return status;
774}
775
705/* IDC - Inter Device Communication... 776/* IDC - Inter Device Communication...
706 * Some firmware commands require consent of adjacent FCOE 777 * Some firmware commands require consent of adjacent FCOE
707 * function. This function waits for the OK, or a 778 * function. This function waits for the OK, or a
@@ -751,6 +822,61 @@ static int ql_idc_wait(struct ql_adapter *qdev)
751 return status; 822 return status;
752} 823}
753 824
825int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
826{
827 struct mbox_params mbc;
828 struct mbox_params *mbcp = &mbc;
829 int status;
830
831 memset(mbcp, 0, sizeof(struct mbox_params));
832
833 mbcp->in_count = 2;
834 mbcp->out_count = 1;
835
836 mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
837 mbcp->mbox_in[1] = led_config;
838
839
840 status = ql_mailbox_command(qdev, mbcp);
841 if (status)
842 return status;
843
844 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
845 QPRINTK(qdev, DRV, ERR,
846 "Failed to set LED Configuration.\n");
847 status = -EIO;
848 }
849
850 return status;
851}
852
853int ql_mb_get_led_cfg(struct ql_adapter *qdev)
854{
855 struct mbox_params mbc;
856 struct mbox_params *mbcp = &mbc;
857 int status;
858
859 memset(mbcp, 0, sizeof(struct mbox_params));
860
861 mbcp->in_count = 1;
862 mbcp->out_count = 2;
863
864 mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
865
866 status = ql_mailbox_command(qdev, mbcp);
867 if (status)
868 return status;
869
870 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
871 QPRINTK(qdev, DRV, ERR,
872 "Failed to get LED Configuration.\n");
873 status = -EIO;
874 } else
875 qdev->led_config = mbcp->mbox_out[1];
876
877 return status;
878}
879
754int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) 880int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
755{ 881{
756 struct mbox_params mbc; 882 struct mbox_params mbc;
@@ -912,8 +1038,11 @@ void ql_mpi_idc_work(struct work_struct *work)
912 int status; 1038 int status;
913 struct mbox_params *mbcp = &qdev->idc_mbc; 1039 struct mbox_params *mbcp = &qdev->idc_mbc;
914 u32 aen; 1040 u32 aen;
1041 int timeout;
915 1042
1043 rtnl_lock();
916 aen = mbcp->mbox_out[1] >> 16; 1044 aen = mbcp->mbox_out[1] >> 16;
1045 timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
917 1046
918 switch (aen) { 1047 switch (aen) {
919 default: 1048 default:
@@ -921,22 +1050,61 @@ void ql_mpi_idc_work(struct work_struct *work)
921 "Bug: Unhandled IDC action.\n"); 1050 "Bug: Unhandled IDC action.\n");
922 break; 1051 break;
923 case MB_CMD_PORT_RESET: 1052 case MB_CMD_PORT_RESET:
924 case MB_CMD_SET_PORT_CFG:
925 case MB_CMD_STOP_FW: 1053 case MB_CMD_STOP_FW:
926 ql_link_off(qdev); 1054 ql_link_off(qdev);
1055 case MB_CMD_SET_PORT_CFG:
927 /* Signal the resulting link up AEN 1056 /* Signal the resulting link up AEN
928 * that the frame routing and mac addr 1057 * that the frame routing and mac addr
929 * needs to be set. 1058 * needs to be set.
930 * */ 1059 * */
931 set_bit(QL_CAM_RT_SET, &qdev->flags); 1060 set_bit(QL_CAM_RT_SET, &qdev->flags);
932 rtnl_lock(); 1061 /* Do ACK if required */
933 status = ql_mb_idc_ack(qdev); 1062 if (timeout) {
934 rtnl_unlock(); 1063 status = ql_mb_idc_ack(qdev);
935 if (status) { 1064 if (status)
936 QPRINTK(qdev, DRV, ERR, 1065 QPRINTK(qdev, DRV, ERR,
937 "Bug: No pending IDC!\n"); 1066 "Bug: No pending IDC!\n");
1067 } else {
1068 QPRINTK(qdev, DRV, DEBUG,
1069 "IDC ACK not required\n");
1070 status = 0; /* success */
1071 }
1072 break;
1073
1074 /* These sub-commands issued by another (FCoE)
1075 * function are requesting to do an operation
1076 * on the shared resource (MPI environment).
1077 * We currently don't issue these so we just
1078 * ACK the request.
1079 */
1080 case MB_CMD_IOP_RESTART_MPI:
1081 case MB_CMD_IOP_PREP_LINK_DOWN:
1082 /* Drop the link, reload the routing
1083 * table when link comes up.
1084 */
1085 ql_link_off(qdev);
1086 set_bit(QL_CAM_RT_SET, &qdev->flags);
1087 /* Fall through. */
1088 case MB_CMD_IOP_DVR_START:
1089 case MB_CMD_IOP_FLASH_ACC:
1090 case MB_CMD_IOP_CORE_DUMP_MPI:
1091 case MB_CMD_IOP_PREP_UPDATE_MPI:
1092 case MB_CMD_IOP_COMP_UPDATE_MPI:
1093 case MB_CMD_IOP_NONE: /* an IDC without params */
1094 /* Do ACK if required */
1095 if (timeout) {
1096 status = ql_mb_idc_ack(qdev);
1097 if (status)
1098 QPRINTK(qdev, DRV, ERR,
1099 "Bug: No pending IDC!\n");
1100 } else {
1101 QPRINTK(qdev, DRV, DEBUG,
1102 "IDC ACK not required\n");
1103 status = 0; /* success */
938 } 1104 }
1105 break;
939 } 1106 }
1107 rtnl_unlock();
940} 1108}
941 1109
942void ql_mpi_work(struct work_struct *work) 1110void ql_mpi_work(struct work_struct *work)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7d9fc06ceb98..1b0aa4cf89bc 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1029,7 +1029,10 @@ static void rtl8169_vlan_rx_register(struct net_device *dev,
1029 1029
1030 spin_lock_irqsave(&tp->lock, flags); 1030 spin_lock_irqsave(&tp->lock, flags);
1031 tp->vlgrp = grp; 1031 tp->vlgrp = grp;
1032 if (tp->vlgrp) 1032 /*
1033 * Do not disable RxVlan on 8110SCd.
1034 */
1035 if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
1033 tp->cp_cmd |= RxVlan; 1036 tp->cp_cmd |= RxVlan;
1034 else 1037 else
1035 tp->cp_cmd &= ~RxVlan; 1038 tp->cp_cmd &= ~RxVlan;
@@ -3197,6 +3200,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3197 } 3200 }
3198 3201
3199 rtl8169_init_phy(dev, tp); 3202 rtl8169_init_phy(dev, tp);
3203
3204 /*
3205 * Pretend we are using VLANs; This bypasses a nasty bug where
3206 * Interrupts stop flowing on high load on 8110SCd controllers.
3207 */
3208 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3209 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3210
3200 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3211 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
3201 3212
3202out: 3213out:
@@ -3368,7 +3379,7 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
3368static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) 3379static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
3369{ 3380{
3370 /* Low hurts. Let's disable the filtering. */ 3381 /* Low hurts. Let's disable the filtering. */
3371 RTL_W16(RxMaxSize, rx_buf_sz); 3382 RTL_W16(RxMaxSize, rx_buf_sz + 1);
3372} 3383}
3373 3384
3374static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) 3385static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index b89f9be3cb13..7b52fe10d38f 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,6 +1,6 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \ 1sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \
2 falcon_xmac.o selftest.o ethtool.o xfp_phy.o \ 2 falcon_xmac.o selftest.o ethtool.o qt202x_phy.o \
3 mdio_10g.o tenxpress.o boards.o sfe4001.o 3 mdio_10g.o tenxpress.o falcon_boards.o
4sfc-$(CONFIG_SFC_MTD) += mtd.o 4sfc-$(CONFIG_SFC_MTD) += mtd.o
5 5
6obj-$(CONFIG_SFC) += sfc.o 6obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index d54d84c267b9..6ad909bba957 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -520,19 +520,6 @@ typedef union efx_oword {
520#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 520#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
521#endif 521#endif
522 522
523#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
524 if (falcon_rev(efx) >= FALCON_REV_B0) { \
525 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
526 } else { \
527 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
528 } \
529} while (0)
530
531#define EFX_QWORD_FIELD_VER(efx, qword, field) \
532 (falcon_rev(efx) >= FALCON_REV_B0 ? \
533 EFX_QWORD_FIELD((qword), field##_B0) : \
534 EFX_QWORD_FIELD((qword), field##_A1))
535
536/* Used to avoid compiler warnings about shift range exceeding width 523/* Used to avoid compiler warnings about shift range exceeding width
537 * of the data types when dma_addr_t is only 32 bits wide. 524 * of the data types when dma_addr_t is only 32 bits wide.
538 */ 525 */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
deleted file mode 100644
index 4a4c74c891b7..000000000000
--- a/drivers/net/sfc/boards.c
+++ /dev/null
@@ -1,328 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "phy.h"
12#include "boards.h"
13#include "efx.h"
14#include "workarounds.h"
15
16/* Macros for unpacking the board revision */
17/* The revision info is in host byte order. */
18#define BOARD_TYPE(_rev) (_rev >> 8)
19#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
20#define BOARD_MINOR(_rev) (_rev & 0xf)
21
22/* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */
23#define BLINK_INTERVAL (HZ/2)
24
25static void blink_led_timer(unsigned long context)
26{
27 struct efx_nic *efx = (struct efx_nic *)context;
28 struct efx_blinker *bl = &efx->board_info.blinker;
29 efx->board_info.set_id_led(efx, bl->state);
30 bl->state = !bl->state;
31 if (bl->resubmit)
32 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
33}
34
35static void board_blink(struct efx_nic *efx, bool blink)
36{
37 struct efx_blinker *blinker = &efx->board_info.blinker;
38
39 /* The rtnl mutex serialises all ethtool ioctls, so
40 * nothing special needs doing here. */
41 if (blink) {
42 blinker->resubmit = true;
43 blinker->state = false;
44 setup_timer(&blinker->timer, blink_led_timer,
45 (unsigned long)efx);
46 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
47 } else {
48 blinker->resubmit = false;
49 if (blinker->timer.function)
50 del_timer_sync(&blinker->timer);
51 efx->board_info.init_leds(efx);
52 }
53}
54
55/*****************************************************************************
56 * Support for LM87 sensor chip used on several boards
57 */
58#define LM87_REG_ALARMS1 0x41
59#define LM87_REG_ALARMS2 0x42
60#define LM87_IN_LIMITS(nr, _min, _max) \
61 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
62#define LM87_AIN_LIMITS(nr, _min, _max) \
63 0x3B + (nr), _max, 0x1A + (nr), _min
64#define LM87_TEMP_INT_LIMITS(_min, _max) \
65 0x39, _max, 0x3A, _min
66#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
67 0x37, _max, 0x38, _min
68
69#define LM87_ALARM_TEMP_INT 0x10
70#define LM87_ALARM_TEMP_EXT1 0x20
71
72#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
73
74static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
75 const u8 *reg_values)
76{
77 struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
78 int rc;
79
80 if (!client)
81 return -EIO;
82
83 while (*reg_values) {
84 u8 reg = *reg_values++;
85 u8 value = *reg_values++;
86 rc = i2c_smbus_write_byte_data(client, reg, value);
87 if (rc)
88 goto err;
89 }
90
91 efx->board_info.hwmon_client = client;
92 return 0;
93
94err:
95 i2c_unregister_device(client);
96 return rc;
97}
98
99static void efx_fini_lm87(struct efx_nic *efx)
100{
101 i2c_unregister_device(efx->board_info.hwmon_client);
102}
103
104static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
105{
106 struct i2c_client *client = efx->board_info.hwmon_client;
107 s32 alarms1, alarms2;
108
109 /* If link is up then do not monitor temperature */
110 if (EFX_WORKAROUND_7884(efx) && efx->link_up)
111 return 0;
112
113 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
114 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
115 if (alarms1 < 0)
116 return alarms1;
117 if (alarms2 < 0)
118 return alarms2;
119 alarms1 &= mask;
120 alarms2 &= mask >> 8;
121 if (alarms1 || alarms2) {
122 EFX_ERR(efx,
123 "LM87 detected a hardware failure (status %02x:%02x)"
124 "%s%s\n",
125 alarms1, alarms2,
126 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
127 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
128 return -ERANGE;
129 }
130
131 return 0;
132}
133
134#else /* !CONFIG_SENSORS_LM87 */
135
136static inline int
137efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
138 const u8 *reg_values)
139{
140 return 0;
141}
142static inline void efx_fini_lm87(struct efx_nic *efx)
143{
144}
145static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
146{
147 return 0;
148}
149
150#endif /* CONFIG_SENSORS_LM87 */
151
152/*****************************************************************************
153 * Support for the SFE4002
154 *
155 */
156static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
157
158static const u8 sfe4002_lm87_regs[] = {
159 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
160 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
161 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
162 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
163 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
164 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
165 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
166 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
167 LM87_TEMP_INT_LIMITS(10, 60), /* board */
168 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
169 0
170};
171
172static struct i2c_board_info sfe4002_hwmon_info = {
173 I2C_BOARD_INFO("lm87", 0x2e),
174 .platform_data = &sfe4002_lm87_channel,
175};
176
177/****************************************************************************/
178/* LED allocations. Note that on rev A0 boards the schematic and the reality
179 * differ: red and green are swapped. Below is the fixed (A1) layout (there
180 * are only 3 A0 boards in existence, so no real reason to make this
181 * conditional).
182 */
183#define SFE4002_FAULT_LED (2) /* Red */
184#define SFE4002_RX_LED (0) /* Green */
185#define SFE4002_TX_LED (1) /* Amber */
186
187static void sfe4002_init_leds(struct efx_nic *efx)
188{
189 /* Set the TX and RX LEDs to reflect status and activity, and the
190 * fault LED off */
191 xfp_set_led(efx, SFE4002_TX_LED,
192 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
193 xfp_set_led(efx, SFE4002_RX_LED,
194 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
195 xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
196}
197
198static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
199{
200 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
201 QUAKE_LED_OFF);
202}
203
204static int sfe4002_check_hw(struct efx_nic *efx)
205{
206 /* A0 board rev. 4002s report a temperature fault the whole time
207 * (bad sensor) so we mask it out. */
208 unsigned alarm_mask =
209 (efx->board_info.major == 0 && efx->board_info.minor == 0) ?
210 ~LM87_ALARM_TEMP_EXT1 : ~0;
211
212 return efx_check_lm87(efx, alarm_mask);
213}
214
215static int sfe4002_init(struct efx_nic *efx)
216{
217 int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
218 if (rc)
219 return rc;
220 efx->board_info.monitor = sfe4002_check_hw;
221 efx->board_info.init_leds = sfe4002_init_leds;
222 efx->board_info.set_id_led = sfe4002_set_id_led;
223 efx->board_info.blink = board_blink;
224 efx->board_info.fini = efx_fini_lm87;
225 return 0;
226}
227
228/*****************************************************************************
229 * Support for the SFN4112F
230 *
231 */
232static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
233
234static const u8 sfn4112f_lm87_regs[] = {
235 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
236 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
237 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
238 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
239 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
240 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
241 LM87_TEMP_INT_LIMITS(10, 60), /* board */
242 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
243 0
244};
245
246static struct i2c_board_info sfn4112f_hwmon_info = {
247 I2C_BOARD_INFO("lm87", 0x2e),
248 .platform_data = &sfn4112f_lm87_channel,
249};
250
251#define SFN4112F_ACT_LED 0
252#define SFN4112F_LINK_LED 1
253
254static void sfn4112f_init_leds(struct efx_nic *efx)
255{
256 xfp_set_led(efx, SFN4112F_ACT_LED,
257 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
258 xfp_set_led(efx, SFN4112F_LINK_LED,
259 QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
260}
261
262static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
263{
264 xfp_set_led(efx, SFN4112F_LINK_LED,
265 state ? QUAKE_LED_ON : QUAKE_LED_OFF);
266}
267
268static int sfn4112f_check_hw(struct efx_nic *efx)
269{
270 /* Mask out unused sensors */
271 return efx_check_lm87(efx, ~0x48);
272}
273
274static int sfn4112f_init(struct efx_nic *efx)
275{
276 int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
277 if (rc)
278 return rc;
279 efx->board_info.monitor = sfn4112f_check_hw;
280 efx->board_info.init_leds = sfn4112f_init_leds;
281 efx->board_info.set_id_led = sfn4112f_set_id_led;
282 efx->board_info.blink = board_blink;
283 efx->board_info.fini = efx_fini_lm87;
284 return 0;
285}
286
287/* This will get expanded as board-specific details get moved out of the
288 * PHY drivers. */
289struct efx_board_data {
290 enum efx_board_type type;
291 const char *ref_model;
292 const char *gen_type;
293 int (*init) (struct efx_nic *nic);
294};
295
296
297static struct efx_board_data board_data[] = {
298 { EFX_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
299 { EFX_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
300 { EFX_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
301 sfn4111t_init },
302 { EFX_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
303 sfn4112f_init },
304};
305
306void efx_set_board_info(struct efx_nic *efx, u16 revision_info)
307{
308 struct efx_board_data *data = NULL;
309 int i;
310
311 efx->board_info.type = BOARD_TYPE(revision_info);
312 efx->board_info.major = BOARD_MAJOR(revision_info);
313 efx->board_info.minor = BOARD_MINOR(revision_info);
314
315 for (i = 0; i < ARRAY_SIZE(board_data); i++)
316 if (board_data[i].type == efx->board_info.type)
317 data = &board_data[i];
318
319 if (data) {
320 EFX_INFO(efx, "board is %s rev %c%d\n",
321 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
322 ? data->ref_model : data->gen_type,
323 'A' + efx->board_info.major, efx->board_info.minor);
324 efx->board_info.init = data->init;
325 } else {
326 EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
327 }
328}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
deleted file mode 100644
index 44942de0e080..000000000000
--- a/drivers/net/sfc/boards.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_BOARDS_H
11#define EFX_BOARDS_H
12
13/* Board IDs (must fit in 8 bits) */
14enum efx_board_type {
15 EFX_BOARD_SFE4001 = 1,
16 EFX_BOARD_SFE4002 = 2,
17 EFX_BOARD_SFN4111T = 0x51,
18 EFX_BOARD_SFN4112F = 0x52,
19};
20
21extern void efx_set_board_info(struct efx_nic *efx, u16 revision_info);
22
23/* SFE4001 (10GBASE-T) */
24extern int sfe4001_init(struct efx_nic *efx);
25/* SFN4111T (100/1000/10GBASE-T) */
26extern int sfn4111t_init(struct efx_nic *efx);
27
28#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index cc4b2f99989d..0d0243b7ac34 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -228,26 +228,20 @@ static int efx_poll(struct napi_struct *napi, int budget)
228 if (channel->used_flags & EFX_USED_BY_RX && 228 if (channel->used_flags & EFX_USED_BY_RX &&
229 efx->irq_rx_adaptive && 229 efx->irq_rx_adaptive &&
230 unlikely(++channel->irq_count == 1000)) { 230 unlikely(++channel->irq_count == 1000)) {
231 unsigned old_irq_moderation = channel->irq_moderation;
232
233 if (unlikely(channel->irq_mod_score < 231 if (unlikely(channel->irq_mod_score <
234 irq_adapt_low_thresh)) { 232 irq_adapt_low_thresh)) {
235 channel->irq_moderation = 233 if (channel->irq_moderation > 1) {
236 max_t(int, 234 channel->irq_moderation -= 1;
237 channel->irq_moderation - 235 falcon_set_int_moderation(channel);
238 FALCON_IRQ_MOD_RESOLUTION, 236 }
239 FALCON_IRQ_MOD_RESOLUTION);
240 } else if (unlikely(channel->irq_mod_score > 237 } else if (unlikely(channel->irq_mod_score >
241 irq_adapt_high_thresh)) { 238 irq_adapt_high_thresh)) {
242 channel->irq_moderation = 239 if (channel->irq_moderation <
243 min(channel->irq_moderation + 240 efx->irq_rx_moderation) {
244 FALCON_IRQ_MOD_RESOLUTION, 241 channel->irq_moderation += 1;
245 efx->irq_rx_moderation); 242 falcon_set_int_moderation(channel);
243 }
246 } 244 }
247
248 if (channel->irq_moderation != old_irq_moderation)
249 falcon_set_int_moderation(channel);
250
251 channel->irq_count = 0; 245 channel->irq_count = 0;
252 channel->irq_mod_score = 0; 246 channel->irq_mod_score = 0;
253 } 247 }
@@ -290,7 +284,7 @@ void efx_process_channel_now(struct efx_channel *channel)
290 napi_disable(&channel->napi_str); 284 napi_disable(&channel->napi_str);
291 285
292 /* Poll the channel */ 286 /* Poll the channel */
293 efx_process_channel(channel, efx->type->evq_size); 287 efx_process_channel(channel, EFX_EVQ_SIZE);
294 288
295 /* Ack the eventq. This may cause an interrupt to be generated 289 /* Ack the eventq. This may cause an interrupt to be generated
296 * when they are reenabled */ 290 * when they are reenabled */
@@ -824,9 +818,8 @@ static int efx_init_io(struct efx_nic *efx)
824 goto fail2; 818 goto fail2;
825 } 819 }
826 820
827 efx->membase_phys = pci_resource_start(efx->pci_dev, 821 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
828 efx->type->mem_bar); 822 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
829 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
830 if (rc) { 823 if (rc) {
831 EFX_ERR(efx, "request for memory BAR failed\n"); 824 EFX_ERR(efx, "request for memory BAR failed\n");
832 rc = -EIO; 825 rc = -EIO;
@@ -835,21 +828,20 @@ static int efx_init_io(struct efx_nic *efx)
835 efx->membase = ioremap_nocache(efx->membase_phys, 828 efx->membase = ioremap_nocache(efx->membase_phys,
836 efx->type->mem_map_size); 829 efx->type->mem_map_size);
837 if (!efx->membase) { 830 if (!efx->membase) {
838 EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", 831 EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
839 efx->type->mem_bar,
840 (unsigned long long)efx->membase_phys, 832 (unsigned long long)efx->membase_phys,
841 efx->type->mem_map_size); 833 efx->type->mem_map_size);
842 rc = -ENOMEM; 834 rc = -ENOMEM;
843 goto fail4; 835 goto fail4;
844 } 836 }
845 EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", 837 EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
846 efx->type->mem_bar, (unsigned long long)efx->membase_phys, 838 (unsigned long long)efx->membase_phys,
847 efx->type->mem_map_size, efx->membase); 839 efx->type->mem_map_size, efx->membase);
848 840
849 return 0; 841 return 0;
850 842
851 fail4: 843 fail4:
852 pci_release_region(efx->pci_dev, efx->type->mem_bar); 844 pci_release_region(efx->pci_dev, EFX_MEM_BAR);
853 fail3: 845 fail3:
854 efx->membase_phys = 0; 846 efx->membase_phys = 0;
855 fail2: 847 fail2:
@@ -868,7 +860,7 @@ static void efx_fini_io(struct efx_nic *efx)
868 } 860 }
869 861
870 if (efx->membase_phys) { 862 if (efx->membase_phys) {
871 pci_release_region(efx->pci_dev, efx->type->mem_bar); 863 pci_release_region(efx->pci_dev, EFX_MEM_BAR);
872 efx->membase_phys = 0; 864 efx->membase_phys = 0;
873 } 865 }
874 866
@@ -1220,22 +1212,33 @@ void efx_flush_queues(struct efx_nic *efx)
1220 * 1212 *
1221 **************************************************************************/ 1213 **************************************************************************/
1222 1214
1215static unsigned irq_mod_ticks(int usecs, int resolution)
1216{
1217 if (usecs <= 0)
1218 return 0; /* cannot receive interrupts ahead of time :-) */
1219 if (usecs < resolution)
1220 return 1; /* never round down to 0 */
1221 return usecs / resolution;
1222}
1223
1223/* Set interrupt moderation parameters */ 1224/* Set interrupt moderation parameters */
1224void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, 1225void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1225 bool rx_adaptive) 1226 bool rx_adaptive)
1226{ 1227{
1227 struct efx_tx_queue *tx_queue; 1228 struct efx_tx_queue *tx_queue;
1228 struct efx_rx_queue *rx_queue; 1229 struct efx_rx_queue *rx_queue;
1230 unsigned tx_ticks = irq_mod_ticks(tx_usecs, FALCON_IRQ_MOD_RESOLUTION);
1231 unsigned rx_ticks = irq_mod_ticks(rx_usecs, FALCON_IRQ_MOD_RESOLUTION);
1229 1232
1230 EFX_ASSERT_RESET_SERIALISED(efx); 1233 EFX_ASSERT_RESET_SERIALISED(efx);
1231 1234
1232 efx_for_each_tx_queue(tx_queue, efx) 1235 efx_for_each_tx_queue(tx_queue, efx)
1233 tx_queue->channel->irq_moderation = tx_usecs; 1236 tx_queue->channel->irq_moderation = tx_ticks;
1234 1237
1235 efx->irq_rx_adaptive = rx_adaptive; 1238 efx->irq_rx_adaptive = rx_adaptive;
1236 efx->irq_rx_moderation = rx_usecs; 1239 efx->irq_rx_moderation = rx_ticks;
1237 efx_for_each_rx_queue(rx_queue, efx) 1240 efx_for_each_rx_queue(rx_queue, efx)
1238 rx_queue->channel->irq_moderation = rx_usecs; 1241 rx_queue->channel->irq_moderation = rx_ticks;
1239} 1242}
1240 1243
1241/************************************************************************** 1244/**************************************************************************
@@ -1981,17 +1984,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1981 1984
1982 efx->type = type; 1985 efx->type = type;
1983 1986
1984 /* Sanity-check NIC type */
1985 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1986 (efx->type->txd_ring_mask + 1));
1987 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1988 (efx->type->rxd_ring_mask + 1));
1989 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1990 (efx->type->evq_size - 1));
1991 /* As close as we can get to guaranteeing that we don't overflow */ 1987 /* As close as we can get to guaranteeing that we don't overflow */
1992 EFX_BUG_ON_PARANOID(efx->type->evq_size < 1988 BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
1993 (efx->type->txd_ring_mask + 1 + 1989
1994 efx->type->rxd_ring_mask + 1));
1995 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 1990 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1996 1991
1997 /* Higher numbered interrupt modes are less capable! */ 1992 /* Higher numbered interrupt modes are less capable! */
@@ -2027,18 +2022,12 @@ static void efx_fini_struct(struct efx_nic *efx)
2027 */ 2022 */
2028static void efx_pci_remove_main(struct efx_nic *efx) 2023static void efx_pci_remove_main(struct efx_nic *efx)
2029{ 2024{
2030 EFX_ASSERT_RESET_SERIALISED(efx); 2025 falcon_fini_interrupt(efx);
2031
2032 /* Skip everything if we never obtained a valid membase */
2033 if (!efx->membase)
2034 return;
2035
2036 efx_fini_channels(efx); 2026 efx_fini_channels(efx);
2037 efx_fini_port(efx); 2027 efx_fini_port(efx);
2038 2028
2039 /* Shutdown the board, then the NIC and board state */ 2029 /* Shutdown the board, then the NIC and board state */
2040 efx->board_info.fini(efx); 2030 efx->board_info.fini(efx);
2041 falcon_fini_interrupt(efx);
2042 2031
2043 efx_fini_napi(efx); 2032 efx_fini_napi(efx);
2044 efx_remove_all(efx); 2033 efx_remove_all(efx);
@@ -2063,9 +2052,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2063 /* Allow any queued efx_resets() to complete */ 2052 /* Allow any queued efx_resets() to complete */
2064 rtnl_unlock(); 2053 rtnl_unlock();
2065 2054
2066 if (efx->membase == NULL)
2067 goto out;
2068
2069 efx_unregister_netdev(efx); 2055 efx_unregister_netdev(efx);
2070 2056
2071 efx_mtd_remove(efx); 2057 efx_mtd_remove(efx);
@@ -2078,7 +2064,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2078 2064
2079 efx_pci_remove_main(efx); 2065 efx_pci_remove_main(efx);
2080 2066
2081out:
2082 efx_fini_io(efx); 2067 efx_fini_io(efx);
2083 EFX_LOG(efx, "shutdown successful\n"); 2068 EFX_LOG(efx, "shutdown successful\n");
2084 2069
@@ -2224,13 +2209,15 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2224 * MAC stats succeeds. */ 2209 * MAC stats succeeds. */
2225 efx->state = STATE_RUNNING; 2210 efx->state = STATE_RUNNING;
2226 2211
2227 efx_mtd_probe(efx); /* allowed to fail */
2228
2229 rc = efx_register_netdev(efx); 2212 rc = efx_register_netdev(efx);
2230 if (rc) 2213 if (rc)
2231 goto fail5; 2214 goto fail5;
2232 2215
2233 EFX_LOG(efx, "initialisation successful\n"); 2216 EFX_LOG(efx, "initialisation successful\n");
2217
2218 rtnl_lock();
2219 efx_mtd_probe(efx); /* allowed to fail */
2220 rtnl_unlock();
2234 return 0; 2221 return 0;
2235 2222
2236 fail5: 2223 fail5:
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index aecaf62f4929..179e0e3b0ec6 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -19,22 +19,31 @@
19#define FALCON_A_S_DEVID 0x6703 19#define FALCON_A_S_DEVID 0x6703
20#define FALCON_B_P_DEVID 0x0710 20#define FALCON_B_P_DEVID 0x0710
21 21
22/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
23#define EFX_MEM_BAR 2
24
22/* TX */ 25/* TX */
23extern netdev_tx_t efx_xmit(struct efx_nic *efx, 26extern netdev_tx_t efx_xmit(struct efx_nic *efx,
24 struct efx_tx_queue *tx_queue, 27 struct efx_tx_queue *tx_queue,
25 struct sk_buff *skb); 28 struct sk_buff *skb);
29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
26extern void efx_stop_queue(struct efx_nic *efx); 30extern void efx_stop_queue(struct efx_nic *efx);
27extern void efx_wake_queue(struct efx_nic *efx); 31extern void efx_wake_queue(struct efx_nic *efx);
32#define EFX_TXQ_SIZE 1024
33#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
28 34
29/* RX */ 35/* RX */
30extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
31extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 36extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
32 unsigned int len, bool checksummed, bool discard); 37 unsigned int len, bool checksummed, bool discard);
33extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); 38extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
39#define EFX_RXQ_SIZE 1024
40#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
34 41
35/* Channels */ 42/* Channels */
36extern void efx_process_channel_now(struct efx_channel *channel); 43extern void efx_process_channel_now(struct efx_channel *channel);
37extern void efx_flush_queues(struct efx_nic *efx); 44extern void efx_flush_queues(struct efx_nic *efx);
45#define EFX_EVQ_SIZE 4096
46#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
38 47
39/* Ports */ 48/* Ports */
40extern void efx_stats_disable(struct efx_nic *efx); 49extern void efx_stats_disable(struct efx_nic *efx);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 45018f283ffa..a313b61c8ff4 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -618,6 +618,9 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
618 coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive; 618 coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
619 coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation; 619 coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;
620 620
621 coalesce->tx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
622 coalesce->rx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
623
621 return 0; 624 return 0;
622} 625}
623 626
@@ -656,11 +659,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
656 } 659 }
657 660
658 efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); 661 efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive);
659
660 /* Reset channel to pick up new moderation value. Note that
661 * this may change the value of the irq_moderation field
662 * (e.g. to allow for hardware timer granularity).
663 */
664 efx_for_each_channel(channel, efx) 662 efx_for_each_channel(channel, efx)
665 falcon_set_int_moderation(channel); 663 falcon_set_int_moderation(channel);
666 664
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index c049364aec46..865638b035bf 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -22,11 +22,10 @@
22#include "mac.h" 22#include "mac.h"
23#include "spi.h" 23#include "spi.h"
24#include "falcon.h" 24#include "falcon.h"
25#include "falcon_hwdefs.h" 25#include "regs.h"
26#include "falcon_io.h" 26#include "io.h"
27#include "mdio_10g.h" 27#include "mdio_10g.h"
28#include "phy.h" 28#include "phy.h"
29#include "boards.h"
30#include "workarounds.h" 29#include "workarounds.h"
31 30
32/* Falcon hardware control. 31/* Falcon hardware control.
@@ -36,19 +35,12 @@
36 35
37/** 36/**
38 * struct falcon_nic_data - Falcon NIC state 37 * struct falcon_nic_data - Falcon NIC state
39 * @next_buffer_table: First available buffer table id
40 * @pci_dev2: The secondary PCI device if present 38 * @pci_dev2: The secondary PCI device if present
41 * @i2c_data: Operations and state for I2C bit-bashing algorithm 39 * @i2c_data: Operations and state for I2C bit-bashing algorithm
42 * @int_error_count: Number of internal errors seen recently
43 * @int_error_expire: Time at which error count will be expired
44 */ 40 */
45struct falcon_nic_data { 41struct falcon_nic_data {
46 unsigned next_buffer_table;
47 struct pci_dev *pci_dev2; 42 struct pci_dev *pci_dev2;
48 struct i2c_algo_bit_data i2c_data; 43 struct i2c_algo_bit_data i2c_data;
49
50 unsigned int_error_count;
51 unsigned long int_error_expire;
52}; 44};
53 45
54/************************************************************************** 46/**************************************************************************
@@ -109,21 +101,6 @@ static int rx_xon_thresh_bytes = -1;
109module_param(rx_xon_thresh_bytes, int, 0644); 101module_param(rx_xon_thresh_bytes, int, 0644);
110MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); 102MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
111 103
112/* TX descriptor ring size - min 512 max 4k */
113#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
114#define FALCON_TXD_RING_SIZE 1024
115#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
116
117/* RX descriptor ring size - min 512 max 4k */
118#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
119#define FALCON_RXD_RING_SIZE 1024
120#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
121
122/* Event queue size - max 32k */
123#define FALCON_EVQ_ORDER EVQ_SIZE_4K
124#define FALCON_EVQ_SIZE 4096
125#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
126
127/* If FALCON_MAX_INT_ERRORS internal errors occur within 104/* If FALCON_MAX_INT_ERRORS internal errors occur within
128 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 105 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
129 * disable it. 106 * disable it.
@@ -143,12 +120,6 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
143 ************************************************************************** 120 **************************************************************************
144 */ 121 */
145 122
146/* DMA address mask */
147#define FALCON_DMA_MASK DMA_BIT_MASK(46)
148
149/* TX DMA length mask (13-bit) */
150#define FALCON_TX_DMA_MASK (4096 - 1)
151
152/* Size and alignment of special buffers (4KB) */ 123/* Size and alignment of special buffers (4KB) */
153#define FALCON_BUF_SIZE 4096 124#define FALCON_BUF_SIZE 4096
154 125
@@ -164,6 +135,13 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
164 * 135 *
165 **************************************************************************/ 136 **************************************************************************/
166 137
138static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
139 unsigned int index)
140{
141 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
142 value, index);
143}
144
167/* Read the current event from the event queue */ 145/* Read the current event from the event queue */
168static inline efx_qword_t *falcon_event(struct efx_channel *channel, 146static inline efx_qword_t *falcon_event(struct efx_channel *channel,
169 unsigned int index) 147 unsigned int index)
@@ -200,9 +178,9 @@ static void falcon_setsda(void *data, int state)
200 struct efx_nic *efx = (struct efx_nic *)data; 178 struct efx_nic *efx = (struct efx_nic *)data;
201 efx_oword_t reg; 179 efx_oword_t reg;
202 180
203 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 181 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
204 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state); 182 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
205 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 183 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
206} 184}
207 185
208static void falcon_setscl(void *data, int state) 186static void falcon_setscl(void *data, int state)
@@ -210,9 +188,9 @@ static void falcon_setscl(void *data, int state)
210 struct efx_nic *efx = (struct efx_nic *)data; 188 struct efx_nic *efx = (struct efx_nic *)data;
211 efx_oword_t reg; 189 efx_oword_t reg;
212 190
213 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 191 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
214 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state); 192 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
215 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 193 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
216} 194}
217 195
218static int falcon_getsda(void *data) 196static int falcon_getsda(void *data)
@@ -220,8 +198,8 @@ static int falcon_getsda(void *data)
220 struct efx_nic *efx = (struct efx_nic *)data; 198 struct efx_nic *efx = (struct efx_nic *)data;
221 efx_oword_t reg; 199 efx_oword_t reg;
222 200
223 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 201 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
224 return EFX_OWORD_FIELD(reg, GPIO3_IN); 202 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
225} 203}
226 204
227static int falcon_getscl(void *data) 205static int falcon_getscl(void *data)
@@ -229,8 +207,8 @@ static int falcon_getscl(void *data)
229 struct efx_nic *efx = (struct efx_nic *)data; 207 struct efx_nic *efx = (struct efx_nic *)data;
230 efx_oword_t reg; 208 efx_oword_t reg;
231 209
232 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 210 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
233 return EFX_OWORD_FIELD(reg, GPIO0_IN); 211 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
234} 212}
235 213
236static struct i2c_algo_bit_data falcon_i2c_bit_operations = { 214static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
@@ -275,12 +253,11 @@ falcon_init_special_buffer(struct efx_nic *efx,
275 dma_addr = buffer->dma_addr + (i * 4096); 253 dma_addr = buffer->dma_addr + (i * 4096);
276 EFX_LOG(efx, "mapping special buffer %d at %llx\n", 254 EFX_LOG(efx, "mapping special buffer %d at %llx\n",
277 index, (unsigned long long)dma_addr); 255 index, (unsigned long long)dma_addr);
278 EFX_POPULATE_QWORD_4(buf_desc, 256 EFX_POPULATE_QWORD_3(buf_desc,
279 IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K, 257 FRF_AZ_BUF_ADR_REGION, 0,
280 BUF_ADR_REGION, 0, 258 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
281 BUF_ADR_FBUF, (dma_addr >> 12), 259 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
282 BUF_OWNER_ID_FBUF, 0); 260 falcon_write_buf_tbl(efx, &buf_desc, index);
283 falcon_write_sram(efx, &buf_desc, index);
284 } 261 }
285} 262}
286 263
@@ -300,11 +277,11 @@ falcon_fini_special_buffer(struct efx_nic *efx,
300 buffer->index, buffer->index + buffer->entries - 1); 277 buffer->index, buffer->index + buffer->entries - 1);
301 278
302 EFX_POPULATE_OWORD_4(buf_tbl_upd, 279 EFX_POPULATE_OWORD_4(buf_tbl_upd,
303 BUF_UPD_CMD, 0, 280 FRF_AZ_BUF_UPD_CMD, 0,
304 BUF_CLR_CMD, 1, 281 FRF_AZ_BUF_CLR_CMD, 1,
305 BUF_CLR_END_ID, end, 282 FRF_AZ_BUF_CLR_END_ID, end,
306 BUF_CLR_START_ID, start); 283 FRF_AZ_BUF_CLR_START_ID, start);
307 falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER); 284 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
308} 285}
309 286
310/* 287/*
@@ -320,8 +297,6 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
320 struct efx_special_buffer *buffer, 297 struct efx_special_buffer *buffer,
321 unsigned int len) 298 unsigned int len)
322{ 299{
323 struct falcon_nic_data *nic_data = efx->nic_data;
324
325 len = ALIGN(len, FALCON_BUF_SIZE); 300 len = ALIGN(len, FALCON_BUF_SIZE);
326 301
327 buffer->addr = pci_alloc_consistent(efx->pci_dev, len, 302 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
@@ -336,8 +311,8 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
336 memset(buffer->addr, 0xff, len); 311 memset(buffer->addr, 0xff, len);
337 312
338 /* Select new buffer ID */ 313 /* Select new buffer ID */
339 buffer->index = nic_data->next_buffer_table; 314 buffer->index = efx->next_buffer_table;
340 nic_data->next_buffer_table += buffer->entries; 315 efx->next_buffer_table += buffer->entries;
341 316
342 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " 317 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
343 "(virt %p phys %llx)\n", buffer->index, 318 "(virt %p phys %llx)\n", buffer->index,
@@ -415,10 +390,10 @@ static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
415 unsigned write_ptr; 390 unsigned write_ptr;
416 efx_dword_t reg; 391 efx_dword_t reg;
417 392
418 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; 393 write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
419 EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr); 394 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
420 falcon_writel_page(tx_queue->efx, &reg, 395 efx_writed_page(tx_queue->efx, &reg,
421 TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue); 396 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
422} 397}
423 398
424 399
@@ -436,18 +411,17 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
436 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 411 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
437 412
438 do { 413 do {
439 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; 414 write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
440 buffer = &tx_queue->buffer[write_ptr]; 415 buffer = &tx_queue->buffer[write_ptr];
441 txd = falcon_tx_desc(tx_queue, write_ptr); 416 txd = falcon_tx_desc(tx_queue, write_ptr);
442 ++tx_queue->write_count; 417 ++tx_queue->write_count;
443 418
444 /* Create TX descriptor ring entry */ 419 /* Create TX descriptor ring entry */
445 EFX_POPULATE_QWORD_5(*txd, 420 EFX_POPULATE_QWORD_4(*txd,
446 TX_KER_PORT, 0, 421 FSF_AZ_TX_KER_CONT, buffer->continuation,
447 TX_KER_CONT, buffer->continuation, 422 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
448 TX_KER_BYTE_CNT, buffer->len, 423 FSF_AZ_TX_KER_BUF_REGION, 0,
449 TX_KER_BUF_REGION, 0, 424 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
450 TX_KER_BUF_ADR, buffer->dma_addr);
451 } while (tx_queue->write_count != tx_queue->insert_count); 425 } while (tx_queue->write_count != tx_queue->insert_count);
452 426
453 wmb(); /* Ensure descriptors are written before they are fetched */ 427 wmb(); /* Ensure descriptors are written before they are fetched */
@@ -458,9 +432,10 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
458int falcon_probe_tx(struct efx_tx_queue *tx_queue) 432int falcon_probe_tx(struct efx_tx_queue *tx_queue)
459{ 433{
460 struct efx_nic *efx = tx_queue->efx; 434 struct efx_nic *efx = tx_queue->efx;
435 BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
436 EFX_TXQ_SIZE & EFX_TXQ_MASK);
461 return falcon_alloc_special_buffer(efx, &tx_queue->txd, 437 return falcon_alloc_special_buffer(efx, &tx_queue->txd,
462 FALCON_TXD_RING_SIZE * 438 EFX_TXQ_SIZE * sizeof(efx_qword_t));
463 sizeof(efx_qword_t));
464} 439}
465 440
466void falcon_init_tx(struct efx_tx_queue *tx_queue) 441void falcon_init_tx(struct efx_tx_queue *tx_queue)
@@ -475,25 +450,28 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
475 450
476 /* Push TX descriptor ring to card */ 451 /* Push TX descriptor ring to card */
477 EFX_POPULATE_OWORD_10(tx_desc_ptr, 452 EFX_POPULATE_OWORD_10(tx_desc_ptr,
478 TX_DESCQ_EN, 1, 453 FRF_AZ_TX_DESCQ_EN, 1,
479 TX_ISCSI_DDIG_EN, 0, 454 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
480 TX_ISCSI_HDIG_EN, 0, 455 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
481 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 456 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
482 TX_DESCQ_EVQ_ID, tx_queue->channel->channel, 457 FRF_AZ_TX_DESCQ_EVQ_ID,
483 TX_DESCQ_OWNER_ID, 0, 458 tx_queue->channel->channel,
484 TX_DESCQ_LABEL, tx_queue->queue, 459 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
485 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, 460 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
486 TX_DESCQ_TYPE, 0, 461 FRF_AZ_TX_DESCQ_SIZE,
487 TX_NON_IP_DROP_DIS_B0, 1); 462 __ffs(tx_queue->txd.entries),
463 FRF_AZ_TX_DESCQ_TYPE, 0,
464 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
488 465
489 if (falcon_rev(efx) >= FALCON_REV_B0) { 466 if (falcon_rev(efx) >= FALCON_REV_B0) {
490 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; 467 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
491 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum); 468 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
492 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum); 469 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
470 !csum);
493 } 471 }
494 472
495 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 473 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
496 tx_queue->queue); 474 tx_queue->queue);
497 475
498 if (falcon_rev(efx) < FALCON_REV_B0) { 476 if (falcon_rev(efx) < FALCON_REV_B0) {
499 efx_oword_t reg; 477 efx_oword_t reg;
@@ -501,12 +479,12 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
501 /* Only 128 bits in this register */ 479 /* Only 128 bits in this register */
502 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); 480 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
503 481
504 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 482 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
505 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) 483 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
506 clear_bit_le(tx_queue->queue, (void *)&reg); 484 clear_bit_le(tx_queue->queue, (void *)&reg);
507 else 485 else
508 set_bit_le(tx_queue->queue, (void *)&reg); 486 set_bit_le(tx_queue->queue, (void *)&reg);
509 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 487 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
510 } 488 }
511} 489}
512 490
@@ -517,9 +495,9 @@ static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
517 495
518 /* Post a flush command */ 496 /* Post a flush command */
519 EFX_POPULATE_OWORD_2(tx_flush_descq, 497 EFX_POPULATE_OWORD_2(tx_flush_descq,
520 TX_FLUSH_DESCQ_CMD, 1, 498 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
521 TX_FLUSH_DESCQ, tx_queue->queue); 499 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
522 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); 500 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
523} 501}
524 502
525void falcon_fini_tx(struct efx_tx_queue *tx_queue) 503void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -532,8 +510,8 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
532 510
533 /* Remove TX descriptor ring from card */ 511 /* Remove TX descriptor ring from card */
534 EFX_ZERO_OWORD(tx_desc_ptr); 512 EFX_ZERO_OWORD(tx_desc_ptr);
535 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 513 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
536 tx_queue->queue); 514 tx_queue->queue);
537 515
538 /* Unpin TX descriptor ring */ 516 /* Unpin TX descriptor ring */
539 falcon_fini_special_buffer(efx, &tx_queue->txd); 517 falcon_fini_special_buffer(efx, &tx_queue->txd);
@@ -568,11 +546,11 @@ static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
568 rxd = falcon_rx_desc(rx_queue, index); 546 rxd = falcon_rx_desc(rx_queue, index);
569 rx_buf = efx_rx_buffer(rx_queue, index); 547 rx_buf = efx_rx_buffer(rx_queue, index);
570 EFX_POPULATE_QWORD_3(*rxd, 548 EFX_POPULATE_QWORD_3(*rxd,
571 RX_KER_BUF_SIZE, 549 FSF_AZ_RX_KER_BUF_SIZE,
572 rx_buf->len - 550 rx_buf->len -
573 rx_queue->efx->type->rx_buffer_padding, 551 rx_queue->efx->type->rx_buffer_padding,
574 RX_KER_BUF_REGION, 0, 552 FSF_AZ_RX_KER_BUF_REGION, 0,
575 RX_KER_BUF_ADR, rx_buf->dma_addr); 553 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
576} 554}
577 555
578/* This writes to the RX_DESC_WPTR register for the specified receive 556/* This writes to the RX_DESC_WPTR register for the specified receive
@@ -586,23 +564,24 @@ void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
586 while (rx_queue->notified_count != rx_queue->added_count) { 564 while (rx_queue->notified_count != rx_queue->added_count) {
587 falcon_build_rx_desc(rx_queue, 565 falcon_build_rx_desc(rx_queue,
588 rx_queue->notified_count & 566 rx_queue->notified_count &
589 FALCON_RXD_RING_MASK); 567 EFX_RXQ_MASK);
590 ++rx_queue->notified_count; 568 ++rx_queue->notified_count;
591 } 569 }
592 570
593 wmb(); 571 wmb();
594 write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK; 572 write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
595 EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr); 573 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
596 falcon_writel_page(rx_queue->efx, &reg, 574 efx_writed_page(rx_queue->efx, &reg,
597 RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue); 575 FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
598} 576}
599 577
600int falcon_probe_rx(struct efx_rx_queue *rx_queue) 578int falcon_probe_rx(struct efx_rx_queue *rx_queue)
601{ 579{
602 struct efx_nic *efx = rx_queue->efx; 580 struct efx_nic *efx = rx_queue->efx;
581 BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
582 EFX_RXQ_SIZE & EFX_RXQ_MASK);
603 return falcon_alloc_special_buffer(efx, &rx_queue->rxd, 583 return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
604 FALCON_RXD_RING_SIZE * 584 EFX_RXQ_SIZE * sizeof(efx_qword_t));
605 sizeof(efx_qword_t));
606} 585}
607 586
608void falcon_init_rx(struct efx_rx_queue *rx_queue) 587void falcon_init_rx(struct efx_rx_queue *rx_queue)
@@ -623,19 +602,21 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
623 602
624 /* Push RX descriptor ring to card */ 603 /* Push RX descriptor ring to card */
625 EFX_POPULATE_OWORD_10(rx_desc_ptr, 604 EFX_POPULATE_OWORD_10(rx_desc_ptr,
626 RX_ISCSI_DDIG_EN, iscsi_digest_en, 605 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
627 RX_ISCSI_HDIG_EN, iscsi_digest_en, 606 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
628 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 607 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
629 RX_DESCQ_EVQ_ID, rx_queue->channel->channel, 608 FRF_AZ_RX_DESCQ_EVQ_ID,
630 RX_DESCQ_OWNER_ID, 0, 609 rx_queue->channel->channel,
631 RX_DESCQ_LABEL, rx_queue->queue, 610 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
632 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, 611 FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
633 RX_DESCQ_TYPE, 0 /* kernel queue */ , 612 FRF_AZ_RX_DESCQ_SIZE,
613 __ffs(rx_queue->rxd.entries),
614 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
634 /* For >=B0 this is scatter so disable */ 615 /* For >=B0 this is scatter so disable */
635 RX_DESCQ_JUMBO, !is_b0, 616 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
636 RX_DESCQ_EN, 1); 617 FRF_AZ_RX_DESCQ_EN, 1);
637 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 618 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
638 rx_queue->queue); 619 rx_queue->queue);
639} 620}
640 621
641static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) 622static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
@@ -645,9 +626,9 @@ static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
645 626
646 /* Post a flush command */ 627 /* Post a flush command */
647 EFX_POPULATE_OWORD_2(rx_flush_descq, 628 EFX_POPULATE_OWORD_2(rx_flush_descq,
648 RX_FLUSH_DESCQ_CMD, 1, 629 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
649 RX_FLUSH_DESCQ, rx_queue->queue); 630 FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
650 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); 631 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
651} 632}
652 633
653void falcon_fini_rx(struct efx_rx_queue *rx_queue) 634void falcon_fini_rx(struct efx_rx_queue *rx_queue)
@@ -660,8 +641,8 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
660 641
661 /* Remove RX descriptor ring from card */ 642 /* Remove RX descriptor ring from card */
662 EFX_ZERO_OWORD(rx_desc_ptr); 643 EFX_ZERO_OWORD(rx_desc_ptr);
663 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 644 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
664 rx_queue->queue); 645 rx_queue->queue);
665 646
666 /* Unpin RX descriptor ring */ 647 /* Unpin RX descriptor ring */
667 falcon_fini_special_buffer(efx, &rx_queue->rxd); 648 falcon_fini_special_buffer(efx, &rx_queue->rxd);
@@ -694,8 +675,8 @@ void falcon_eventq_read_ack(struct efx_channel *channel)
694 efx_dword_t reg; 675 efx_dword_t reg;
695 struct efx_nic *efx = channel->efx; 676 struct efx_nic *efx = channel->efx;
696 677
697 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); 678 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
698 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base, 679 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
699 channel->channel); 680 channel->channel);
700} 681}
701 682
@@ -704,11 +685,14 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
704{ 685{
705 efx_oword_t drv_ev_reg; 686 efx_oword_t drv_ev_reg;
706 687
707 EFX_POPULATE_OWORD_2(drv_ev_reg, 688 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
708 DRV_EV_QID, channel->channel, 689 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
709 DRV_EV_DATA, 690 drv_ev_reg.u32[0] = event->u32[0];
710 EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); 691 drv_ev_reg.u32[1] = event->u32[1];
711 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); 692 drv_ev_reg.u32[2] = 0;
693 drv_ev_reg.u32[3] = 0;
694 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
695 efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
712} 696}
713 697
714/* Handle a transmit completion event 698/* Handle a transmit completion event
@@ -724,18 +708,18 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
724 struct efx_tx_queue *tx_queue; 708 struct efx_tx_queue *tx_queue;
725 struct efx_nic *efx = channel->efx; 709 struct efx_nic *efx = channel->efx;
726 710
727 if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) { 711 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
728 /* Transmit completion */ 712 /* Transmit completion */
729 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR); 713 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
730 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 714 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
731 tx_queue = &efx->tx_queue[tx_ev_q_label]; 715 tx_queue = &efx->tx_queue[tx_ev_q_label];
732 channel->irq_mod_score += 716 channel->irq_mod_score +=
733 (tx_ev_desc_ptr - tx_queue->read_count) & 717 (tx_ev_desc_ptr - tx_queue->read_count) &
734 efx->type->txd_ring_mask; 718 EFX_TXQ_MASK;
735 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 719 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
736 } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) { 720 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
737 /* Rewrite the FIFO write pointer */ 721 /* Rewrite the FIFO write pointer */
738 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 722 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
739 tx_queue = &efx->tx_queue[tx_ev_q_label]; 723 tx_queue = &efx->tx_queue[tx_ev_q_label];
740 724
741 if (efx_dev_registered(efx)) 725 if (efx_dev_registered(efx))
@@ -743,7 +727,7 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
743 falcon_notify_tx_desc(tx_queue); 727 falcon_notify_tx_desc(tx_queue);
744 if (efx_dev_registered(efx)) 728 if (efx_dev_registered(efx))
745 netif_tx_unlock(efx->net_dev); 729 netif_tx_unlock(efx->net_dev);
746 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && 730 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
747 EFX_WORKAROUND_10727(efx)) { 731 EFX_WORKAROUND_10727(efx)) {
748 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 732 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
749 } else { 733 } else {
@@ -767,22 +751,22 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
767 bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; 751 bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
768 unsigned rx_ev_pkt_type; 752 unsigned rx_ev_pkt_type;
769 753
770 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 754 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
771 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); 755 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
772 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC); 756 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
773 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE); 757 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
774 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 758 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
775 RX_EV_BUF_OWNER_ID_ERR); 759 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
776 rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR); 760 rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_IP_FRAG_ERR);
777 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 761 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
778 RX_EV_IP_HDR_CHKSUM_ERR); 762 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
779 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 763 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
780 RX_EV_TCP_UDP_CHKSUM_ERR); 764 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
781 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); 765 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
782 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); 766 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
783 rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? 767 rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
784 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); 768 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
785 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); 769 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
786 770
787 /* Every error apart from tobe_disc and pause_frm */ 771 /* Every error apart from tobe_disc and pause_frm */
788 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | 772 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
@@ -838,9 +822,8 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
838 struct efx_nic *efx = rx_queue->efx; 822 struct efx_nic *efx = rx_queue->efx;
839 unsigned expected, dropped; 823 unsigned expected, dropped;
840 824
841 expected = rx_queue->removed_count & FALCON_RXD_RING_MASK; 825 expected = rx_queue->removed_count & EFX_RXQ_MASK;
842 dropped = ((index + FALCON_RXD_RING_SIZE - expected) & 826 dropped = (index - expected) & EFX_RXQ_MASK;
843 FALCON_RXD_RING_MASK);
844 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n", 827 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
845 dropped, index, expected); 828 dropped, index, expected);
846 829
@@ -866,17 +849,18 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
866 struct efx_nic *efx = channel->efx; 849 struct efx_nic *efx = channel->efx;
867 850
868 /* Basic packet information */ 851 /* Basic packet information */
869 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT); 852 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
870 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK); 853 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
871 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 854 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
872 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); 855 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
873 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); 856 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
874 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel); 857 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
858 channel->channel);
875 859
876 rx_queue = &efx->rx_queue[channel->channel]; 860 rx_queue = &efx->rx_queue[channel->channel];
877 861
878 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); 862 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
879 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; 863 expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
880 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 864 if (unlikely(rx_ev_desc_ptr != expected_ptr))
881 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 865 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
882 866
@@ -884,7 +868,10 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
884 /* If packet is marked as OK and packet type is TCP/IPv4 or 868 /* If packet is marked as OK and packet type is TCP/IPv4 or
885 * UDP/IPv4, then we can rely on the hardware checksum. 869 * UDP/IPv4, then we can rely on the hardware checksum.
886 */ 870 */
887 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); 871 checksummed =
872 efx->rx_checksum_enabled &&
873 (rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
874 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
888 } else { 875 } else {
889 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, 876 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
890 &discard); 877 &discard);
@@ -892,10 +879,10 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
892 } 879 }
893 880
894 /* Detect multicast packets that didn't match the filter */ 881 /* Detect multicast packets that didn't match the filter */
895 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); 882 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
896 if (rx_ev_mcast_pkt) { 883 if (rx_ev_mcast_pkt) {
897 unsigned int rx_ev_mcast_hash_match = 884 unsigned int rx_ev_mcast_hash_match =
898 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); 885 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
899 886
900 if (unlikely(!rx_ev_mcast_hash_match)) 887 if (unlikely(!rx_ev_mcast_hash_match))
901 discard = true; 888 discard = true;
@@ -915,22 +902,23 @@ static void falcon_handle_global_event(struct efx_channel *channel,
915 struct efx_nic *efx = channel->efx; 902 struct efx_nic *efx = channel->efx;
916 bool handled = false; 903 bool handled = false;
917 904
918 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || 905 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
919 EFX_QWORD_FIELD(*event, G_PHY1_INTR) || 906 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
920 EFX_QWORD_FIELD(*event, XG_PHY_INTR) || 907 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
921 EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
922 efx->phy_op->clear_interrupt(efx); 908 efx->phy_op->clear_interrupt(efx);
923 queue_work(efx->workqueue, &efx->phy_work); 909 queue_work(efx->workqueue, &efx->phy_work);
924 handled = true; 910 handled = true;
925 } 911 }
926 912
927 if ((falcon_rev(efx) >= FALCON_REV_B0) && 913 if ((falcon_rev(efx) >= FALCON_REV_B0) &&
928 EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) { 914 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
929 queue_work(efx->workqueue, &efx->mac_work); 915 queue_work(efx->workqueue, &efx->mac_work);
930 handled = true; 916 handled = true;
931 } 917 }
932 918
933 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) { 919 if (falcon_rev(efx) <= FALCON_REV_A1 ?
920 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
921 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
934 EFX_ERR(efx, "channel %d seen global RX_RESET " 922 EFX_ERR(efx, "channel %d seen global RX_RESET "
935 "event. Resetting.\n", channel->channel); 923 "event. Resetting.\n", channel->channel);
936 924
@@ -953,35 +941,35 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
953 unsigned int ev_sub_code; 941 unsigned int ev_sub_code;
954 unsigned int ev_sub_data; 942 unsigned int ev_sub_data;
955 943
956 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); 944 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
957 ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA); 945 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
958 946
959 switch (ev_sub_code) { 947 switch (ev_sub_code) {
960 case TX_DESCQ_FLS_DONE_EV_DECODE: 948 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
961 EFX_TRACE(efx, "channel %d TXQ %d flushed\n", 949 EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
962 channel->channel, ev_sub_data); 950 channel->channel, ev_sub_data);
963 break; 951 break;
964 case RX_DESCQ_FLS_DONE_EV_DECODE: 952 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
965 EFX_TRACE(efx, "channel %d RXQ %d flushed\n", 953 EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
966 channel->channel, ev_sub_data); 954 channel->channel, ev_sub_data);
967 break; 955 break;
968 case EVQ_INIT_DONE_EV_DECODE: 956 case FSE_AZ_EVQ_INIT_DONE_EV:
969 EFX_LOG(efx, "channel %d EVQ %d initialised\n", 957 EFX_LOG(efx, "channel %d EVQ %d initialised\n",
970 channel->channel, ev_sub_data); 958 channel->channel, ev_sub_data);
971 break; 959 break;
972 case SRM_UPD_DONE_EV_DECODE: 960 case FSE_AZ_SRM_UPD_DONE_EV:
973 EFX_TRACE(efx, "channel %d SRAM update done\n", 961 EFX_TRACE(efx, "channel %d SRAM update done\n",
974 channel->channel); 962 channel->channel);
975 break; 963 break;
976 case WAKE_UP_EV_DECODE: 964 case FSE_AZ_WAKE_UP_EV:
977 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", 965 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
978 channel->channel, ev_sub_data); 966 channel->channel, ev_sub_data);
979 break; 967 break;
980 case TIMER_EV_DECODE: 968 case FSE_AZ_TIMER_EV:
981 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", 969 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
982 channel->channel, ev_sub_data); 970 channel->channel, ev_sub_data);
983 break; 971 break;
984 case RX_RECOVERY_EV_DECODE: 972 case FSE_AA_RX_RECOVER_EV:
985 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " 973 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
986 "Resetting.\n", channel->channel); 974 "Resetting.\n", channel->channel);
987 atomic_inc(&efx->rx_reset); 975 atomic_inc(&efx->rx_reset);
@@ -990,12 +978,12 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
990 RESET_TYPE_RX_RECOVERY : 978 RESET_TYPE_RX_RECOVERY :
991 RESET_TYPE_DISABLE); 979 RESET_TYPE_DISABLE);
992 break; 980 break;
993 case RX_DSC_ERROR_EV_DECODE: 981 case FSE_BZ_RX_DSC_ERROR_EV:
994 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." 982 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
995 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 983 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
996 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 984 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
997 break; 985 break;
998 case TX_DSC_ERROR_EV_DECODE: 986 case FSE_BZ_TX_DSC_ERROR_EV:
999 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." 987 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
1000 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 988 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1001 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 989 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
@@ -1031,27 +1019,27 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
1031 /* Clear this event by marking it all ones */ 1019 /* Clear this event by marking it all ones */
1032 EFX_SET_QWORD(*p_event); 1020 EFX_SET_QWORD(*p_event);
1033 1021
1034 ev_code = EFX_QWORD_FIELD(event, EV_CODE); 1022 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1035 1023
1036 switch (ev_code) { 1024 switch (ev_code) {
1037 case RX_IP_EV_DECODE: 1025 case FSE_AZ_EV_CODE_RX_EV:
1038 falcon_handle_rx_event(channel, &event); 1026 falcon_handle_rx_event(channel, &event);
1039 ++rx_packets; 1027 ++rx_packets;
1040 break; 1028 break;
1041 case TX_IP_EV_DECODE: 1029 case FSE_AZ_EV_CODE_TX_EV:
1042 falcon_handle_tx_event(channel, &event); 1030 falcon_handle_tx_event(channel, &event);
1043 break; 1031 break;
1044 case DRV_GEN_EV_DECODE: 1032 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1045 channel->eventq_magic 1033 channel->eventq_magic = EFX_QWORD_FIELD(
1046 = EFX_QWORD_FIELD(event, EVQ_MAGIC); 1034 event, FSF_AZ_DRV_GEN_EV_MAGIC);
1047 EFX_LOG(channel->efx, "channel %d received generated " 1035 EFX_LOG(channel->efx, "channel %d received generated "
1048 "event "EFX_QWORD_FMT"\n", channel->channel, 1036 "event "EFX_QWORD_FMT"\n", channel->channel,
1049 EFX_QWORD_VAL(event)); 1037 EFX_QWORD_VAL(event));
1050 break; 1038 break;
1051 case GLOBAL_EV_DECODE: 1039 case FSE_AZ_EV_CODE_GLOBAL_EV:
1052 falcon_handle_global_event(channel, &event); 1040 falcon_handle_global_event(channel, &event);
1053 break; 1041 break;
1054 case DRIVER_EV_DECODE: 1042 case FSE_AZ_EV_CODE_DRIVER_EV:
1055 falcon_handle_driver_event(channel, &event); 1043 falcon_handle_driver_event(channel, &event);
1056 break; 1044 break;
1057 default: 1045 default:
@@ -1061,7 +1049,7 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
1061 } 1049 }
1062 1050
1063 /* Increment read pointer */ 1051 /* Increment read pointer */
1064 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 1052 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1065 1053
1066 } while (rx_packets < rx_quota); 1054 } while (rx_packets < rx_quota);
1067 1055
@@ -1076,26 +1064,20 @@ void falcon_set_int_moderation(struct efx_channel *channel)
1076 1064
1077 /* Set timer register */ 1065 /* Set timer register */
1078 if (channel->irq_moderation) { 1066 if (channel->irq_moderation) {
1079 /* Round to resolution supported by hardware. The value we
1080 * program is based at 0. So actual interrupt moderation
1081 * achieved is ((x + 1) * res).
1082 */
1083 channel->irq_moderation -= (channel->irq_moderation %
1084 FALCON_IRQ_MOD_RESOLUTION);
1085 if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
1086 channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
1087 EFX_POPULATE_DWORD_2(timer_cmd, 1067 EFX_POPULATE_DWORD_2(timer_cmd,
1088 TIMER_MODE, TIMER_MODE_INT_HLDOFF, 1068 FRF_AB_TC_TIMER_MODE,
1089 TIMER_VAL, 1069 FFE_BB_TIMER_MODE_INT_HLDOFF,
1090 channel->irq_moderation / 1070 FRF_AB_TC_TIMER_VAL,
1091 FALCON_IRQ_MOD_RESOLUTION - 1); 1071 channel->irq_moderation - 1);
1092 } else { 1072 } else {
1093 EFX_POPULATE_DWORD_2(timer_cmd, 1073 EFX_POPULATE_DWORD_2(timer_cmd,
1094 TIMER_MODE, TIMER_MODE_DIS, 1074 FRF_AB_TC_TIMER_MODE,
1095 TIMER_VAL, 0); 1075 FFE_BB_TIMER_MODE_DIS,
1076 FRF_AB_TC_TIMER_VAL, 0);
1096 } 1077 }
1097 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, 1078 BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
1098 channel->channel); 1079 efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
1080 channel->channel);
1099 1081
1100} 1082}
1101 1083
@@ -1103,10 +1085,10 @@ void falcon_set_int_moderation(struct efx_channel *channel)
1103int falcon_probe_eventq(struct efx_channel *channel) 1085int falcon_probe_eventq(struct efx_channel *channel)
1104{ 1086{
1105 struct efx_nic *efx = channel->efx; 1087 struct efx_nic *efx = channel->efx;
1106 unsigned int evq_size; 1088 BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
1107 1089 EFX_EVQ_SIZE & EFX_EVQ_MASK);
1108 evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t); 1090 return falcon_alloc_special_buffer(efx, &channel->eventq,
1109 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); 1091 EFX_EVQ_SIZE * sizeof(efx_qword_t));
1110} 1092}
1111 1093
1112void falcon_init_eventq(struct efx_channel *channel) 1094void falcon_init_eventq(struct efx_channel *channel)
@@ -1126,11 +1108,11 @@ void falcon_init_eventq(struct efx_channel *channel)
1126 1108
1127 /* Push event queue to card */ 1109 /* Push event queue to card */
1128 EFX_POPULATE_OWORD_3(evq_ptr, 1110 EFX_POPULATE_OWORD_3(evq_ptr,
1129 EVQ_EN, 1, 1111 FRF_AZ_EVQ_EN, 1,
1130 EVQ_SIZE, FALCON_EVQ_ORDER, 1112 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1131 EVQ_BUF_BASE_ID, channel->eventq.index); 1113 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1132 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, 1114 efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
1133 channel->channel); 1115 channel->channel);
1134 1116
1135 falcon_set_int_moderation(channel); 1117 falcon_set_int_moderation(channel);
1136} 1118}
@@ -1142,8 +1124,8 @@ void falcon_fini_eventq(struct efx_channel *channel)
1142 1124
1143 /* Remove event queue from card */ 1125 /* Remove event queue from card */
1144 EFX_ZERO_OWORD(eventq_ptr); 1126 EFX_ZERO_OWORD(eventq_ptr);
1145 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base, 1127 efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
1146 channel->channel); 1128 channel->channel);
1147 1129
1148 /* Unpin event queue */ 1130 /* Unpin event queue */
1149 falcon_fini_special_buffer(efx, &channel->eventq); 1131 falcon_fini_special_buffer(efx, &channel->eventq);
@@ -1164,9 +1146,9 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1164{ 1146{
1165 efx_qword_t test_event; 1147 efx_qword_t test_event;
1166 1148
1167 EFX_POPULATE_QWORD_2(test_event, 1149 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1168 EV_CODE, DRV_GEN_EV_DECODE, 1150 FSE_AZ_EV_CODE_DRV_GEN_EV,
1169 EVQ_MAGIC, magic); 1151 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1170 falcon_generate_event(channel, &test_event); 1152 falcon_generate_event(channel, &test_event);
1171} 1153}
1172 1154
@@ -1174,11 +1156,12 @@ void falcon_sim_phy_event(struct efx_nic *efx)
1174{ 1156{
1175 efx_qword_t phy_event; 1157 efx_qword_t phy_event;
1176 1158
1177 EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE); 1159 EFX_POPULATE_QWORD_1(phy_event, FSF_AZ_EV_CODE,
1160 FSE_AZ_EV_CODE_GLOBAL_EV);
1178 if (EFX_IS10G(efx)) 1161 if (EFX_IS10G(efx))
1179 EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1); 1162 EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_XG_PHY0_INTR, 1);
1180 else 1163 else
1181 EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1); 1164 EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_G_PHY0_INTR, 1);
1182 1165
1183 falcon_generate_event(&efx->channel[0], &phy_event); 1166 falcon_generate_event(&efx->channel[0], &phy_event);
1184} 1167}
@@ -1196,7 +1179,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
1196 struct efx_tx_queue *tx_queue; 1179 struct efx_tx_queue *tx_queue;
1197 struct efx_rx_queue *rx_queue; 1180 struct efx_rx_queue *rx_queue;
1198 unsigned int read_ptr = channel->eventq_read_ptr; 1181 unsigned int read_ptr = channel->eventq_read_ptr;
1199 unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK; 1182 unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
1200 1183
1201 do { 1184 do {
1202 efx_qword_t *event = falcon_event(channel, read_ptr); 1185 efx_qword_t *event = falcon_event(channel, read_ptr);
@@ -1206,22 +1189,23 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
1206 if (!falcon_event_present(event)) 1189 if (!falcon_event_present(event))
1207 break; 1190 break;
1208 1191
1209 ev_code = EFX_QWORD_FIELD(*event, EV_CODE); 1192 ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
1210 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); 1193 ev_sub_code = EFX_QWORD_FIELD(*event,
1211 if (ev_code == DRIVER_EV_DECODE && 1194 FSF_AZ_DRIVER_EV_SUBCODE);
1212 ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) { 1195 if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1196 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1213 ev_queue = EFX_QWORD_FIELD(*event, 1197 ev_queue = EFX_QWORD_FIELD(*event,
1214 DRIVER_EV_TX_DESCQ_ID); 1198 FSF_AZ_DRIVER_EV_SUBDATA);
1215 if (ev_queue < EFX_TX_QUEUE_COUNT) { 1199 if (ev_queue < EFX_TX_QUEUE_COUNT) {
1216 tx_queue = efx->tx_queue + ev_queue; 1200 tx_queue = efx->tx_queue + ev_queue;
1217 tx_queue->flushed = true; 1201 tx_queue->flushed = true;
1218 } 1202 }
1219 } else if (ev_code == DRIVER_EV_DECODE && 1203 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1220 ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) { 1204 ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
1221 ev_queue = EFX_QWORD_FIELD(*event, 1205 ev_queue = EFX_QWORD_FIELD(
1222 DRIVER_EV_RX_DESCQ_ID); 1206 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1223 ev_failed = EFX_QWORD_FIELD(*event, 1207 ev_failed = EFX_QWORD_FIELD(
1224 DRIVER_EV_RX_FLUSH_FAIL); 1208 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1225 if (ev_queue < efx->n_rx_queues) { 1209 if (ev_queue < efx->n_rx_queues) {
1226 rx_queue = efx->rx_queue + ev_queue; 1210 rx_queue = efx->rx_queue + ev_queue;
1227 1211
@@ -1233,7 +1217,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
1233 } 1217 }
1234 } 1218 }
1235 1219
1236 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 1220 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1237 } while (read_ptr != end_ptr); 1221 } while (read_ptr != end_ptr);
1238} 1222}
1239 1223
@@ -1311,9 +1295,9 @@ static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
1311 efx_oword_t int_en_reg_ker; 1295 efx_oword_t int_en_reg_ker;
1312 1296
1313 EFX_POPULATE_OWORD_2(int_en_reg_ker, 1297 EFX_POPULATE_OWORD_2(int_en_reg_ker,
1314 KER_INT_KER, force, 1298 FRF_AZ_KER_INT_KER, force,
1315 DRV_INT_EN_KER, enabled); 1299 FRF_AZ_DRV_INT_EN_KER, enabled);
1316 falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER); 1300 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1317} 1301}
1318 1302
1319void falcon_enable_interrupts(struct efx_nic *efx) 1303void falcon_enable_interrupts(struct efx_nic *efx)
@@ -1326,9 +1310,10 @@ void falcon_enable_interrupts(struct efx_nic *efx)
1326 1310
1327 /* Program address */ 1311 /* Program address */
1328 EFX_POPULATE_OWORD_2(int_adr_reg_ker, 1312 EFX_POPULATE_OWORD_2(int_adr_reg_ker,
1329 NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx), 1313 FRF_AZ_NORM_INT_VEC_DIS_KER,
1330 INT_ADR_KER, efx->irq_status.dma_addr); 1314 EFX_INT_MODE_USE_MSI(efx),
1331 falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER); 1315 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1316 efx_writeo(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER);
1332 1317
1333 /* Enable interrupts */ 1318 /* Enable interrupts */
1334 falcon_interrupts(efx, 1, 0); 1319 falcon_interrupts(efx, 1, 0);
@@ -1368,9 +1353,9 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
1368{ 1353{
1369 efx_dword_t reg; 1354 efx_dword_t reg;
1370 1355
1371 EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e); 1356 EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
1372 falcon_writel(efx, &reg, INT_ACK_REG_KER_A1); 1357 efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
1373 falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1); 1358 efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
1374} 1359}
1375 1360
1376/* Process a fatal interrupt 1361/* Process a fatal interrupt
@@ -1383,8 +1368,8 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1383 efx_oword_t fatal_intr; 1368 efx_oword_t fatal_intr;
1384 int error, mem_perr; 1369 int error, mem_perr;
1385 1370
1386 falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER); 1371 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1387 error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR); 1372 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1388 1373
1389 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " 1374 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
1390 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1375 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
@@ -1394,10 +1379,10 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1394 goto out; 1379 goto out;
1395 1380
1396 /* If this is a memory parity error dump which blocks are offending */ 1381 /* If this is a memory parity error dump which blocks are offending */
1397 mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER); 1382 mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
1398 if (mem_perr) { 1383 if (mem_perr) {
1399 efx_oword_t reg; 1384 efx_oword_t reg;
1400 falcon_read(efx, &reg, MEM_STAT_REG_KER); 1385 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1401 EFX_ERR(efx, "SYSTEM ERROR: memory parity error " 1386 EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
1402 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); 1387 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1403 } 1388 }
@@ -1409,13 +1394,13 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1409 falcon_disable_interrupts(efx); 1394 falcon_disable_interrupts(efx);
1410 1395
1411 /* Count errors and reset or disable the NIC accordingly */ 1396 /* Count errors and reset or disable the NIC accordingly */
1412 if (nic_data->int_error_count == 0 || 1397 if (efx->int_error_count == 0 ||
1413 time_after(jiffies, nic_data->int_error_expire)) { 1398 time_after(jiffies, efx->int_error_expire)) {
1414 nic_data->int_error_count = 0; 1399 efx->int_error_count = 0;
1415 nic_data->int_error_expire = 1400 efx->int_error_expire =
1416 jiffies + FALCON_INT_ERROR_EXPIRE * HZ; 1401 jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
1417 } 1402 }
1418 if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) { 1403 if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
1419 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); 1404 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
1420 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1405 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1421 } else { 1406 } else {
@@ -1441,11 +1426,11 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1441 int syserr; 1426 int syserr;
1442 1427
1443 /* Read the ISR which also ACKs the interrupts */ 1428 /* Read the ISR which also ACKs the interrupts */
1444 falcon_readl(efx, &reg, INT_ISR0_B0); 1429 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1445 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1430 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1446 1431
1447 /* Check to see if we have a serious error condition */ 1432 /* Check to see if we have a serious error condition */
1448 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); 1433 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1449 if (unlikely(syserr)) 1434 if (unlikely(syserr))
1450 return falcon_fatal_interrupt(efx); 1435 return falcon_fatal_interrupt(efx);
1451 1436
@@ -1491,7 +1476,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1491 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1476 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1492 1477
1493 /* Check to see if we have a serious error condition */ 1478 /* Check to see if we have a serious error condition */
1494 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); 1479 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1495 if (unlikely(syserr)) 1480 if (unlikely(syserr))
1496 return falcon_fatal_interrupt(efx); 1481 return falcon_fatal_interrupt(efx);
1497 1482
@@ -1558,12 +1543,12 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1558 if (falcon_rev(efx) < FALCON_REV_B0) 1543 if (falcon_rev(efx) < FALCON_REV_B0)
1559 return; 1544 return;
1560 1545
1561 for (offset = RX_RSS_INDIR_TBL_B0; 1546 for (offset = FR_BZ_RX_INDIRECTION_TBL;
1562 offset < RX_RSS_INDIR_TBL_B0 + 0x800; 1547 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
1563 offset += 0x10) { 1548 offset += 0x10) {
1564 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, 1549 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1565 i % efx->n_rx_queues); 1550 i % efx->n_rx_queues);
1566 falcon_writel(efx, &dword, offset); 1551 efx_writed(efx, &dword, offset);
1567 i++; 1552 i++;
1568 } 1553 }
1569} 1554}
@@ -1626,7 +1611,7 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1626 1611
1627 /* ACK legacy interrupt */ 1612 /* ACK legacy interrupt */
1628 if (falcon_rev(efx) >= FALCON_REV_B0) 1613 if (falcon_rev(efx) >= FALCON_REV_B0)
1629 falcon_read(efx, &reg, INT_ISR0_B0); 1614 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1630 else 1615 else
1631 falcon_irq_ack_a1(efx); 1616 falcon_irq_ack_a1(efx);
1632 1617
@@ -1647,8 +1632,8 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1647static int falcon_spi_poll(struct efx_nic *efx) 1632static int falcon_spi_poll(struct efx_nic *efx)
1648{ 1633{
1649 efx_oword_t reg; 1634 efx_oword_t reg;
1650 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER); 1635 efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
1651 return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; 1636 return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
1652} 1637}
1653 1638
1654/* Wait for SPI command completion */ 1639/* Wait for SPI command completion */
@@ -1700,27 +1685,27 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
1700 1685
1701 /* Program address register, if we have an address */ 1686 /* Program address register, if we have an address */
1702 if (addressed) { 1687 if (addressed) {
1703 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); 1688 EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
1704 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER); 1689 efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
1705 } 1690 }
1706 1691
1707 /* Program data register, if we have data */ 1692 /* Program data register, if we have data */
1708 if (in != NULL) { 1693 if (in != NULL) {
1709 memcpy(&reg, in, len); 1694 memcpy(&reg, in, len);
1710 falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER); 1695 efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
1711 } 1696 }
1712 1697
1713 /* Issue read/write command */ 1698 /* Issue read/write command */
1714 EFX_POPULATE_OWORD_7(reg, 1699 EFX_POPULATE_OWORD_7(reg,
1715 EE_SPI_HCMD_CMD_EN, 1, 1700 FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
1716 EE_SPI_HCMD_SF_SEL, spi->device_id, 1701 FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
1717 EE_SPI_HCMD_DABCNT, len, 1702 FRF_AB_EE_SPI_HCMD_DABCNT, len,
1718 EE_SPI_HCMD_READ, reading, 1703 FRF_AB_EE_SPI_HCMD_READ, reading,
1719 EE_SPI_HCMD_DUBCNT, 0, 1704 FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
1720 EE_SPI_HCMD_ADBCNT, 1705 FRF_AB_EE_SPI_HCMD_ADBCNT,
1721 (addressed ? spi->addr_len : 0), 1706 (addressed ? spi->addr_len : 0),
1722 EE_SPI_HCMD_ENC, command); 1707 FRF_AB_EE_SPI_HCMD_ENC, command);
1723 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER); 1708 efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
1724 1709
1725 /* Wait for read/write to complete */ 1710 /* Wait for read/write to complete */
1726 rc = falcon_spi_wait(efx); 1711 rc = falcon_spi_wait(efx);
@@ -1729,7 +1714,7 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
1729 1714
1730 /* Read data */ 1715 /* Read data */
1731 if (out != NULL) { 1716 if (out != NULL) {
1732 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER); 1717 efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
1733 memcpy(out, &reg, len); 1718 memcpy(out, &reg, len);
1734 } 1719 }
1735 1720
@@ -1870,21 +1855,22 @@ static int falcon_reset_macs(struct efx_nic *efx)
1870 * macs, so instead use the internal MAC resets 1855 * macs, so instead use the internal MAC resets
1871 */ 1856 */
1872 if (!EFX_IS10G(efx)) { 1857 if (!EFX_IS10G(efx)) {
1873 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1); 1858 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
1874 falcon_write(efx, &reg, GM_CFG1_REG); 1859 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
1875 udelay(1000); 1860 udelay(1000);
1876 1861
1877 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0); 1862 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
1878 falcon_write(efx, &reg, GM_CFG1_REG); 1863 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
1879 udelay(1000); 1864 udelay(1000);
1880 return 0; 1865 return 0;
1881 } else { 1866 } else {
1882 EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1); 1867 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
1883 falcon_write(efx, &reg, XM_GLB_CFG_REG); 1868 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
1884 1869
1885 for (count = 0; count < 10000; count++) { 1870 for (count = 0; count < 10000; count++) {
1886 falcon_read(efx, &reg, XM_GLB_CFG_REG); 1871 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
1887 if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0) 1872 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
1873 0)
1888 return 0; 1874 return 0;
1889 udelay(10); 1875 udelay(10);
1890 } 1876 }
@@ -1898,22 +1884,22 @@ static int falcon_reset_macs(struct efx_nic *efx)
1898 * the drain sequence with the statistics fetch */ 1884 * the drain sequence with the statistics fetch */
1899 efx_stats_disable(efx); 1885 efx_stats_disable(efx);
1900 1886
1901 falcon_read(efx, &reg, MAC0_CTRL_REG_KER); 1887 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
1902 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); 1888 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
1903 falcon_write(efx, &reg, MAC0_CTRL_REG_KER); 1889 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
1904 1890
1905 falcon_read(efx, &reg, GLB_CTL_REG_KER); 1891 efx_reado(efx, &reg, FR_AB_GLB_CTL);
1906 EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1); 1892 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
1907 EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1); 1893 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
1908 EFX_SET_OWORD_FIELD(reg, RST_EM, 1); 1894 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
1909 falcon_write(efx, &reg, GLB_CTL_REG_KER); 1895 efx_writeo(efx, &reg, FR_AB_GLB_CTL);
1910 1896
1911 count = 0; 1897 count = 0;
1912 while (1) { 1898 while (1) {
1913 falcon_read(efx, &reg, GLB_CTL_REG_KER); 1899 efx_reado(efx, &reg, FR_AB_GLB_CTL);
1914 if (!EFX_OWORD_FIELD(reg, RST_XGTX) && 1900 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
1915 !EFX_OWORD_FIELD(reg, RST_XGRX) && 1901 !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
1916 !EFX_OWORD_FIELD(reg, RST_EM)) { 1902 !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
1917 EFX_LOG(efx, "Completed MAC reset after %d loops\n", 1903 EFX_LOG(efx, "Completed MAC reset after %d loops\n",
1918 count); 1904 count);
1919 break; 1905 break;
@@ -1944,9 +1930,9 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
1944 (efx->loopback_mode != LOOPBACK_NONE)) 1930 (efx->loopback_mode != LOOPBACK_NONE))
1945 return; 1931 return;
1946 1932
1947 falcon_read(efx, &reg, MAC0_CTRL_REG_KER); 1933 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
1948 /* There is no point in draining more than once */ 1934 /* There is no point in draining more than once */
1949 if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0)) 1935 if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
1950 return; 1936 return;
1951 1937
1952 falcon_reset_macs(efx); 1938 falcon_reset_macs(efx);
@@ -1960,9 +1946,9 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1960 return; 1946 return;
1961 1947
1962 /* Isolate the MAC -> RX */ 1948 /* Isolate the MAC -> RX */
1963 falcon_read(efx, &reg, RX_CFG_REG_KER); 1949 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1964 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0); 1950 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
1965 falcon_write(efx, &reg, RX_CFG_REG_KER); 1951 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1966 1952
1967 if (!efx->link_up) 1953 if (!efx->link_up)
1968 falcon_drain_tx_fifo(efx); 1954 falcon_drain_tx_fifo(efx);
@@ -1985,19 +1971,19 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1985 * indefinitely held and TX queue can be flushed at any point 1971 * indefinitely held and TX queue can be flushed at any point
1986 * while the link is down. */ 1972 * while the link is down. */
1987 EFX_POPULATE_OWORD_5(reg, 1973 EFX_POPULATE_OWORD_5(reg,
1988 MAC_XOFF_VAL, 0xffff /* max pause time */, 1974 FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
1989 MAC_BCAD_ACPT, 1, 1975 FRF_AB_MAC_BCAD_ACPT, 1,
1990 MAC_UC_PROM, efx->promiscuous, 1976 FRF_AB_MAC_UC_PROM, efx->promiscuous,
1991 MAC_LINK_STATUS, 1, /* always set */ 1977 FRF_AB_MAC_LINK_STATUS, 1, /* always set */
1992 MAC_SPEED, link_speed); 1978 FRF_AB_MAC_SPEED, link_speed);
1993 /* On B0, MAC backpressure can be disabled and packets get 1979 /* On B0, MAC backpressure can be disabled and packets get
1994 * discarded. */ 1980 * discarded. */
1995 if (falcon_rev(efx) >= FALCON_REV_B0) { 1981 if (falcon_rev(efx) >= FALCON_REV_B0) {
1996 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1982 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
1997 !efx->link_up); 1983 !efx->link_up);
1998 } 1984 }
1999 1985
2000 falcon_write(efx, &reg, MAC0_CTRL_REG_KER); 1986 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
2001 1987
2002 /* Restore the multicast hash registers. */ 1988 /* Restore the multicast hash registers. */
2003 falcon_set_multicast_hash(efx); 1989 falcon_set_multicast_hash(efx);
@@ -2006,13 +1992,13 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
2006 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. 1992 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
2007 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ 1993 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
2008 tx_fc = !!(efx->link_fc & EFX_FC_TX); 1994 tx_fc = !!(efx->link_fc & EFX_FC_TX);
2009 falcon_read(efx, &reg, RX_CFG_REG_KER); 1995 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2010 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1996 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);
2011 1997
2012 /* Unisolate the MAC -> RX */ 1998 /* Unisolate the MAC -> RX */
2013 if (falcon_rev(efx) >= FALCON_REV_B0) 1999 if (falcon_rev(efx) >= FALCON_REV_B0)
2014 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); 2000 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2015 falcon_write(efx, &reg, RX_CFG_REG_KER); 2001 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
2016} 2002}
2017 2003
2018int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) 2004int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
@@ -2027,8 +2013,8 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
2027 /* Statistics fetch will fail if the MAC is in TX drain */ 2013 /* Statistics fetch will fail if the MAC is in TX drain */
2028 if (falcon_rev(efx) >= FALCON_REV_B0) { 2014 if (falcon_rev(efx) >= FALCON_REV_B0) {
2029 efx_oword_t temp; 2015 efx_oword_t temp;
2030 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); 2016 efx_reado(efx, &temp, FR_AB_MAC_CTRL);
2031 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) 2017 if (EFX_OWORD_FIELD(temp, FRF_BB_TXFIFO_DRAIN_EN))
2032 return 0; 2018 return 0;
2033 } 2019 }
2034 2020
@@ -2038,10 +2024,10 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
2038 2024
2039 /* Initiate DMA transfer of stats */ 2025 /* Initiate DMA transfer of stats */
2040 EFX_POPULATE_OWORD_2(reg, 2026 EFX_POPULATE_OWORD_2(reg,
2041 MAC_STAT_DMA_CMD, 1, 2027 FRF_AB_MAC_STAT_DMA_CMD, 1,
2042 MAC_STAT_DMA_ADR, 2028 FRF_AB_MAC_STAT_DMA_ADR,
2043 efx->stats_buffer.dma_addr); 2029 efx->stats_buffer.dma_addr);
2044 falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER); 2030 efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
2045 2031
2046 /* Wait for transfer to complete */ 2032 /* Wait for transfer to complete */
2047 for (i = 0; i < 400; i++) { 2033 for (i = 0; i < 400; i++) {
@@ -2071,10 +2057,10 @@ static int falcon_gmii_wait(struct efx_nic *efx)
2071 2057
2072 /* wait upto 50ms - taken max from datasheet */ 2058 /* wait upto 50ms - taken max from datasheet */
2073 for (count = 0; count < 5000; count++) { 2059 for (count = 0; count < 5000; count++) {
2074 falcon_readl(efx, &md_stat, MD_STAT_REG_KER); 2060 efx_readd(efx, &md_stat, FR_AB_MD_STAT);
2075 if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) { 2061 if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
2076 if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 || 2062 if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
2077 EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) { 2063 EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
2078 EFX_ERR(efx, "error from GMII access " 2064 EFX_ERR(efx, "error from GMII access "
2079 EFX_DWORD_FMT"\n", 2065 EFX_DWORD_FMT"\n",
2080 EFX_DWORD_VAL(md_stat)); 2066 EFX_DWORD_VAL(md_stat));
@@ -2107,29 +2093,30 @@ static int falcon_mdio_write(struct net_device *net_dev,
2107 goto out; 2093 goto out;
2108 2094
2109 /* Write the address/ID register */ 2095 /* Write the address/ID register */
2110 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); 2096 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2111 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER); 2097 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2112 2098
2113 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); 2099 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2114 falcon_write(efx, &reg, MD_ID_REG_KER); 2100 FRF_AB_MD_DEV_ADR, devad);
2101 efx_writeo(efx, &reg, FR_AB_MD_ID);
2115 2102
2116 /* Write data */ 2103 /* Write data */
2117 EFX_POPULATE_OWORD_1(reg, MD_TXD, value); 2104 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
2118 falcon_write(efx, &reg, MD_TXD_REG_KER); 2105 efx_writeo(efx, &reg, FR_AB_MD_TXD);
2119 2106
2120 EFX_POPULATE_OWORD_2(reg, 2107 EFX_POPULATE_OWORD_2(reg,
2121 MD_WRC, 1, 2108 FRF_AB_MD_WRC, 1,
2122 MD_GC, 0); 2109 FRF_AB_MD_GC, 0);
2123 falcon_write(efx, &reg, MD_CS_REG_KER); 2110 efx_writeo(efx, &reg, FR_AB_MD_CS);
2124 2111
2125 /* Wait for data to be written */ 2112 /* Wait for data to be written */
2126 rc = falcon_gmii_wait(efx); 2113 rc = falcon_gmii_wait(efx);
2127 if (rc) { 2114 if (rc) {
2128 /* Abort the write operation */ 2115 /* Abort the write operation */
2129 EFX_POPULATE_OWORD_2(reg, 2116 EFX_POPULATE_OWORD_2(reg,
2130 MD_WRC, 0, 2117 FRF_AB_MD_WRC, 0,
2131 MD_GC, 1); 2118 FRF_AB_MD_GC, 1);
2132 falcon_write(efx, &reg, MD_CS_REG_KER); 2119 efx_writeo(efx, &reg, FR_AB_MD_CS);
2133 udelay(10); 2120 udelay(10);
2134 } 2121 }
2135 2122
@@ -2153,29 +2140,30 @@ static int falcon_mdio_read(struct net_device *net_dev,
2153 if (rc) 2140 if (rc)
2154 goto out; 2141 goto out;
2155 2142
2156 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); 2143 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2157 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER); 2144 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2158 2145
2159 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); 2146 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2160 falcon_write(efx, &reg, MD_ID_REG_KER); 2147 FRF_AB_MD_DEV_ADR, devad);
2148 efx_writeo(efx, &reg, FR_AB_MD_ID);
2161 2149
2162 /* Request data to be read */ 2150 /* Request data to be read */
2163 EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0); 2151 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
2164 falcon_write(efx, &reg, MD_CS_REG_KER); 2152 efx_writeo(efx, &reg, FR_AB_MD_CS);
2165 2153
2166 /* Wait for data to become available */ 2154 /* Wait for data to become available */
2167 rc = falcon_gmii_wait(efx); 2155 rc = falcon_gmii_wait(efx);
2168 if (rc == 0) { 2156 if (rc == 0) {
2169 falcon_read(efx, &reg, MD_RXD_REG_KER); 2157 efx_reado(efx, &reg, FR_AB_MD_RXD);
2170 rc = EFX_OWORD_FIELD(reg, MD_RXD); 2158 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
2171 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", 2159 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
2172 prtad, devad, addr, rc); 2160 prtad, devad, addr, rc);
2173 } else { 2161 } else {
2174 /* Abort the read operation */ 2162 /* Abort the read operation */
2175 EFX_POPULATE_OWORD_2(reg, 2163 EFX_POPULATE_OWORD_2(reg,
2176 MD_RIC, 0, 2164 FRF_AB_MD_RIC, 0,
2177 MD_GC, 1); 2165 FRF_AB_MD_GC, 1);
2178 falcon_write(efx, &reg, MD_CS_REG_KER); 2166 efx_writeo(efx, &reg, FR_AB_MD_CS);
2179 2167
2180 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", 2168 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
2181 prtad, devad, addr, rc); 2169 prtad, devad, addr, rc);
@@ -2186,37 +2174,6 @@ static int falcon_mdio_read(struct net_device *net_dev,
2186 return rc; 2174 return rc;
2187} 2175}
2188 2176
2189static int falcon_probe_phy(struct efx_nic *efx)
2190{
2191 switch (efx->phy_type) {
2192 case PHY_TYPE_SFX7101:
2193 efx->phy_op = &falcon_sfx7101_phy_ops;
2194 break;
2195 case PHY_TYPE_SFT9001A:
2196 case PHY_TYPE_SFT9001B:
2197 efx->phy_op = &falcon_sft9001_phy_ops;
2198 break;
2199 case PHY_TYPE_QT2022C2:
2200 case PHY_TYPE_QT2025C:
2201 efx->phy_op = &falcon_xfp_phy_ops;
2202 break;
2203 default:
2204 EFX_ERR(efx, "Unknown PHY type %d\n",
2205 efx->phy_type);
2206 return -1;
2207 }
2208
2209 if (efx->phy_op->macs & EFX_XMAC)
2210 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2211 (1 << LOOPBACK_XGXS) |
2212 (1 << LOOPBACK_XAUI));
2213 if (efx->phy_op->macs & EFX_GMAC)
2214 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2215 efx->loopback_modes |= efx->phy_op->loopbacks;
2216
2217 return 0;
2218}
2219
2220int falcon_switch_mac(struct efx_nic *efx) 2177int falcon_switch_mac(struct efx_nic *efx)
2221{ 2178{
2222 struct efx_mac_operations *old_mac_op = efx->mac_op; 2179 struct efx_mac_operations *old_mac_op = efx->mac_op;
@@ -2242,16 +2199,17 @@ int falcon_switch_mac(struct efx_nic *efx)
2242 2199
2243 /* Always push the NIC_STAT_REG setting even if the mac hasn't 2200 /* Always push the NIC_STAT_REG setting even if the mac hasn't
2244 * changed, because this function is run post online reset */ 2201 * changed, because this function is run post online reset */
2245 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2202 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2246 strap_val = EFX_IS10G(efx) ? 5 : 3; 2203 strap_val = EFX_IS10G(efx) ? 5 : 3;
2247 if (falcon_rev(efx) >= FALCON_REV_B0) { 2204 if (falcon_rev(efx) >= FALCON_REV_B0) {
2248 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1); 2205 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
2249 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val); 2206 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
2250 falcon_write(efx, &nic_stat, NIC_STAT_REG); 2207 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
2251 } else { 2208 } else {
2252 /* Falcon A1 does not support 1G/10G speed switching 2209 /* Falcon A1 does not support 1G/10G speed switching
2253 * and must not be used with a PHY that does. */ 2210 * and must not be used with a PHY that does. */
2254 BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val); 2211 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
2212 strap_val);
2255 } 2213 }
2256 2214
2257 if (old_mac_op == efx->mac_op) 2215 if (old_mac_op == efx->mac_op)
@@ -2272,10 +2230,31 @@ int falcon_probe_port(struct efx_nic *efx)
2272{ 2230{
2273 int rc; 2231 int rc;
2274 2232
2275 /* Hook in PHY operations table */ 2233 switch (efx->phy_type) {
2276 rc = falcon_probe_phy(efx); 2234 case PHY_TYPE_SFX7101:
2277 if (rc) 2235 efx->phy_op = &falcon_sfx7101_phy_ops;
2278 return rc; 2236 break;
2237 case PHY_TYPE_SFT9001A:
2238 case PHY_TYPE_SFT9001B:
2239 efx->phy_op = &falcon_sft9001_phy_ops;
2240 break;
2241 case PHY_TYPE_QT2022C2:
2242 case PHY_TYPE_QT2025C:
2243 efx->phy_op = &falcon_qt202x_phy_ops;
2244 break;
2245 default:
2246 EFX_ERR(efx, "Unknown PHY type %d\n",
2247 efx->phy_type);
2248 return -ENODEV;
2249 }
2250
2251 if (efx->phy_op->macs & EFX_XMAC)
2252 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2253 (1 << LOOPBACK_XGXS) |
2254 (1 << LOOPBACK_XAUI));
2255 if (efx->phy_op->macs & EFX_GMAC)
2256 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2257 efx->loopback_modes |= efx->phy_op->loopbacks;
2279 2258
2280 /* Set up MDIO structure for PHY */ 2259 /* Set up MDIO structure for PHY */
2281 efx->mdio.mmds = efx->phy_op->mmds; 2260 efx->mdio.mmds = efx->phy_op->mmds;
@@ -2324,8 +2303,8 @@ void falcon_set_multicast_hash(struct efx_nic *efx)
2324 */ 2303 */
2325 set_bit_le(0xff, mc_hash->byte); 2304 set_bit_le(0xff, mc_hash->byte);
2326 2305
2327 falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER); 2306 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
2328 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); 2307 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
2329} 2308}
2330 2309
2331 2310
@@ -2351,7 +2330,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2351 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); 2330 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2352 if (!region) 2331 if (!region)
2353 return -ENOMEM; 2332 return -ENOMEM;
2354 nvconfig = region + NVCONFIG_OFFSET; 2333 nvconfig = region + FALCON_NVCONFIG_OFFSET;
2355 2334
2356 mutex_lock(&efx->spi_lock); 2335 mutex_lock(&efx->spi_lock);
2357 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region); 2336 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
@@ -2367,7 +2346,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2367 struct_ver = le16_to_cpu(nvconfig->board_struct_ver); 2346 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2368 2347
2369 rc = -EINVAL; 2348 rc = -EINVAL;
2370 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) { 2349 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
2371 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); 2350 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2372 goto out; 2351 goto out;
2373 } 2352 }
@@ -2403,41 +2382,41 @@ static struct {
2403 unsigned address; 2382 unsigned address;
2404 efx_oword_t mask; 2383 efx_oword_t mask;
2405} efx_test_registers[] = { 2384} efx_test_registers[] = {
2406 { ADR_REGION_REG_KER, 2385 { FR_AZ_ADR_REGION,
2407 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, 2386 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2408 { RX_CFG_REG_KER, 2387 { FR_AZ_RX_CFG,
2409 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, 2388 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2410 { TX_CFG_REG_KER, 2389 { FR_AZ_TX_CFG,
2411 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, 2390 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2412 { TX_CFG2_REG_KER, 2391 { FR_AZ_TX_RESERVED,
2413 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, 2392 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2414 { MAC0_CTRL_REG_KER, 2393 { FR_AB_MAC_CTRL,
2415 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, 2394 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2416 { SRM_TX_DC_CFG_REG_KER, 2395 { FR_AZ_SRM_TX_DC_CFG,
2417 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, 2396 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2418 { RX_DC_CFG_REG_KER, 2397 { FR_AZ_RX_DC_CFG,
2419 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, 2398 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2420 { RX_DC_PF_WM_REG_KER, 2399 { FR_AZ_RX_DC_PF_WM,
2421 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, 2400 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2422 { DP_CTRL_REG, 2401 { FR_BZ_DP_CTRL,
2423 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, 2402 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2424 { GM_CFG2_REG, 2403 { FR_AB_GM_CFG2,
2425 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, 2404 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2426 { GMF_CFG0_REG, 2405 { FR_AB_GMF_CFG0,
2427 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, 2406 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2428 { XM_GLB_CFG_REG, 2407 { FR_AB_XM_GLB_CFG,
2429 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, 2408 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2430 { XM_TX_CFG_REG, 2409 { FR_AB_XM_TX_CFG,
2431 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, 2410 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2432 { XM_RX_CFG_REG, 2411 { FR_AB_XM_RX_CFG,
2433 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, 2412 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2434 { XM_RX_PARAM_REG, 2413 { FR_AB_XM_RX_PARAM,
2435 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, 2414 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2436 { XM_FC_REG, 2415 { FR_AB_XM_FC,
2437 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, 2416 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2438 { XM_ADR_LO_REG, 2417 { FR_AB_XM_ADR_LO,
2439 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, 2418 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2440 { XX_SD_CTL_REG, 2419 { FR_AB_XX_SD_CTL,
2441 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, 2420 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2442}; 2421};
2443 2422
@@ -2461,7 +2440,7 @@ int falcon_test_registers(struct efx_nic *efx)
2461 mask = imask = efx_test_registers[i].mask; 2440 mask = imask = efx_test_registers[i].mask;
2462 EFX_INVERT_OWORD(imask); 2441 EFX_INVERT_OWORD(imask);
2463 2442
2464 falcon_read(efx, &original, address); 2443 efx_reado(efx, &original, address);
2465 2444
2466 /* bit sweep on and off */ 2445 /* bit sweep on and off */
2467 for (j = 0; j < 128; j++) { 2446 for (j = 0; j < 128; j++) {
@@ -2472,8 +2451,8 @@ int falcon_test_registers(struct efx_nic *efx)
2472 EFX_AND_OWORD(reg, original, mask); 2451 EFX_AND_OWORD(reg, original, mask);
2473 EFX_SET_OWORD32(reg, j, j, 1); 2452 EFX_SET_OWORD32(reg, j, j, 1);
2474 2453
2475 falcon_write(efx, &reg, address); 2454 efx_writeo(efx, &reg, address);
2476 falcon_read(efx, &buf, address); 2455 efx_reado(efx, &buf, address);
2477 2456
2478 if (efx_masked_compare_oword(&reg, &buf, &mask)) 2457 if (efx_masked_compare_oword(&reg, &buf, &mask))
2479 goto fail; 2458 goto fail;
@@ -2482,14 +2461,14 @@ int falcon_test_registers(struct efx_nic *efx)
2482 EFX_OR_OWORD(reg, original, mask); 2461 EFX_OR_OWORD(reg, original, mask);
2483 EFX_SET_OWORD32(reg, j, j, 0); 2462 EFX_SET_OWORD32(reg, j, j, 0);
2484 2463
2485 falcon_write(efx, &reg, address); 2464 efx_writeo(efx, &reg, address);
2486 falcon_read(efx, &buf, address); 2465 efx_reado(efx, &buf, address);
2487 2466
2488 if (efx_masked_compare_oword(&reg, &buf, &mask)) 2467 if (efx_masked_compare_oword(&reg, &buf, &mask))
2489 goto fail; 2468 goto fail;
2490 } 2469 }
2491 2470
2492 falcon_write(efx, &original, address); 2471 efx_writeo(efx, &original, address);
2493 } 2472 }
2494 2473
2495 return 0; 2474 return 0;
@@ -2537,22 +2516,24 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2537 } 2516 }
2538 2517
2539 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, 2518 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2540 EXT_PHY_RST_DUR, 0x7, 2519 FRF_AB_EXT_PHY_RST_DUR,
2541 SWRST, 1); 2520 FFE_AB_EXT_PHY_RST_DUR_10240US,
2521 FRF_AB_SWRST, 1);
2542 } else { 2522 } else {
2543 int reset_phy = (method == RESET_TYPE_INVISIBLE ?
2544 EXCLUDE_FROM_RESET : 0);
2545
2546 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, 2523 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2547 EXT_PHY_RST_CTL, reset_phy, 2524 /* exclude PHY from "invisible" reset */
2548 PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET, 2525 FRF_AB_EXT_PHY_RST_CTL,
2549 PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET, 2526 method == RESET_TYPE_INVISIBLE,
2550 PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET, 2527 /* exclude EEPROM/flash and PCIe */
2551 EE_RST_CTL, EXCLUDE_FROM_RESET, 2528 FRF_AB_PCIE_CORE_RST_CTL, 1,
2552 EXT_PHY_RST_DUR, 0x7 /* 10ms */, 2529 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
2553 SWRST, 1); 2530 FRF_AB_PCIE_SD_RST_CTL, 1,
2554 } 2531 FRF_AB_EE_RST_CTL, 1,
2555 falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); 2532 FRF_AB_EXT_PHY_RST_DUR,
2533 FFE_AB_EXT_PHY_RST_DUR_10240US,
2534 FRF_AB_SWRST, 1);
2535 }
2536 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2556 2537
2557 EFX_LOG(efx, "waiting for hardware reset\n"); 2538 EFX_LOG(efx, "waiting for hardware reset\n");
2558 schedule_timeout_uninterruptible(HZ / 20); 2539 schedule_timeout_uninterruptible(HZ / 20);
@@ -2577,8 +2558,8 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2577 } 2558 }
2578 2559
2579 /* Assert that reset complete */ 2560 /* Assert that reset complete */
2580 falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); 2561 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2581 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) { 2562 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
2582 rc = -ETIMEDOUT; 2563 rc = -ETIMEDOUT;
2583 EFX_ERR(efx, "timed out waiting for hardware reset\n"); 2564 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2584 goto fail5; 2565 goto fail5;
@@ -2606,16 +2587,16 @@ static int falcon_reset_sram(struct efx_nic *efx)
2606 int count; 2587 int count;
2607 2588
2608 /* Set the SRAM wake/sleep GPIO appropriately. */ 2589 /* Set the SRAM wake/sleep GPIO appropriately. */
2609 falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); 2590 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2610 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1); 2591 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2611 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1); 2592 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2612 falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); 2593 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2613 2594
2614 /* Initiate SRAM reset */ 2595 /* Initiate SRAM reset */
2615 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, 2596 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2616 SRAM_OOB_BT_INIT_EN, 1, 2597 FRF_AZ_SRM_INIT_EN, 1,
2617 SRM_NUM_BANKS_AND_BANK_SIZE, 0); 2598 FRF_AZ_SRM_NB_SZ, 0);
2618 falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); 2599 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2619 2600
2620 /* Wait for SRAM reset to complete */ 2601 /* Wait for SRAM reset to complete */
2621 count = 0; 2602 count = 0;
@@ -2626,8 +2607,8 @@ static int falcon_reset_sram(struct efx_nic *efx)
2626 schedule_timeout_uninterruptible(HZ / 50); 2607 schedule_timeout_uninterruptible(HZ / 50);
2627 2608
2628 /* Check for reset complete */ 2609 /* Check for reset complete */
2629 falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); 2610 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2630 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) { 2611 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2631 EFX_LOG(efx, "SRAM reset complete\n"); 2612 EFX_LOG(efx, "SRAM reset complete\n");
2632 2613
2633 return 0; 2614 return 0;
@@ -2712,16 +2693,16 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2712 board_rev = le16_to_cpu(v2->board_revision); 2693 board_rev = le16_to_cpu(v2->board_revision);
2713 2694
2714 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { 2695 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2715 __le32 fl = v3->spi_device_type[EE_SPI_FLASH]; 2696 rc = falcon_spi_device_init(
2716 __le32 ee = v3->spi_device_type[EE_SPI_EEPROM]; 2697 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2717 rc = falcon_spi_device_init(efx, &efx->spi_flash, 2698 le32_to_cpu(v3->spi_device_type
2718 EE_SPI_FLASH, 2699 [FFE_AB_SPI_DEVICE_FLASH]));
2719 le32_to_cpu(fl));
2720 if (rc) 2700 if (rc)
2721 goto fail2; 2701 goto fail2;
2722 rc = falcon_spi_device_init(efx, &efx->spi_eeprom, 2702 rc = falcon_spi_device_init(
2723 EE_SPI_EEPROM, 2703 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2724 le32_to_cpu(ee)); 2704 le32_to_cpu(v3->spi_device_type
2705 [FFE_AB_SPI_DEVICE_EEPROM]));
2725 if (rc) 2706 if (rc)
2726 goto fail2; 2707 goto fail2;
2727 } 2708 }
@@ -2732,7 +2713,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2732 2713
2733 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); 2714 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
2734 2715
2735 efx_set_board_info(efx, board_rev); 2716 falcon_probe_board(efx, board_rev);
2736 2717
2737 kfree(nvconfig); 2718 kfree(nvconfig);
2738 return 0; 2719 return 0;
@@ -2752,13 +2733,13 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2752 efx_oword_t altera_build; 2733 efx_oword_t altera_build;
2753 efx_oword_t nic_stat; 2734 efx_oword_t nic_stat;
2754 2735
2755 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER); 2736 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
2756 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) { 2737 if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
2757 EFX_ERR(efx, "Falcon FPGA not supported\n"); 2738 EFX_ERR(efx, "Falcon FPGA not supported\n");
2758 return -ENODEV; 2739 return -ENODEV;
2759 } 2740 }
2760 2741
2761 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2742 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2762 2743
2763 switch (falcon_rev(efx)) { 2744 switch (falcon_rev(efx)) {
2764 case FALCON_REV_A0: 2745 case FALCON_REV_A0:
@@ -2767,7 +2748,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2767 return -ENODEV; 2748 return -ENODEV;
2768 2749
2769 case FALCON_REV_A1: 2750 case FALCON_REV_A1:
2770 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) { 2751 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
2771 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); 2752 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2772 return -ENODEV; 2753 return -ENODEV;
2773 } 2754 }
@@ -2782,7 +2763,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2782 } 2763 }
2783 2764
2784 /* Initial assumed speed */ 2765 /* Initial assumed speed */
2785 efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000; 2766 efx->link_speed = EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) ? 10000 : 1000;
2786 2767
2787 return 0; 2768 return 0;
2788} 2769}
@@ -2793,34 +2774,36 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
2793 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; 2774 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2794 int boot_dev; 2775 int boot_dev;
2795 2776
2796 falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER); 2777 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2797 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2778 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2798 falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); 2779 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2799 2780
2800 if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) { 2781 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2801 boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ? 2782 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2802 EE_SPI_FLASH : EE_SPI_EEPROM); 2783 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2803 EFX_LOG(efx, "Booted from %s\n", 2784 EFX_LOG(efx, "Booted from %s\n",
2804 boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM"); 2785 boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
2805 } else { 2786 } else {
2806 /* Disable VPD and set clock dividers to safe 2787 /* Disable VPD and set clock dividers to safe
2807 * values for initial programming. */ 2788 * values for initial programming. */
2808 boot_dev = -1; 2789 boot_dev = -1;
2809 EFX_LOG(efx, "Booted from internal ASIC settings;" 2790 EFX_LOG(efx, "Booted from internal ASIC settings;"
2810 " setting SPI config\n"); 2791 " setting SPI config\n");
2811 EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0, 2792 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2812 /* 125 MHz / 7 ~= 20 MHz */ 2793 /* 125 MHz / 7 ~= 20 MHz */
2813 EE_SF_CLOCK_DIV, 7, 2794 FRF_AB_EE_SF_CLOCK_DIV, 7,
2814 /* 125 MHz / 63 ~= 2 MHz */ 2795 /* 125 MHz / 63 ~= 2 MHz */
2815 EE_EE_CLOCK_DIV, 63); 2796 FRF_AB_EE_EE_CLOCK_DIV, 63);
2816 falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); 2797 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2817 } 2798 }
2818 2799
2819 if (boot_dev == EE_SPI_FLASH) 2800 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
2820 falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH, 2801 falcon_spi_device_init(efx, &efx->spi_flash,
2802 FFE_AB_SPI_DEVICE_FLASH,
2821 default_flash_type); 2803 default_flash_type);
2822 if (boot_dev == EE_SPI_EEPROM) 2804 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
2823 falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM, 2805 falcon_spi_device_init(efx, &efx->spi_eeprom,
2806 FFE_AB_SPI_DEVICE_EEPROM,
2824 large_eeprom_type); 2807 large_eeprom_type);
2825} 2808}
2826 2809
@@ -2911,6 +2894,52 @@ int falcon_probe_nic(struct efx_nic *efx)
2911 return rc; 2894 return rc;
2912} 2895}
2913 2896
2897static void falcon_init_rx_cfg(struct efx_nic *efx)
2898{
2899 /* Prior to Siena the RX DMA engine will split each frame at
2900 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
2901 * be so large that that never happens. */
2902 const unsigned huge_buf_size = (3 * 4096) >> 5;
2903 /* RX control FIFO thresholds (32 entries) */
2904 const unsigned ctrl_xon_thr = 20;
2905 const unsigned ctrl_xoff_thr = 25;
2906 /* RX data FIFO thresholds (256-byte units; size varies) */
2907 int data_xon_thr = rx_xon_thresh_bytes >> 8;
2908 int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
2909 efx_oword_t reg;
2910
2911 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2912 if (falcon_rev(efx) <= FALCON_REV_A1) {
2913 /* Data FIFO size is 5.5K */
2914 if (data_xon_thr < 0)
2915 data_xon_thr = 512 >> 8;
2916 if (data_xoff_thr < 0)
2917 data_xoff_thr = 2048 >> 8;
2918 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2919 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
2920 huge_buf_size);
2921 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
2922 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
2923 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2924 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
2925 } else {
2926 /* Data FIFO size is 80K; register fields moved */
2927 if (data_xon_thr < 0)
2928 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
2929 if (data_xoff_thr < 0)
2930 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
2931 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
2932 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
2933 huge_buf_size);
2934 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
2935 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
2936 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
2937 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
2938 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2939 }
2940 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
2941}
2942
2914/* This call performs hardware-specific global initialisation, such as 2943/* This call performs hardware-specific global initialisation, such as
2915 * defining the descriptor cache sizes and number of RSS channels. 2944 * defining the descriptor cache sizes and number of RSS channels.
2916 * It does not set up any buffers, descriptor rings or event queues. 2945 * It does not set up any buffers, descriptor rings or event queues.
@@ -2918,56 +2947,51 @@ int falcon_probe_nic(struct efx_nic *efx)
2918int falcon_init_nic(struct efx_nic *efx) 2947int falcon_init_nic(struct efx_nic *efx)
2919{ 2948{
2920 efx_oword_t temp; 2949 efx_oword_t temp;
2921 unsigned thresh;
2922 int rc; 2950 int rc;
2923 2951
2924 /* Use on-chip SRAM */ 2952 /* Use on-chip SRAM */
2925 falcon_read(efx, &temp, NIC_STAT_REG); 2953 efx_reado(efx, &temp, FR_AB_NIC_STAT);
2926 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1); 2954 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
2927 falcon_write(efx, &temp, NIC_STAT_REG); 2955 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
2928 2956
2929 /* Set the source of the GMAC clock */ 2957 /* Set the source of the GMAC clock */
2930 if (falcon_rev(efx) == FALCON_REV_B0) { 2958 if (falcon_rev(efx) == FALCON_REV_B0) {
2931 falcon_read(efx, &temp, GPIO_CTL_REG_KER); 2959 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
2932 EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true); 2960 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
2933 falcon_write(efx, &temp, GPIO_CTL_REG_KER); 2961 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
2934 } 2962 }
2935 2963
2936 /* Set buffer table mode */
2937 EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
2938 falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
2939
2940 rc = falcon_reset_sram(efx); 2964 rc = falcon_reset_sram(efx);
2941 if (rc) 2965 if (rc)
2942 return rc; 2966 return rc;
2943 2967
2944 /* Set positions of descriptor caches in SRAM. */ 2968 /* Set positions of descriptor caches in SRAM. */
2945 EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8); 2969 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
2946 falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER); 2970 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
2947 EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8); 2971 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
2948 falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER); 2972 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
2949 2973
2950 /* Set TX descriptor cache size. */ 2974 /* Set TX descriptor cache size. */
2951 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER)); 2975 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
2952 EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 2976 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
2953 falcon_write(efx, &temp, TX_DC_CFG_REG_KER); 2977 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
2954 2978
2955 /* Set RX descriptor cache size. Set low watermark to size-8, as 2979 /* Set RX descriptor cache size. Set low watermark to size-8, as
2956 * this allows most efficient prefetching. 2980 * this allows most efficient prefetching.
2957 */ 2981 */
2958 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER)); 2982 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
2959 EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 2983 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
2960 falcon_write(efx, &temp, RX_DC_CFG_REG_KER); 2984 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
2961 EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 2985 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
2962 falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER); 2986 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
2963 2987
2964 /* Clear the parity enables on the TX data fifos as 2988 /* Clear the parity enables on the TX data fifos as
2965 * they produce false parity errors because of timing issues 2989 * they produce false parity errors because of timing issues
2966 */ 2990 */
2967 if (EFX_WORKAROUND_5129(efx)) { 2991 if (EFX_WORKAROUND_5129(efx)) {
2968 falcon_read(efx, &temp, SPARE_REG_KER); 2992 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
2969 EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0); 2993 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
2970 falcon_write(efx, &temp, SPARE_REG_KER); 2994 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
2971 } 2995 }
2972 2996
2973 /* Enable all the genuinely fatal interrupts. (They are still 2997 /* Enable all the genuinely fatal interrupts. (They are still
@@ -2977,83 +3001,65 @@ int falcon_init_nic(struct efx_nic *efx)
2977 * Note: All other fatal interrupts are enabled 3001 * Note: All other fatal interrupts are enabled
2978 */ 3002 */
2979 EFX_POPULATE_OWORD_3(temp, 3003 EFX_POPULATE_OWORD_3(temp,
2980 ILL_ADR_INT_KER_EN, 1, 3004 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
2981 RBUF_OWN_INT_KER_EN, 1, 3005 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
2982 TBUF_OWN_INT_KER_EN, 1); 3006 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
2983 EFX_INVERT_OWORD(temp); 3007 EFX_INVERT_OWORD(temp);
2984 falcon_write(efx, &temp, FATAL_INTR_REG_KER); 3008 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
2985 3009
2986 if (EFX_WORKAROUND_7244(efx)) { 3010 if (EFX_WORKAROUND_7244(efx)) {
2987 falcon_read(efx, &temp, RX_FILTER_CTL_REG); 3011 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
2988 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); 3012 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
2989 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); 3013 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
2990 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); 3014 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
2991 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); 3015 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
2992 falcon_write(efx, &temp, RX_FILTER_CTL_REG); 3016 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
2993 } 3017 }
2994 3018
2995 falcon_setup_rss_indir_table(efx); 3019 falcon_setup_rss_indir_table(efx);
2996 3020
3021 /* XXX This is documented only for Falcon A0/A1 */
2997 /* Setup RX. Wait for descriptor is broken and must 3022 /* Setup RX. Wait for descriptor is broken and must
2998 * be disabled. RXDP recovery shouldn't be needed, but is. 3023 * be disabled. RXDP recovery shouldn't be needed, but is.
2999 */ 3024 */
3000 falcon_read(efx, &temp, RX_SELF_RST_REG_KER); 3025 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
3001 EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1); 3026 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
3002 EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1); 3027 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
3003 if (EFX_WORKAROUND_5583(efx)) 3028 if (EFX_WORKAROUND_5583(efx))
3004 EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1); 3029 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
3005 falcon_write(efx, &temp, RX_SELF_RST_REG_KER); 3030 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
3006 3031
3007 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 3032 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3008 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 3033 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3009 */ 3034 */
3010 falcon_read(efx, &temp, TX_CFG2_REG_KER); 3035 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
3011 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe); 3036 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
3012 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1); 3037 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
3013 EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1); 3038 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
3014 EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0); 3039 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
3015 EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1); 3040 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
3016 /* Enable SW_EV to inherit in char driver - assume harmless here */ 3041 /* Enable SW_EV to inherit in char driver - assume harmless here */
3017 EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1); 3042 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
3018 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 3043 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3019 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); 3044 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
3020 /* Squash TX of packets of 16 bytes or less */ 3045 /* Squash TX of packets of 16 bytes or less */
3021 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) 3046 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
3022 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); 3047 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
3023 falcon_write(efx, &temp, TX_CFG2_REG_KER); 3048 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
3024 3049
3025 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 3050 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3026 * descriptors (which is bad). 3051 * descriptors (which is bad).
3027 */ 3052 */
3028 falcon_read(efx, &temp, TX_CFG_REG_KER); 3053 efx_reado(efx, &temp, FR_AZ_TX_CFG);
3029 EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0); 3054 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
3030 falcon_write(efx, &temp, TX_CFG_REG_KER); 3055 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
3031 3056
3032 /* RX config */ 3057 falcon_init_rx_cfg(efx);
3033 falcon_read(efx, &temp, RX_CFG_REG_KER);
3034 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
3035 if (EFX_WORKAROUND_7575(efx))
3036 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
3037 (3 * 4096) / 32);
3038 if (falcon_rev(efx) >= FALCON_REV_B0)
3039 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
3040
3041 /* RX FIFO flow control thresholds */
3042 thresh = ((rx_xon_thresh_bytes >= 0) ?
3043 rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
3044 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
3045 thresh = ((rx_xoff_thresh_bytes >= 0) ?
3046 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
3047 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
3048 /* RX control FIFO thresholds [32 entries] */
3049 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
3050 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
3051 falcon_write(efx, &temp, RX_CFG_REG_KER);
3052 3058
3053 /* Set destination of both TX and RX Flush events */ 3059 /* Set destination of both TX and RX Flush events */
3054 if (falcon_rev(efx) >= FALCON_REV_B0) { 3060 if (falcon_rev(efx) >= FALCON_REV_B0) {
3055 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); 3061 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
3056 falcon_write(efx, &temp, DP_CTRL_REG); 3062 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
3057 } 3063 }
3058 3064
3059 return 0; 3065 return 0;
@@ -3089,8 +3095,9 @@ void falcon_update_nic_stats(struct efx_nic *efx)
3089{ 3095{
3090 efx_oword_t cnt; 3096 efx_oword_t cnt;
3091 3097
3092 falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER); 3098 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
3093 efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT); 3099 efx->n_rx_nodesc_drop_cnt +=
3100 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
3094} 3101}
3095 3102
3096/************************************************************************** 3103/**************************************************************************
@@ -3101,45 +3108,31 @@ void falcon_update_nic_stats(struct efx_nic *efx)
3101 */ 3108 */
3102 3109
3103struct efx_nic_type falcon_a_nic_type = { 3110struct efx_nic_type falcon_a_nic_type = {
3104 .mem_bar = 2,
3105 .mem_map_size = 0x20000, 3111 .mem_map_size = 0x20000,
3106 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1, 3112 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
3107 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1, 3113 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
3108 .buf_tbl_base = BUF_TBL_KER_A1, 3114 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
3109 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1, 3115 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
3110 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1, 3116 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
3111 .txd_ring_mask = FALCON_TXD_RING_MASK, 3117 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3112 .rxd_ring_mask = FALCON_RXD_RING_MASK,
3113 .evq_size = FALCON_EVQ_SIZE,
3114 .max_dma_mask = FALCON_DMA_MASK,
3115 .tx_dma_mask = FALCON_TX_DMA_MASK,
3116 .bug5391_mask = 0xf,
3117 .rx_xoff_thresh = 2048,
3118 .rx_xon_thresh = 512,
3119 .rx_buffer_padding = 0x24, 3118 .rx_buffer_padding = 0x24,
3120 .max_interrupt_mode = EFX_INT_MODE_MSI, 3119 .max_interrupt_mode = EFX_INT_MODE_MSI,
3121 .phys_addr_channels = 4, 3120 .phys_addr_channels = 4,
3122}; 3121};
3123 3122
3124struct efx_nic_type falcon_b_nic_type = { 3123struct efx_nic_type falcon_b_nic_type = {
3125 .mem_bar = 2,
3126 /* Map everything up to and including the RSS indirection 3124 /* Map everything up to and including the RSS indirection
3127 * table. Don't map MSI-X table, MSI-X PBA since Linux 3125 * table. Don't map MSI-X table, MSI-X PBA since Linux
3128 * requires that they not be mapped. */ 3126 * requires that they not be mapped. */
3129 .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800, 3127 .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
3130 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0, 3128 FR_BZ_RX_INDIRECTION_TBL_STEP *
3131 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0, 3129 FR_BZ_RX_INDIRECTION_TBL_ROWS),
3132 .buf_tbl_base = BUF_TBL_KER_B0, 3130 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
3133 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0, 3131 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
3134 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0, 3132 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
3135 .txd_ring_mask = FALCON_TXD_RING_MASK, 3133 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
3136 .rxd_ring_mask = FALCON_RXD_RING_MASK, 3134 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
3137 .evq_size = FALCON_EVQ_SIZE, 3135 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3138 .max_dma_mask = FALCON_DMA_MASK,
3139 .tx_dma_mask = FALCON_TX_DMA_MASK,
3140 .bug5391_mask = 0,
3141 .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
3142 .rx_xon_thresh = 27648, /* ~3*max MTU */
3143 .rx_buffer_padding = 0, 3136 .rx_buffer_padding = 0,
3144 .max_interrupt_mode = EFX_INT_MODE_MSIX, 3137 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3145 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 3138 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 77f2e0db7ca1..4dd965774a90 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -39,6 +39,8 @@ extern struct efx_nic_type falcon_b_nic_type;
39 ************************************************************************** 39 **************************************************************************
40 */ 40 */
41 41
42extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
43
42/* TX data path */ 44/* TX data path */
43extern int falcon_probe_tx(struct efx_tx_queue *tx_queue); 45extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
44extern void falcon_init_tx(struct efx_tx_queue *tx_queue); 46extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
@@ -89,11 +91,9 @@ extern void falcon_fini_interrupt(struct efx_nic *efx);
89 91
90/* Global Resources */ 92/* Global Resources */
91extern int falcon_probe_nic(struct efx_nic *efx); 93extern int falcon_probe_nic(struct efx_nic *efx);
92extern int falcon_probe_resources(struct efx_nic *efx);
93extern int falcon_init_nic(struct efx_nic *efx); 94extern int falcon_init_nic(struct efx_nic *efx);
94extern int falcon_flush_queues(struct efx_nic *efx); 95extern int falcon_flush_queues(struct efx_nic *efx);
95extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); 96extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
96extern void falcon_remove_resources(struct efx_nic *efx);
97extern void falcon_remove_nic(struct efx_nic *efx); 97extern void falcon_remove_nic(struct efx_nic *efx);
98extern void falcon_update_nic_stats(struct efx_nic *efx); 98extern void falcon_update_nic_stats(struct efx_nic *efx);
99extern void falcon_set_multicast_hash(struct efx_nic *efx); 99extern void falcon_set_multicast_hash(struct efx_nic *efx);
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/falcon_boards.c
index cee00ad49b57..99f737223b10 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -7,6 +7,159 @@
7 * by the Free Software Foundation, incorporated herein by reference. 7 * by the Free Software Foundation, incorporated herein by reference.
8 */ 8 */
9 9
10#include <linux/rtnetlink.h>
11
12#include "net_driver.h"
13#include "phy.h"
14#include "efx.h"
15#include "falcon.h"
16#include "regs.h"
17#include "io.h"
18#include "workarounds.h"
19
20/* Macros for unpacking the board revision */
21/* The revision info is in host byte order. */
22#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
23#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
24#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
25
26/* Board types */
27#define FALCON_BOARD_SFE4001 0x01
28#define FALCON_BOARD_SFE4002 0x02
29#define FALCON_BOARD_SFN4111T 0x51
30#define FALCON_BOARD_SFN4112F 0x52
31
32/* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */
33#define BLINK_INTERVAL (HZ/2)
34
35static void blink_led_timer(unsigned long context)
36{
37 struct efx_nic *efx = (struct efx_nic *)context;
38 struct efx_board *board = &efx->board_info;
39
40 board->set_id_led(efx, board->blink_state);
41 board->blink_state = !board->blink_state;
42 if (board->blink_resubmit)
43 mod_timer(&board->blink_timer, jiffies + BLINK_INTERVAL);
44}
45
46static void board_blink(struct efx_nic *efx, bool blink)
47{
48 struct efx_board *board = &efx->board_info;
49
50 /* The rtnl mutex serialises all ethtool ioctls, so
51 * nothing special needs doing here. */
52 if (blink) {
53 board->blink_resubmit = true;
54 board->blink_state = false;
55 setup_timer(&board->blink_timer, blink_led_timer,
56 (unsigned long)efx);
57 mod_timer(&board->blink_timer, jiffies + BLINK_INTERVAL);
58 } else {
59 board->blink_resubmit = false;
60 if (board->blink_timer.function)
61 del_timer_sync(&board->blink_timer);
62 board->init_leds(efx);
63 }
64}
65
66/*****************************************************************************
67 * Support for LM87 sensor chip used on several boards
68 */
69#define LM87_REG_ALARMS1 0x41
70#define LM87_REG_ALARMS2 0x42
71#define LM87_IN_LIMITS(nr, _min, _max) \
72 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
73#define LM87_AIN_LIMITS(nr, _min, _max) \
74 0x3B + (nr), _max, 0x1A + (nr), _min
75#define LM87_TEMP_INT_LIMITS(_min, _max) \
76 0x39, _max, 0x3A, _min
77#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
78 0x37, _max, 0x38, _min
79
80#define LM87_ALARM_TEMP_INT 0x10
81#define LM87_ALARM_TEMP_EXT1 0x20
82
83#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
84
85static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
86 const u8 *reg_values)
87{
88 struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
89 int rc;
90
91 if (!client)
92 return -EIO;
93
94 while (*reg_values) {
95 u8 reg = *reg_values++;
96 u8 value = *reg_values++;
97 rc = i2c_smbus_write_byte_data(client, reg, value);
98 if (rc)
99 goto err;
100 }
101
102 efx->board_info.hwmon_client = client;
103 return 0;
104
105err:
106 i2c_unregister_device(client);
107 return rc;
108}
109
110static void efx_fini_lm87(struct efx_nic *efx)
111{
112 i2c_unregister_device(efx->board_info.hwmon_client);
113}
114
115static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
116{
117 struct i2c_client *client = efx->board_info.hwmon_client;
118 s32 alarms1, alarms2;
119
120 /* If link is up then do not monitor temperature */
121 if (EFX_WORKAROUND_7884(efx) && efx->link_up)
122 return 0;
123
124 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
125 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
126 if (alarms1 < 0)
127 return alarms1;
128 if (alarms2 < 0)
129 return alarms2;
130 alarms1 &= mask;
131 alarms2 &= mask >> 8;
132 if (alarms1 || alarms2) {
133 EFX_ERR(efx,
134 "LM87 detected a hardware failure (status %02x:%02x)"
135 "%s%s\n",
136 alarms1, alarms2,
137 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
138 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
139 return -ERANGE;
140 }
141
142 return 0;
143}
144
145#else /* !CONFIG_SENSORS_LM87 */
146
147static inline int
148efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
149 const u8 *reg_values)
150{
151 return 0;
152}
153static inline void efx_fini_lm87(struct efx_nic *efx)
154{
155}
156static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
157{
158 return 0;
159}
160
161#endif /* CONFIG_SENSORS_LM87 */
162
10/***************************************************************************** 163/*****************************************************************************
11 * Support for the SFE4001 and SFN4111T NICs. 164 * Support for the SFE4001 and SFN4111T NICs.
12 * 165 *
@@ -23,23 +176,9 @@
23 * exclusive with the network device being open. 176 * exclusive with the network device being open.
24 */ 177 */
25 178
26#include <linux/delay.h>
27#include <linux/rtnetlink.h>
28#include "net_driver.h"
29#include "efx.h"
30#include "phy.h"
31#include "boards.h"
32#include "falcon.h"
33#include "falcon_hwdefs.h"
34#include "falcon_io.h"
35#include "mac.h"
36#include "workarounds.h"
37
38/************************************************************************** 179/**************************************************************************
39 * 180 * Support for I2C IO Expander device on SFE40001
40 * I2C IO Expander device 181 */
41 *
42 **************************************************************************/
43#define PCA9539 0x74 182#define PCA9539 0x74
44 183
45#define P0_IN 0x00 184#define P0_IN 0x00
@@ -194,14 +333,14 @@ static int sfn4111t_reset(struct efx_nic *efx)
194 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the 333 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
195 * output enables; the output levels should always be 0 (low) 334 * output enables; the output levels should always be 0 (low)
196 * and we rely on external pull-ups. */ 335 * and we rely on external pull-ups. */
197 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 336 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
198 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true); 337 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
199 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 338 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
200 msleep(1000); 339 msleep(1000);
201 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false); 340 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
202 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, 341 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
203 !!(efx->phy_mode & PHY_MODE_SPECIAL)); 342 !!(efx->phy_mode & PHY_MODE_SPECIAL));
204 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 343 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
205 msleep(1); 344 msleep(1);
206 345
207 mutex_unlock(&efx->i2c_adap.bus_lock); 346 mutex_unlock(&efx->i2c_adap.bus_lock);
@@ -241,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
241 efx->phy_mode = new_mode; 380 efx->phy_mode = new_mode;
242 if (new_mode & PHY_MODE_SPECIAL) 381 if (new_mode & PHY_MODE_SPECIAL)
243 efx_stats_disable(efx); 382 efx_stats_disable(efx);
244 if (efx->board_info.type == EFX_BOARD_SFE4001) 383 if (efx->board_info.type == FALCON_BOARD_SFE4001)
245 err = sfe4001_poweron(efx); 384 err = sfe4001_poweron(efx);
246 else 385 else
247 err = sfn4111t_reset(efx); 386 err = sfn4111t_reset(efx);
@@ -302,7 +441,7 @@ static struct i2c_board_info sfe4001_hwmon_info = {
302 * be turned on before the PHY can be used. 441 * be turned on before the PHY can be used.
303 * Context: Process context, rtnl lock held 442 * Context: Process context, rtnl lock held
304 */ 443 */
305int sfe4001_init(struct efx_nic *efx) 444static int sfe4001_init(struct efx_nic *efx)
306{ 445{
307 int rc; 446 int rc;
308 447
@@ -394,7 +533,7 @@ static struct i2c_board_info sfn4111t_r5_hwmon_info = {
394 I2C_BOARD_INFO("max6646", 0x4d), 533 I2C_BOARD_INFO("max6646", 0x4d),
395}; 534};
396 535
397int sfn4111t_init(struct efx_nic *efx) 536static int sfn4111t_init(struct efx_nic *efx)
398{ 537{
399 int i = 0; 538 int i = 0;
400 int rc; 539 int rc;
@@ -433,3 +572,181 @@ fail_hwmon:
433 i2c_unregister_device(efx->board_info.hwmon_client); 572 i2c_unregister_device(efx->board_info.hwmon_client);
434 return rc; 573 return rc;
435} 574}
575
576/*****************************************************************************
577 * Support for the SFE4002
578 *
579 */
580static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
581
582static const u8 sfe4002_lm87_regs[] = {
583 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
584 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
585 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
586 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
587 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
588 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
589 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
590 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
591 LM87_TEMP_INT_LIMITS(10, 60), /* board */
592 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
593 0
594};
595
596static struct i2c_board_info sfe4002_hwmon_info = {
597 I2C_BOARD_INFO("lm87", 0x2e),
598 .platform_data = &sfe4002_lm87_channel,
599};
600
601/****************************************************************************/
602/* LED allocations. Note that on rev A0 boards the schematic and the reality
603 * differ: red and green are swapped. Below is the fixed (A1) layout (there
604 * are only 3 A0 boards in existence, so no real reason to make this
605 * conditional).
606 */
607#define SFE4002_FAULT_LED (2) /* Red */
608#define SFE4002_RX_LED (0) /* Green */
609#define SFE4002_TX_LED (1) /* Amber */
610
611static void sfe4002_init_leds(struct efx_nic *efx)
612{
613 /* Set the TX and RX LEDs to reflect status and activity, and the
614 * fault LED off */
615 falcon_qt202x_set_led(efx, SFE4002_TX_LED,
616 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
617 falcon_qt202x_set_led(efx, SFE4002_RX_LED,
618 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
619 falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
620}
621
622static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
623{
624 falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
625 QUAKE_LED_OFF);
626}
627
628static int sfe4002_check_hw(struct efx_nic *efx)
629{
630 /* A0 board rev. 4002s report a temperature fault the whole time
631 * (bad sensor) so we mask it out. */
632 unsigned alarm_mask =
633 (efx->board_info.major == 0 && efx->board_info.minor == 0) ?
634 ~LM87_ALARM_TEMP_EXT1 : ~0;
635
636 return efx_check_lm87(efx, alarm_mask);
637}
638
639static int sfe4002_init(struct efx_nic *efx)
640{
641 int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
642 if (rc)
643 return rc;
644 efx->board_info.monitor = sfe4002_check_hw;
645 efx->board_info.init_leds = sfe4002_init_leds;
646 efx->board_info.set_id_led = sfe4002_set_id_led;
647 efx->board_info.blink = board_blink;
648 efx->board_info.fini = efx_fini_lm87;
649 return 0;
650}
651
652/*****************************************************************************
653 * Support for the SFN4112F
654 *
655 */
656static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
657
658static const u8 sfn4112f_lm87_regs[] = {
659 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
660 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
661 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
662 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
663 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
664 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
665 LM87_TEMP_INT_LIMITS(10, 60), /* board */
666 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
667 0
668};
669
670static struct i2c_board_info sfn4112f_hwmon_info = {
671 I2C_BOARD_INFO("lm87", 0x2e),
672 .platform_data = &sfn4112f_lm87_channel,
673};
674
675#define SFN4112F_ACT_LED 0
676#define SFN4112F_LINK_LED 1
677
678static void sfn4112f_init_leds(struct efx_nic *efx)
679{
680 falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
681 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
682 falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
683 QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
684}
685
686static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
687{
688 falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
689 state ? QUAKE_LED_ON : QUAKE_LED_OFF);
690}
691
692static int sfn4112f_check_hw(struct efx_nic *efx)
693{
694 /* Mask out unused sensors */
695 return efx_check_lm87(efx, ~0x48);
696}
697
698static int sfn4112f_init(struct efx_nic *efx)
699{
700 int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
701 if (rc)
702 return rc;
703 efx->board_info.monitor = sfn4112f_check_hw;
704 efx->board_info.init_leds = sfn4112f_init_leds;
705 efx->board_info.set_id_led = sfn4112f_set_id_led;
706 efx->board_info.blink = board_blink;
707 efx->board_info.fini = efx_fini_lm87;
708 return 0;
709}
710
711/* This will get expanded as board-specific details get moved out of the
712 * PHY drivers. */
713struct falcon_board_data {
714 u8 type;
715 const char *ref_model;
716 const char *gen_type;
717 int (*init) (struct efx_nic *nic);
718};
719
720
721static struct falcon_board_data board_data[] = {
722 { FALCON_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
723 { FALCON_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
724 { FALCON_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
725 sfn4111t_init },
726 { FALCON_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
727 sfn4112f_init },
728};
729
730void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
731{
732 struct falcon_board_data *data = NULL;
733 int i;
734
735 efx->board_info.type = FALCON_BOARD_TYPE(revision_info);
736 efx->board_info.major = FALCON_BOARD_MAJOR(revision_info);
737 efx->board_info.minor = FALCON_BOARD_MINOR(revision_info);
738
739 for (i = 0; i < ARRAY_SIZE(board_data); i++)
740 if (board_data[i].type == efx->board_info.type)
741 data = &board_data[i];
742
743 if (data) {
744 EFX_INFO(efx, "board is %s rev %c%d\n",
745 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
746 ? data->ref_model : data->gen_type,
747 'A' + efx->board_info.major, efx->board_info.minor);
748 efx->board_info.init = data->init;
749 } else {
750 EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
751 }
752}
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
index 8865eae20ac5..8a1b80d1ff28 100644
--- a/drivers/net/sfc/falcon_gmac.c
+++ b/drivers/net/sfc/falcon_gmac.c
@@ -13,9 +13,8 @@
13#include "efx.h" 13#include "efx.h"
14#include "falcon.h" 14#include "falcon.h"
15#include "mac.h" 15#include "mac.h"
16#include "falcon_hwdefs.h" 16#include "regs.h"
17#include "falcon_io.h" 17#include "io.h"
18#include "gmii.h"
19 18
20/************************************************************************** 19/**************************************************************************
21 * 20 *
@@ -37,89 +36,89 @@ static void falcon_reconfigure_gmac(struct efx_nic *efx)
37 bytemode = (efx->link_speed == 1000); 36 bytemode = (efx->link_speed == 1000);
38 37
39 EFX_POPULATE_OWORD_5(reg, 38 EFX_POPULATE_OWORD_5(reg,
40 GM_LOOP, loopback, 39 FRF_AB_GM_LOOP, loopback,
41 GM_TX_EN, 1, 40 FRF_AB_GM_TX_EN, 1,
42 GM_TX_FC_EN, tx_fc, 41 FRF_AB_GM_TX_FC_EN, tx_fc,
43 GM_RX_EN, 1, 42 FRF_AB_GM_RX_EN, 1,
44 GM_RX_FC_EN, rx_fc); 43 FRF_AB_GM_RX_FC_EN, rx_fc);
45 falcon_write(efx, &reg, GM_CFG1_REG); 44 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
46 udelay(10); 45 udelay(10);
47 46
48 /* Configuration register 2 */ 47 /* Configuration register 2 */
49 if_mode = (bytemode) ? 2 : 1; 48 if_mode = (bytemode) ? 2 : 1;
50 EFX_POPULATE_OWORD_5(reg, 49 EFX_POPULATE_OWORD_5(reg,
51 GM_IF_MODE, if_mode, 50 FRF_AB_GM_IF_MODE, if_mode,
52 GM_PAD_CRC_EN, 1, 51 FRF_AB_GM_PAD_CRC_EN, 1,
53 GM_LEN_CHK, 1, 52 FRF_AB_GM_LEN_CHK, 1,
54 GM_FD, efx->link_fd, 53 FRF_AB_GM_FD, efx->link_fd,
55 GM_PAMBL_LEN, 0x7/*datasheet recommended */); 54 FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
56 55
57 falcon_write(efx, &reg, GM_CFG2_REG); 56 efx_writeo(efx, &reg, FR_AB_GM_CFG2);
58 udelay(10); 57 udelay(10);
59 58
60 /* Max frame len register */ 59 /* Max frame len register */
61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 60 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
62 EFX_POPULATE_OWORD_1(reg, GM_MAX_FLEN, max_frame_len); 61 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
63 falcon_write(efx, &reg, GM_MAX_FLEN_REG); 62 efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
64 udelay(10); 63 udelay(10);
65 64
66 /* FIFO configuration register 0 */ 65 /* FIFO configuration register 0 */
67 EFX_POPULATE_OWORD_5(reg, 66 EFX_POPULATE_OWORD_5(reg,
68 GMF_FTFENREQ, 1, 67 FRF_AB_GMF_FTFENREQ, 1,
69 GMF_STFENREQ, 1, 68 FRF_AB_GMF_STFENREQ, 1,
70 GMF_FRFENREQ, 1, 69 FRF_AB_GMF_FRFENREQ, 1,
71 GMF_SRFENREQ, 1, 70 FRF_AB_GMF_SRFENREQ, 1,
72 GMF_WTMENREQ, 1); 71 FRF_AB_GMF_WTMENREQ, 1);
73 falcon_write(efx, &reg, GMF_CFG0_REG); 72 efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
74 udelay(10); 73 udelay(10);
75 74
76 /* FIFO configuration register 1 */ 75 /* FIFO configuration register 1 */
77 EFX_POPULATE_OWORD_2(reg, 76 EFX_POPULATE_OWORD_2(reg,
78 GMF_CFGFRTH, 0x12, 77 FRF_AB_GMF_CFGFRTH, 0x12,
79 GMF_CFGXOFFRTX, 0xffff); 78 FRF_AB_GMF_CFGXOFFRTX, 0xffff);
80 falcon_write(efx, &reg, GMF_CFG1_REG); 79 efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
81 udelay(10); 80 udelay(10);
82 81
83 /* FIFO configuration register 2 */ 82 /* FIFO configuration register 2 */
84 EFX_POPULATE_OWORD_2(reg, 83 EFX_POPULATE_OWORD_2(reg,
85 GMF_CFGHWM, 0x3f, 84 FRF_AB_GMF_CFGHWM, 0x3f,
86 GMF_CFGLWM, 0xa); 85 FRF_AB_GMF_CFGLWM, 0xa);
87 falcon_write(efx, &reg, GMF_CFG2_REG); 86 efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
88 udelay(10); 87 udelay(10);
89 88
90 /* FIFO configuration register 3 */ 89 /* FIFO configuration register 3 */
91 EFX_POPULATE_OWORD_2(reg, 90 EFX_POPULATE_OWORD_2(reg,
92 GMF_CFGHWMFT, 0x1c, 91 FRF_AB_GMF_CFGHWMFT, 0x1c,
93 GMF_CFGFTTH, 0x08); 92 FRF_AB_GMF_CFGFTTH, 0x08);
94 falcon_write(efx, &reg, GMF_CFG3_REG); 93 efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
95 udelay(10); 94 udelay(10);
96 95
97 /* FIFO configuration register 4 */ 96 /* FIFO configuration register 4 */
98 EFX_POPULATE_OWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1); 97 EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
99 falcon_write(efx, &reg, GMF_CFG4_REG); 98 efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
100 udelay(10); 99 udelay(10);
101 100
102 /* FIFO configuration register 5 */ 101 /* FIFO configuration register 5 */
103 falcon_read(efx, &reg, GMF_CFG5_REG); 102 efx_reado(efx, &reg, FR_AB_GMF_CFG5);
104 EFX_SET_OWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode); 103 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
105 EFX_SET_OWORD_FIELD(reg, GMF_CFGHDPLX, !efx->link_fd); 104 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !efx->link_fd);
106 EFX_SET_OWORD_FIELD(reg, GMF_HSTDRPLT64, !efx->link_fd); 105 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !efx->link_fd);
107 EFX_SET_OWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0); 106 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
108 falcon_write(efx, &reg, GMF_CFG5_REG); 107 efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
109 udelay(10); 108 udelay(10);
110 109
111 /* MAC address */ 110 /* MAC address */
112 EFX_POPULATE_OWORD_4(reg, 111 EFX_POPULATE_OWORD_4(reg,
113 GM_HWADDR_5, efx->net_dev->dev_addr[5], 112 FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
114 GM_HWADDR_4, efx->net_dev->dev_addr[4], 113 FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
115 GM_HWADDR_3, efx->net_dev->dev_addr[3], 114 FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
116 GM_HWADDR_2, efx->net_dev->dev_addr[2]); 115 FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
117 falcon_write(efx, &reg, GM_ADR1_REG); 116 efx_writeo(efx, &reg, FR_AB_GM_ADR1);
118 udelay(10); 117 udelay(10);
119 EFX_POPULATE_OWORD_2(reg, 118 EFX_POPULATE_OWORD_2(reg,
120 GM_HWADDR_1, efx->net_dev->dev_addr[1], 119 FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
121 GM_HWADDR_0, efx->net_dev->dev_addr[0]); 120 FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
122 falcon_write(efx, &reg, GM_ADR2_REG); 121 efx_writeo(efx, &reg, FR_AB_GM_ADR2);
123 udelay(10); 122 udelay(10);
124 123
125 falcon_reconfigure_mac_wrapper(efx); 124 falcon_reconfigure_mac_wrapper(efx);
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
deleted file mode 100644
index 2d2261117ace..000000000000
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ /dev/null
@@ -1,1333 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_HWDEFS_H
12#define EFX_FALCON_HWDEFS_H
13
14/*
15 * Falcon hardware value definitions.
16 * Falcon is the internal codename for the SFC4000 controller that is
17 * present in SFE400X evaluation boards
18 */
19
20/**************************************************************************
21 *
22 * Falcon registers
23 *
24 **************************************************************************
25 */
26
27/* Address region register */
28#define ADR_REGION_REG_KER 0x00
29#define ADR_REGION0_LBN 0
30#define ADR_REGION0_WIDTH 18
31#define ADR_REGION1_LBN 32
32#define ADR_REGION1_WIDTH 18
33#define ADR_REGION2_LBN 64
34#define ADR_REGION2_WIDTH 18
35#define ADR_REGION3_LBN 96
36#define ADR_REGION3_WIDTH 18
37
38/* Interrupt enable register */
39#define INT_EN_REG_KER 0x0010
40#define KER_INT_KER_LBN 3
41#define KER_INT_KER_WIDTH 1
42#define DRV_INT_EN_KER_LBN 0
43#define DRV_INT_EN_KER_WIDTH 1
44
45/* Interrupt status address register */
46#define INT_ADR_REG_KER 0x0030
47#define NORM_INT_VEC_DIS_KER_LBN 64
48#define NORM_INT_VEC_DIS_KER_WIDTH 1
49#define INT_ADR_KER_LBN 0
50#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
51
52/* Interrupt status register (B0 only) */
53#define INT_ISR0_B0 0x90
54#define INT_ISR1_B0 0xA0
55
56/* Interrupt acknowledge register (A0/A1 only) */
57#define INT_ACK_REG_KER_A1 0x0050
58#define INT_ACK_DUMMY_DATA_LBN 0
59#define INT_ACK_DUMMY_DATA_WIDTH 32
60
61/* Interrupt acknowledge work-around register (A0/A1 only )*/
62#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
63
64/* SPI host command register */
65#define EE_SPI_HCMD_REG_KER 0x0100
66#define EE_SPI_HCMD_CMD_EN_LBN 31
67#define EE_SPI_HCMD_CMD_EN_WIDTH 1
68#define EE_WR_TIMER_ACTIVE_LBN 28
69#define EE_WR_TIMER_ACTIVE_WIDTH 1
70#define EE_SPI_HCMD_SF_SEL_LBN 24
71#define EE_SPI_HCMD_SF_SEL_WIDTH 1
72#define EE_SPI_EEPROM 0
73#define EE_SPI_FLASH 1
74#define EE_SPI_HCMD_DABCNT_LBN 16
75#define EE_SPI_HCMD_DABCNT_WIDTH 5
76#define EE_SPI_HCMD_READ_LBN 15
77#define EE_SPI_HCMD_READ_WIDTH 1
78#define EE_SPI_READ 1
79#define EE_SPI_WRITE 0
80#define EE_SPI_HCMD_DUBCNT_LBN 12
81#define EE_SPI_HCMD_DUBCNT_WIDTH 2
82#define EE_SPI_HCMD_ADBCNT_LBN 8
83#define EE_SPI_HCMD_ADBCNT_WIDTH 2
84#define EE_SPI_HCMD_ENC_LBN 0
85#define EE_SPI_HCMD_ENC_WIDTH 8
86
87/* SPI host address register */
88#define EE_SPI_HADR_REG_KER 0x0110
89#define EE_SPI_HADR_ADR_LBN 0
90#define EE_SPI_HADR_ADR_WIDTH 24
91
92/* SPI host data register */
93#define EE_SPI_HDATA_REG_KER 0x0120
94
95/* SPI/VPD config register */
96#define EE_VPD_CFG_REG_KER 0x0140
97#define EE_VPD_EN_LBN 0
98#define EE_VPD_EN_WIDTH 1
99#define EE_VPD_EN_AD9_MODE_LBN 1
100#define EE_VPD_EN_AD9_MODE_WIDTH 1
101#define EE_EE_CLOCK_DIV_LBN 112
102#define EE_EE_CLOCK_DIV_WIDTH 7
103#define EE_SF_CLOCK_DIV_LBN 120
104#define EE_SF_CLOCK_DIV_WIDTH 7
105
106/* PCIE CORE ACCESS REG */
107#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
108#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
109#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
110#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
111
112/* NIC status register */
113#define NIC_STAT_REG 0x0200
114#define EE_STRAP_EN_LBN 31
115#define EE_STRAP_EN_WIDTH 1
116#define EE_STRAP_OVR_LBN 24
117#define EE_STRAP_OVR_WIDTH 4
118#define ONCHIP_SRAM_LBN 16
119#define ONCHIP_SRAM_WIDTH 1
120#define SF_PRST_LBN 9
121#define SF_PRST_WIDTH 1
122#define EE_PRST_LBN 8
123#define EE_PRST_WIDTH 1
124#define STRAP_PINS_LBN 0
125#define STRAP_PINS_WIDTH 3
126/* These bit definitions are extrapolated from the list of numerical
127 * values for STRAP_PINS.
128 */
129#define STRAP_10G_LBN 2
130#define STRAP_10G_WIDTH 1
131#define STRAP_PCIE_LBN 0
132#define STRAP_PCIE_WIDTH 1
133
134#define BOOTED_USING_NVDEVICE_LBN 3
135#define BOOTED_USING_NVDEVICE_WIDTH 1
136
137/* GPIO control register */
138#define GPIO_CTL_REG_KER 0x0210
139#define GPIO_USE_NIC_CLK_LBN (30)
140#define GPIO_USE_NIC_CLK_WIDTH (1)
141#define GPIO_OUTPUTS_LBN (16)
142#define GPIO_OUTPUTS_WIDTH (4)
143#define GPIO_INPUTS_LBN (8)
144#define GPIO_DIRECTION_LBN (24)
145#define GPIO_DIRECTION_WIDTH (4)
146#define GPIO_DIRECTION_OUT (1)
147#define GPIO_SRAM_SLEEP (1 << 1)
148
149#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
150#define GPIO3_OEN_WIDTH 1
151#define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
152#define GPIO2_OEN_WIDTH 1
153#define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
154#define GPIO1_OEN_WIDTH 1
155#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
156#define GPIO0_OEN_WIDTH 1
157
158#define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
159#define GPIO3_OUT_WIDTH 1
160#define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
161#define GPIO2_OUT_WIDTH 1
162#define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
163#define GPIO1_OUT_WIDTH 1
164#define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
165#define GPIO0_OUT_WIDTH 1
166
167#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
168#define GPIO3_IN_WIDTH 1
169#define GPIO2_IN_WIDTH 1
170#define GPIO1_IN_WIDTH 1
171#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
172#define GPIO0_IN_WIDTH 1
173
174/* Global control register */
175#define GLB_CTL_REG_KER 0x0220
176#define EXT_PHY_RST_CTL_LBN 63
177#define EXT_PHY_RST_CTL_WIDTH 1
178#define PCIE_SD_RST_CTL_LBN 61
179#define PCIE_SD_RST_CTL_WIDTH 1
180
181#define PCIE_NSTCK_RST_CTL_LBN 58
182#define PCIE_NSTCK_RST_CTL_WIDTH 1
183#define PCIE_CORE_RST_CTL_LBN 57
184#define PCIE_CORE_RST_CTL_WIDTH 1
185#define EE_RST_CTL_LBN 49
186#define EE_RST_CTL_WIDTH 1
187#define RST_XGRX_LBN 24
188#define RST_XGRX_WIDTH 1
189#define RST_XGTX_LBN 23
190#define RST_XGTX_WIDTH 1
191#define RST_EM_LBN 22
192#define RST_EM_WIDTH 1
193#define EXT_PHY_RST_DUR_LBN 1
194#define EXT_PHY_RST_DUR_WIDTH 3
195#define SWRST_LBN 0
196#define SWRST_WIDTH 1
197#define INCLUDE_IN_RESET 0
198#define EXCLUDE_FROM_RESET 1
199
200/* Fatal interrupt register */
201#define FATAL_INTR_REG_KER 0x0230
202#define RBUF_OWN_INT_KER_EN_LBN 39
203#define RBUF_OWN_INT_KER_EN_WIDTH 1
204#define TBUF_OWN_INT_KER_EN_LBN 38
205#define TBUF_OWN_INT_KER_EN_WIDTH 1
206#define ILL_ADR_INT_KER_EN_LBN 33
207#define ILL_ADR_INT_KER_EN_WIDTH 1
208#define MEM_PERR_INT_KER_LBN 8
209#define MEM_PERR_INT_KER_WIDTH 1
210#define INT_KER_ERROR_LBN 0
211#define INT_KER_ERROR_WIDTH 12
212
213#define DP_CTRL_REG 0x250
214#define FLS_EVQ_ID_LBN 0
215#define FLS_EVQ_ID_WIDTH 11
216
217#define MEM_STAT_REG_KER 0x260
218
219/* Debug probe register */
220#define DEBUG_BLK_SEL_MISC 7
221#define DEBUG_BLK_SEL_SERDES 6
222#define DEBUG_BLK_SEL_EM 5
223#define DEBUG_BLK_SEL_SR 4
224#define DEBUG_BLK_SEL_EV 3
225#define DEBUG_BLK_SEL_RX 2
226#define DEBUG_BLK_SEL_TX 1
227#define DEBUG_BLK_SEL_BIU 0
228
229/* FPGA build version */
230#define ALTERA_BUILD_REG_KER 0x0300
231#define VER_ALL_LBN 0
232#define VER_ALL_WIDTH 32
233
234/* Spare EEPROM bits register (flash 0x390) */
235#define SPARE_REG_KER 0x310
236#define MEM_PERR_EN_TX_DATA_LBN 72
237#define MEM_PERR_EN_TX_DATA_WIDTH 2
238
239/* Timer table for kernel access */
240#define TIMER_CMD_REG_KER 0x420
241#define TIMER_MODE_LBN 12
242#define TIMER_MODE_WIDTH 2
243#define TIMER_MODE_DIS 0
244#define TIMER_MODE_INT_HLDOFF 2
245#define TIMER_VAL_LBN 0
246#define TIMER_VAL_WIDTH 12
247
248/* Driver generated event register */
249#define DRV_EV_REG_KER 0x440
250#define DRV_EV_QID_LBN 64
251#define DRV_EV_QID_WIDTH 12
252#define DRV_EV_DATA_LBN 0
253#define DRV_EV_DATA_WIDTH 64
254
255/* Buffer table configuration register */
256#define BUF_TBL_CFG_REG_KER 0x600
257#define BUF_TBL_MODE_LBN 3
258#define BUF_TBL_MODE_WIDTH 1
259#define BUF_TBL_MODE_HALF 0
260#define BUF_TBL_MODE_FULL 1
261
262/* SRAM receive descriptor cache configuration register */
263#define SRM_RX_DC_CFG_REG_KER 0x610
264#define SRM_RX_DC_BASE_ADR_LBN 0
265#define SRM_RX_DC_BASE_ADR_WIDTH 21
266
267/* SRAM transmit descriptor cache configuration register */
268#define SRM_TX_DC_CFG_REG_KER 0x620
269#define SRM_TX_DC_BASE_ADR_LBN 0
270#define SRM_TX_DC_BASE_ADR_WIDTH 21
271
272/* SRAM configuration register */
273#define SRM_CFG_REG_KER 0x630
274#define SRAM_OOB_BT_INIT_EN_LBN 3
275#define SRAM_OOB_BT_INIT_EN_WIDTH 1
276#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
277#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
278#define SRM_NB_BSZ_1BANKS_2M 0
279#define SRM_NB_BSZ_1BANKS_4M 1
280#define SRM_NB_BSZ_1BANKS_8M 2
281#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
282#define SRM_NB_BSZ_2BANKS_4M 4
283#define SRM_NB_BSZ_2BANKS_8M 5
284#define SRM_NB_BSZ_2BANKS_16M 6
285#define SRM_NB_BSZ_RESERVED 7
286
287/* Special buffer table update register */
288#define BUF_TBL_UPD_REG_KER 0x0650
289#define BUF_UPD_CMD_LBN 63
290#define BUF_UPD_CMD_WIDTH 1
291#define BUF_CLR_CMD_LBN 62
292#define BUF_CLR_CMD_WIDTH 1
293#define BUF_CLR_END_ID_LBN 32
294#define BUF_CLR_END_ID_WIDTH 20
295#define BUF_CLR_START_ID_LBN 0
296#define BUF_CLR_START_ID_WIDTH 20
297
298/* Receive configuration register */
299#define RX_CFG_REG_KER 0x800
300
301/* B0 */
302#define RX_INGR_EN_B0_LBN 47
303#define RX_INGR_EN_B0_WIDTH 1
304#define RX_DESC_PUSH_EN_B0_LBN 43
305#define RX_DESC_PUSH_EN_B0_WIDTH 1
306#define RX_XON_TX_TH_B0_LBN 33
307#define RX_XON_TX_TH_B0_WIDTH 5
308#define RX_XOFF_TX_TH_B0_LBN 28
309#define RX_XOFF_TX_TH_B0_WIDTH 5
310#define RX_USR_BUF_SIZE_B0_LBN 19
311#define RX_USR_BUF_SIZE_B0_WIDTH 9
312#define RX_XON_MAC_TH_B0_LBN 10
313#define RX_XON_MAC_TH_B0_WIDTH 9
314#define RX_XOFF_MAC_TH_B0_LBN 1
315#define RX_XOFF_MAC_TH_B0_WIDTH 9
316#define RX_XOFF_MAC_EN_B0_LBN 0
317#define RX_XOFF_MAC_EN_B0_WIDTH 1
318
319/* A1 */
320#define RX_DESC_PUSH_EN_A1_LBN 35
321#define RX_DESC_PUSH_EN_A1_WIDTH 1
322#define RX_XON_TX_TH_A1_LBN 25
323#define RX_XON_TX_TH_A1_WIDTH 5
324#define RX_XOFF_TX_TH_A1_LBN 20
325#define RX_XOFF_TX_TH_A1_WIDTH 5
326#define RX_USR_BUF_SIZE_A1_LBN 11
327#define RX_USR_BUF_SIZE_A1_WIDTH 9
328#define RX_XON_MAC_TH_A1_LBN 6
329#define RX_XON_MAC_TH_A1_WIDTH 5
330#define RX_XOFF_MAC_TH_A1_LBN 1
331#define RX_XOFF_MAC_TH_A1_WIDTH 5
332#define RX_XOFF_MAC_EN_A1_LBN 0
333#define RX_XOFF_MAC_EN_A1_WIDTH 1
334
335/* Receive filter control register */
336#define RX_FILTER_CTL_REG 0x810
337#define UDP_FULL_SRCH_LIMIT_LBN 32
338#define UDP_FULL_SRCH_LIMIT_WIDTH 8
339#define NUM_KER_LBN 24
340#define NUM_KER_WIDTH 2
341#define UDP_WILD_SRCH_LIMIT_LBN 16
342#define UDP_WILD_SRCH_LIMIT_WIDTH 8
343#define TCP_WILD_SRCH_LIMIT_LBN 8
344#define TCP_WILD_SRCH_LIMIT_WIDTH 8
345#define TCP_FULL_SRCH_LIMIT_LBN 0
346#define TCP_FULL_SRCH_LIMIT_WIDTH 8
347
348/* RX queue flush register */
349#define RX_FLUSH_DESCQ_REG_KER 0x0820
350#define RX_FLUSH_DESCQ_CMD_LBN 24
351#define RX_FLUSH_DESCQ_CMD_WIDTH 1
352#define RX_FLUSH_DESCQ_LBN 0
353#define RX_FLUSH_DESCQ_WIDTH 12
354
355/* Receive descriptor update register */
356#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
357#define RX_DESC_WPTR_DWORD_LBN 0
358#define RX_DESC_WPTR_DWORD_WIDTH 12
359
360/* Receive descriptor cache configuration register */
361#define RX_DC_CFG_REG_KER 0x840
362#define RX_DC_SIZE_LBN 0
363#define RX_DC_SIZE_WIDTH 2
364
365#define RX_DC_PF_WM_REG_KER 0x850
366#define RX_DC_PF_LWM_LBN 0
367#define RX_DC_PF_LWM_WIDTH 6
368
369/* RX no descriptor drop counter */
370#define RX_NODESC_DROP_REG_KER 0x880
371#define RX_NODESC_DROP_CNT_LBN 0
372#define RX_NODESC_DROP_CNT_WIDTH 16
373
374/* RX black magic register */
375#define RX_SELF_RST_REG_KER 0x890
376#define RX_ISCSI_DIS_LBN 17
377#define RX_ISCSI_DIS_WIDTH 1
378#define RX_NODESC_WAIT_DIS_LBN 9
379#define RX_NODESC_WAIT_DIS_WIDTH 1
380#define RX_RECOVERY_EN_LBN 8
381#define RX_RECOVERY_EN_WIDTH 1
382
383/* TX queue flush register */
384#define TX_FLUSH_DESCQ_REG_KER 0x0a00
385#define TX_FLUSH_DESCQ_CMD_LBN 12
386#define TX_FLUSH_DESCQ_CMD_WIDTH 1
387#define TX_FLUSH_DESCQ_LBN 0
388#define TX_FLUSH_DESCQ_WIDTH 12
389
390/* Transmit descriptor update register */
391#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
392#define TX_DESC_WPTR_DWORD_LBN 0
393#define TX_DESC_WPTR_DWORD_WIDTH 12
394
395/* Transmit descriptor cache configuration register */
396#define TX_DC_CFG_REG_KER 0xa20
397#define TX_DC_SIZE_LBN 0
398#define TX_DC_SIZE_WIDTH 2
399
400/* Transmit checksum configuration register (A0/A1 only) */
401#define TX_CHKSM_CFG_REG_KER_A1 0xa30
402
403/* Transmit configuration register */
404#define TX_CFG_REG_KER 0xa50
405#define TX_NO_EOP_DISC_EN_LBN 5
406#define TX_NO_EOP_DISC_EN_WIDTH 1
407
408/* Transmit configuration register 2 */
409#define TX_CFG2_REG_KER 0xa80
410#define TX_CSR_PUSH_EN_LBN 89
411#define TX_CSR_PUSH_EN_WIDTH 1
412#define TX_RX_SPACER_LBN 64
413#define TX_RX_SPACER_WIDTH 8
414#define TX_SW_EV_EN_LBN 59
415#define TX_SW_EV_EN_WIDTH 1
416#define TX_RX_SPACER_EN_LBN 57
417#define TX_RX_SPACER_EN_WIDTH 1
418#define TX_PREF_THRESHOLD_LBN 19
419#define TX_PREF_THRESHOLD_WIDTH 2
420#define TX_ONE_PKT_PER_Q_LBN 18
421#define TX_ONE_PKT_PER_Q_WIDTH 1
422#define TX_DIS_NON_IP_EV_LBN 17
423#define TX_DIS_NON_IP_EV_WIDTH 1
424#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
425#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
426
427/* PHY management transmit data register */
428#define MD_TXD_REG_KER 0xc00
429#define MD_TXD_LBN 0
430#define MD_TXD_WIDTH 16
431
432/* PHY management receive data register */
433#define MD_RXD_REG_KER 0xc10
434#define MD_RXD_LBN 0
435#define MD_RXD_WIDTH 16
436
437/* PHY management configuration & status register */
438#define MD_CS_REG_KER 0xc20
439#define MD_GC_LBN 4
440#define MD_GC_WIDTH 1
441#define MD_RIC_LBN 2
442#define MD_RIC_WIDTH 1
443#define MD_RDC_LBN 1
444#define MD_RDC_WIDTH 1
445#define MD_WRC_LBN 0
446#define MD_WRC_WIDTH 1
447
448/* PHY management PHY address register */
449#define MD_PHY_ADR_REG_KER 0xc30
450#define MD_PHY_ADR_LBN 0
451#define MD_PHY_ADR_WIDTH 16
452
453/* PHY management ID register */
454#define MD_ID_REG_KER 0xc40
455#define MD_PRT_ADR_LBN 11
456#define MD_PRT_ADR_WIDTH 5
457#define MD_DEV_ADR_LBN 6
458#define MD_DEV_ADR_WIDTH 5
459
460/* PHY management status & mask register (DWORD read only) */
461#define MD_STAT_REG_KER 0xc50
462#define MD_BSERR_LBN 2
463#define MD_BSERR_WIDTH 1
464#define MD_LNFL_LBN 1
465#define MD_LNFL_WIDTH 1
466#define MD_BSY_LBN 0
467#define MD_BSY_WIDTH 1
468
469/* Port 0 and 1 MAC stats registers */
470#define MAC0_STAT_DMA_REG_KER 0xc60
471#define MAC_STAT_DMA_CMD_LBN 48
472#define MAC_STAT_DMA_CMD_WIDTH 1
473#define MAC_STAT_DMA_ADR_LBN 0
474#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
475
476/* Port 0 and 1 MAC control registers */
477#define MAC0_CTRL_REG_KER 0xc80
478#define MAC_XOFF_VAL_LBN 16
479#define MAC_XOFF_VAL_WIDTH 16
480#define TXFIFO_DRAIN_EN_B0_LBN 7
481#define TXFIFO_DRAIN_EN_B0_WIDTH 1
482#define MAC_BCAD_ACPT_LBN 4
483#define MAC_BCAD_ACPT_WIDTH 1
484#define MAC_UC_PROM_LBN 3
485#define MAC_UC_PROM_WIDTH 1
486#define MAC_LINK_STATUS_LBN 2
487#define MAC_LINK_STATUS_WIDTH 1
488#define MAC_SPEED_LBN 0
489#define MAC_SPEED_WIDTH 2
490
491/* 10G XAUI XGXS default values */
492#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
493#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
494#define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */
495
496/* Multicast address hash table */
497#define MAC_MCAST_HASH_REG0_KER 0xca0
498#define MAC_MCAST_HASH_REG1_KER 0xcb0
499
500/* GMAC configuration register 1 */
501#define GM_CFG1_REG 0xe00
502#define GM_SW_RST_LBN 31
503#define GM_SW_RST_WIDTH 1
504#define GM_LOOP_LBN 8
505#define GM_LOOP_WIDTH 1
506#define GM_RX_FC_EN_LBN 5
507#define GM_RX_FC_EN_WIDTH 1
508#define GM_TX_FC_EN_LBN 4
509#define GM_TX_FC_EN_WIDTH 1
510#define GM_RX_EN_LBN 2
511#define GM_RX_EN_WIDTH 1
512#define GM_TX_EN_LBN 0
513#define GM_TX_EN_WIDTH 1
514
515/* GMAC configuration register 2 */
516#define GM_CFG2_REG 0xe10
517#define GM_PAMBL_LEN_LBN 12
518#define GM_PAMBL_LEN_WIDTH 4
519#define GM_IF_MODE_LBN 8
520#define GM_IF_MODE_WIDTH 2
521#define GM_LEN_CHK_LBN 4
522#define GM_LEN_CHK_WIDTH 1
523#define GM_PAD_CRC_EN_LBN 2
524#define GM_PAD_CRC_EN_WIDTH 1
525#define GM_FD_LBN 0
526#define GM_FD_WIDTH 1
527
528/* GMAC maximum frame length register */
529#define GM_MAX_FLEN_REG 0xe40
530#define GM_MAX_FLEN_LBN 0
531#define GM_MAX_FLEN_WIDTH 16
532
533/* GMAC station address register 1 */
534#define GM_ADR1_REG 0xf00
535#define GM_HWADDR_5_LBN 24
536#define GM_HWADDR_5_WIDTH 8
537#define GM_HWADDR_4_LBN 16
538#define GM_HWADDR_4_WIDTH 8
539#define GM_HWADDR_3_LBN 8
540#define GM_HWADDR_3_WIDTH 8
541#define GM_HWADDR_2_LBN 0
542#define GM_HWADDR_2_WIDTH 8
543
544/* GMAC station address register 2 */
545#define GM_ADR2_REG 0xf10
546#define GM_HWADDR_1_LBN 24
547#define GM_HWADDR_1_WIDTH 8
548#define GM_HWADDR_0_LBN 16
549#define GM_HWADDR_0_WIDTH 8
550
551/* GMAC FIFO configuration register 0 */
552#define GMF_CFG0_REG 0xf20
553#define GMF_FTFENREQ_LBN 12
554#define GMF_FTFENREQ_WIDTH 1
555#define GMF_STFENREQ_LBN 11
556#define GMF_STFENREQ_WIDTH 1
557#define GMF_FRFENREQ_LBN 10
558#define GMF_FRFENREQ_WIDTH 1
559#define GMF_SRFENREQ_LBN 9
560#define GMF_SRFENREQ_WIDTH 1
561#define GMF_WTMENREQ_LBN 8
562#define GMF_WTMENREQ_WIDTH 1
563
564/* GMAC FIFO configuration register 1 */
565#define GMF_CFG1_REG 0xf30
566#define GMF_CFGFRTH_LBN 16
567#define GMF_CFGFRTH_WIDTH 5
568#define GMF_CFGXOFFRTX_LBN 0
569#define GMF_CFGXOFFRTX_WIDTH 16
570
571/* GMAC FIFO configuration register 2 */
572#define GMF_CFG2_REG 0xf40
573#define GMF_CFGHWM_LBN 16
574#define GMF_CFGHWM_WIDTH 6
575#define GMF_CFGLWM_LBN 0
576#define GMF_CFGLWM_WIDTH 6
577
578/* GMAC FIFO configuration register 3 */
579#define GMF_CFG3_REG 0xf50
580#define GMF_CFGHWMFT_LBN 16
581#define GMF_CFGHWMFT_WIDTH 6
582#define GMF_CFGFTTH_LBN 0
583#define GMF_CFGFTTH_WIDTH 6
584
585/* GMAC FIFO configuration register 4 */
586#define GMF_CFG4_REG 0xf60
587#define GMF_HSTFLTRFRM_PAUSE_LBN 12
588#define GMF_HSTFLTRFRM_PAUSE_WIDTH 12
589
590/* GMAC FIFO configuration register 5 */
591#define GMF_CFG5_REG 0xf70
592#define GMF_CFGHDPLX_LBN 22
593#define GMF_CFGHDPLX_WIDTH 1
594#define GMF_CFGBYTMODE_LBN 19
595#define GMF_CFGBYTMODE_WIDTH 1
596#define GMF_HSTDRPLT64_LBN 18
597#define GMF_HSTDRPLT64_WIDTH 1
598#define GMF_HSTFLTRFRMDC_PAUSE_LBN 12
599#define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
600
601/* XGMAC address register low */
602#define XM_ADR_LO_REG 0x1200
603#define XM_ADR_3_LBN 24
604#define XM_ADR_3_WIDTH 8
605#define XM_ADR_2_LBN 16
606#define XM_ADR_2_WIDTH 8
607#define XM_ADR_1_LBN 8
608#define XM_ADR_1_WIDTH 8
609#define XM_ADR_0_LBN 0
610#define XM_ADR_0_WIDTH 8
611
612/* XGMAC address register high */
613#define XM_ADR_HI_REG 0x1210
614#define XM_ADR_5_LBN 8
615#define XM_ADR_5_WIDTH 8
616#define XM_ADR_4_LBN 0
617#define XM_ADR_4_WIDTH 8
618
619/* XGMAC global configuration */
620#define XM_GLB_CFG_REG 0x1220
621#define XM_RX_STAT_EN_LBN 11
622#define XM_RX_STAT_EN_WIDTH 1
623#define XM_TX_STAT_EN_LBN 10
624#define XM_TX_STAT_EN_WIDTH 1
625#define XM_RX_JUMBO_MODE_LBN 6
626#define XM_RX_JUMBO_MODE_WIDTH 1
627#define XM_INTCLR_MODE_LBN 3
628#define XM_INTCLR_MODE_WIDTH 1
629#define XM_CORE_RST_LBN 0
630#define XM_CORE_RST_WIDTH 1
631
632/* XGMAC transmit configuration */
633#define XM_TX_CFG_REG 0x1230
634#define XM_IPG_LBN 16
635#define XM_IPG_WIDTH 4
636#define XM_FCNTL_LBN 10
637#define XM_FCNTL_WIDTH 1
638#define XM_TXCRC_LBN 8
639#define XM_TXCRC_WIDTH 1
640#define XM_AUTO_PAD_LBN 5
641#define XM_AUTO_PAD_WIDTH 1
642#define XM_TX_PRMBL_LBN 2
643#define XM_TX_PRMBL_WIDTH 1
644#define XM_TXEN_LBN 1
645#define XM_TXEN_WIDTH 1
646
647/* XGMAC receive configuration */
648#define XM_RX_CFG_REG 0x1240
649#define XM_PASS_CRC_ERR_LBN 25
650#define XM_PASS_CRC_ERR_WIDTH 1
651#define XM_ACPT_ALL_MCAST_LBN 11
652#define XM_ACPT_ALL_MCAST_WIDTH 1
653#define XM_ACPT_ALL_UCAST_LBN 9
654#define XM_ACPT_ALL_UCAST_WIDTH 1
655#define XM_AUTO_DEPAD_LBN 8
656#define XM_AUTO_DEPAD_WIDTH 1
657#define XM_RXEN_LBN 1
658#define XM_RXEN_WIDTH 1
659
660/* XGMAC management interrupt mask register */
661#define XM_MGT_INT_MSK_REG_B0 0x1250
662#define XM_MSK_PRMBLE_ERR_LBN 2
663#define XM_MSK_PRMBLE_ERR_WIDTH 1
664#define XM_MSK_RMTFLT_LBN 1
665#define XM_MSK_RMTFLT_WIDTH 1
666#define XM_MSK_LCLFLT_LBN 0
667#define XM_MSK_LCLFLT_WIDTH 1
668
669/* XGMAC flow control register */
670#define XM_FC_REG 0x1270
671#define XM_PAUSE_TIME_LBN 16
672#define XM_PAUSE_TIME_WIDTH 16
673#define XM_DIS_FCNTL_LBN 0
674#define XM_DIS_FCNTL_WIDTH 1
675
676/* XGMAC pause time count register */
677#define XM_PAUSE_TIME_REG 0x1290
678
679/* XGMAC transmit parameter register */
680#define XM_TX_PARAM_REG 0x012d0
681#define XM_TX_JUMBO_MODE_LBN 31
682#define XM_TX_JUMBO_MODE_WIDTH 1
683#define XM_MAX_TX_FRM_SIZE_LBN 16
684#define XM_MAX_TX_FRM_SIZE_WIDTH 14
685
686/* XGMAC receive parameter register */
687#define XM_RX_PARAM_REG 0x12e0
688#define XM_MAX_RX_FRM_SIZE_LBN 0
689#define XM_MAX_RX_FRM_SIZE_WIDTH 14
690
/* XGMAC management interrupt status register */
#define XM_MGT_INT_REG_B0 0x12f0
/*
 * Preamble-error bit, renamed to follow the file-wide
 * <FIELD>_LBN/<FIELD>_WIDTH convention so the EFX_*_FIELD() accessor
 * macros (which token-paste the _LBN/_WIDTH suffixes onto the field
 * name) can operate on XM_PRMBLE_ERR like every other field here.
 */
#define XM_PRMBLE_ERR_LBN 2
#define XM_PRMBLE_ERR_WIDTH 1
/* Legacy spellings kept so any existing users keep compiling. */
#define XM_PRMBLE_ERR XM_PRMBLE_ERR_LBN
#define XM_PRMBLE_WIDTH XM_PRMBLE_ERR_WIDTH
#define XM_RMTFLT_LBN 1
#define XM_RMTFLT_WIDTH 1
#define XM_LCLFLT_LBN 0
#define XM_LCLFLT_WIDTH 1
699
700/* XGXS/XAUI powerdown/reset register */
701#define XX_PWR_RST_REG 0x1300
702
703#define XX_SD_RST_ACT_LBN 16
704#define XX_SD_RST_ACT_WIDTH 1
705#define XX_PWRDND_EN_LBN 15
706#define XX_PWRDND_EN_WIDTH 1
707#define XX_PWRDNC_EN_LBN 14
708#define XX_PWRDNC_EN_WIDTH 1
709#define XX_PWRDNB_EN_LBN 13
710#define XX_PWRDNB_EN_WIDTH 1
711#define XX_PWRDNA_EN_LBN 12
712#define XX_PWRDNA_EN_WIDTH 1
713#define XX_RSTPLLCD_EN_LBN 9
714#define XX_RSTPLLCD_EN_WIDTH 1
715#define XX_RSTPLLAB_EN_LBN 8
716#define XX_RSTPLLAB_EN_WIDTH 1
717#define XX_RESETD_EN_LBN 7
718#define XX_RESETD_EN_WIDTH 1
719#define XX_RESETC_EN_LBN 6
720#define XX_RESETC_EN_WIDTH 1
721#define XX_RESETB_EN_LBN 5
722#define XX_RESETB_EN_WIDTH 1
723#define XX_RESETA_EN_LBN 4
724#define XX_RESETA_EN_WIDTH 1
725#define XX_RSTXGXSRX_EN_LBN 2
726#define XX_RSTXGXSRX_EN_WIDTH 1
727#define XX_RSTXGXSTX_EN_LBN 1
728#define XX_RSTXGXSTX_EN_WIDTH 1
729#define XX_RST_XX_EN_LBN 0
730#define XX_RST_XX_EN_WIDTH 1
731
732/* XGXS/XAUI powerdown/reset control register */
733#define XX_SD_CTL_REG 0x1310
734#define XX_HIDRVD_LBN 15
735#define XX_HIDRVD_WIDTH 1
736#define XX_LODRVD_LBN 14
737#define XX_LODRVD_WIDTH 1
738#define XX_HIDRVC_LBN 13
739#define XX_HIDRVC_WIDTH 1
740#define XX_LODRVC_LBN 12
741#define XX_LODRVC_WIDTH 1
742#define XX_HIDRVB_LBN 11
743#define XX_HIDRVB_WIDTH 1
744#define XX_LODRVB_LBN 10
745#define XX_LODRVB_WIDTH 1
746#define XX_HIDRVA_LBN 9
747#define XX_HIDRVA_WIDTH 1
748#define XX_LODRVA_LBN 8
749#define XX_LODRVA_WIDTH 1
750#define XX_LPBKD_LBN 3
751#define XX_LPBKD_WIDTH 1
752#define XX_LPBKC_LBN 2
753#define XX_LPBKC_WIDTH 1
754#define XX_LPBKB_LBN 1
755#define XX_LPBKB_WIDTH 1
756#define XX_LPBKA_LBN 0
757#define XX_LPBKA_WIDTH 1
758
759#define XX_TXDRV_CTL_REG 0x1320
760#define XX_DEQD_LBN 28
761#define XX_DEQD_WIDTH 4
762#define XX_DEQC_LBN 24
763#define XX_DEQC_WIDTH 4
764#define XX_DEQB_LBN 20
765#define XX_DEQB_WIDTH 4
766#define XX_DEQA_LBN 16
767#define XX_DEQA_WIDTH 4
768#define XX_DTXD_LBN 12
769#define XX_DTXD_WIDTH 4
770#define XX_DTXC_LBN 8
771#define XX_DTXC_WIDTH 4
772#define XX_DTXB_LBN 4
773#define XX_DTXB_WIDTH 4
774#define XX_DTXA_LBN 0
775#define XX_DTXA_WIDTH 4
776
777/* XAUI XGXS core status register */
778#define XX_CORE_STAT_REG 0x1360
779#define XX_FORCE_SIG_LBN 24
780#define XX_FORCE_SIG_WIDTH 8
781#define XX_FORCE_SIG_DECODE_FORCED 0xff
782#define XX_XGXS_LB_EN_LBN 23
783#define XX_XGXS_LB_EN_WIDTH 1
784#define XX_XGMII_LB_EN_LBN 22
785#define XX_XGMII_LB_EN_WIDTH 1
786#define XX_ALIGN_DONE_LBN 20
787#define XX_ALIGN_DONE_WIDTH 1
788#define XX_SYNC_STAT_LBN 16
789#define XX_SYNC_STAT_WIDTH 4
790#define XX_SYNC_STAT_DECODE_SYNCED 0xf
791#define XX_COMMA_DET_LBN 12
792#define XX_COMMA_DET_WIDTH 4
793#define XX_COMMA_DET_DECODE_DETECTED 0xf
794#define XX_COMMA_DET_RESET 0xf
795#define XX_CHARERR_LBN 4
796#define XX_CHARERR_WIDTH 4
797#define XX_CHARERR_RESET 0xf
798#define XX_DISPERR_LBN 0
799#define XX_DISPERR_WIDTH 4
800#define XX_DISPERR_RESET 0xf
801
802/* Receive filter table */
803#define RX_FILTER_TBL0 0xF00000
804
805/* Receive descriptor pointer table */
806#define RX_DESC_PTR_TBL_KER_A1 0x11800
807#define RX_DESC_PTR_TBL_KER_B0 0xF40000
808#define RX_DESC_PTR_TBL_KER_P0 0x900
809#define RX_ISCSI_DDIG_EN_LBN 88
810#define RX_ISCSI_DDIG_EN_WIDTH 1
811#define RX_ISCSI_HDIG_EN_LBN 87
812#define RX_ISCSI_HDIG_EN_WIDTH 1
813#define RX_DESCQ_BUF_BASE_ID_LBN 36
814#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
815#define RX_DESCQ_EVQ_ID_LBN 24
816#define RX_DESCQ_EVQ_ID_WIDTH 12
817#define RX_DESCQ_OWNER_ID_LBN 10
818#define RX_DESCQ_OWNER_ID_WIDTH 14
819#define RX_DESCQ_LABEL_LBN 5
820#define RX_DESCQ_LABEL_WIDTH 5
821#define RX_DESCQ_SIZE_LBN 3
822#define RX_DESCQ_SIZE_WIDTH 2
823#define RX_DESCQ_SIZE_4K 3
824#define RX_DESCQ_SIZE_2K 2
825#define RX_DESCQ_SIZE_1K 1
826#define RX_DESCQ_SIZE_512 0
827#define RX_DESCQ_TYPE_LBN 2
828#define RX_DESCQ_TYPE_WIDTH 1
829#define RX_DESCQ_JUMBO_LBN 1
830#define RX_DESCQ_JUMBO_WIDTH 1
831#define RX_DESCQ_EN_LBN 0
832#define RX_DESCQ_EN_WIDTH 1
833
834/* Transmit descriptor pointer table */
835#define TX_DESC_PTR_TBL_KER_A1 0x11900
836#define TX_DESC_PTR_TBL_KER_B0 0xF50000
837#define TX_DESC_PTR_TBL_KER_P0 0xa40
838#define TX_NON_IP_DROP_DIS_B0_LBN 91
839#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
840#define TX_IP_CHKSM_DIS_B0_LBN 90
841#define TX_IP_CHKSM_DIS_B0_WIDTH 1
842#define TX_TCP_CHKSM_DIS_B0_LBN 89
843#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
844#define TX_DESCQ_EN_LBN 88
845#define TX_DESCQ_EN_WIDTH 1
846#define TX_ISCSI_DDIG_EN_LBN 87
847#define TX_ISCSI_DDIG_EN_WIDTH 1
848#define TX_ISCSI_HDIG_EN_LBN 86
849#define TX_ISCSI_HDIG_EN_WIDTH 1
850#define TX_DESCQ_BUF_BASE_ID_LBN 36
851#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
852#define TX_DESCQ_EVQ_ID_LBN 24
853#define TX_DESCQ_EVQ_ID_WIDTH 12
854#define TX_DESCQ_OWNER_ID_LBN 10
855#define TX_DESCQ_OWNER_ID_WIDTH 14
856#define TX_DESCQ_LABEL_LBN 5
857#define TX_DESCQ_LABEL_WIDTH 5
858#define TX_DESCQ_SIZE_LBN 3
859#define TX_DESCQ_SIZE_WIDTH 2
860#define TX_DESCQ_SIZE_4K 3
861#define TX_DESCQ_SIZE_2K 2
862#define TX_DESCQ_SIZE_1K 1
863#define TX_DESCQ_SIZE_512 0
864#define TX_DESCQ_TYPE_LBN 1
865#define TX_DESCQ_TYPE_WIDTH 2
866
867/* Event queue pointer */
868#define EVQ_PTR_TBL_KER_A1 0x11a00
869#define EVQ_PTR_TBL_KER_B0 0xf60000
870#define EVQ_PTR_TBL_KER_P0 0x500
871#define EVQ_EN_LBN 23
872#define EVQ_EN_WIDTH 1
873#define EVQ_SIZE_LBN 20
874#define EVQ_SIZE_WIDTH 3
875#define EVQ_SIZE_32K 6
876#define EVQ_SIZE_16K 5
877#define EVQ_SIZE_8K 4
878#define EVQ_SIZE_4K 3
879#define EVQ_SIZE_2K 2
880#define EVQ_SIZE_1K 1
881#define EVQ_SIZE_512 0
882#define EVQ_BUF_BASE_ID_LBN 0
883#define EVQ_BUF_BASE_ID_WIDTH 20
884
885/* Event queue read pointer */
886#define EVQ_RPTR_REG_KER_A1 0x11b00
887#define EVQ_RPTR_REG_KER_B0 0xfa0000
/* NOTE(review): EVQ_RPTR_REG_KER (no revision suffix) is not defined in
 * this header — presumably it is selected per chip revision (A1 vs B0)
 * elsewhere in the driver before this macro is used; confirm at the
 * point of use. */
888#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
889#define EVQ_RPTR_DWORD_LBN 0
890#define EVQ_RPTR_DWORD_WIDTH 14
891
892/* RSS indirection table */
893#define RX_RSS_INDIR_TBL_B0 0xFB0000
894#define RX_RSS_INDIR_ENT_B0_LBN 0
895#define RX_RSS_INDIR_ENT_B0_WIDTH 6
896
897/* Special buffer descriptors (full-mode) */
898#define BUF_FULL_TBL_KER_A1 0x8000
899#define BUF_FULL_TBL_KER_B0 0x800000
900#define IP_DAT_BUF_SIZE_LBN 50
901#define IP_DAT_BUF_SIZE_WIDTH 1
902#define IP_DAT_BUF_SIZE_8K 1
903#define IP_DAT_BUF_SIZE_4K 0
904#define BUF_ADR_REGION_LBN 48
905#define BUF_ADR_REGION_WIDTH 2
906#define BUF_ADR_FBUF_LBN 14
907#define BUF_ADR_FBUF_WIDTH 34
908#define BUF_OWNER_ID_FBUF_LBN 0
909#define BUF_OWNER_ID_FBUF_WIDTH 14
910
911/* Transmit descriptor */
912#define TX_KER_PORT_LBN 63
913#define TX_KER_PORT_WIDTH 1
914#define TX_KER_CONT_LBN 62
915#define TX_KER_CONT_WIDTH 1
916#define TX_KER_BYTE_CNT_LBN 48
917#define TX_KER_BYTE_CNT_WIDTH 14
918#define TX_KER_BUF_REGION_LBN 46
919#define TX_KER_BUF_REGION_WIDTH 2
920#define TX_KER_BUF_REGION0_DECODE 0
921#define TX_KER_BUF_REGION1_DECODE 1
922#define TX_KER_BUF_REGION2_DECODE 2
923#define TX_KER_BUF_REGION3_DECODE 3
924#define TX_KER_BUF_ADR_LBN 0
925#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
926
927/* Receive descriptor */
928#define RX_KER_BUF_SIZE_LBN 48
929#define RX_KER_BUF_SIZE_WIDTH 14
930#define RX_KER_BUF_REGION_LBN 46
931#define RX_KER_BUF_REGION_WIDTH 2
932#define RX_KER_BUF_REGION0_DECODE 0
933#define RX_KER_BUF_REGION1_DECODE 1
934#define RX_KER_BUF_REGION2_DECODE 2
935#define RX_KER_BUF_REGION3_DECODE 3
936#define RX_KER_BUF_ADR_LBN 0
937#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
938
939/**************************************************************************
940 *
941 * Falcon events
942 *
943 **************************************************************************
944 */
945
946/* Event queue entries */
947#define EV_CODE_LBN 60
948#define EV_CODE_WIDTH 4
949#define RX_IP_EV_DECODE 0
950#define TX_IP_EV_DECODE 2
951#define DRIVER_EV_DECODE 5
952#define GLOBAL_EV_DECODE 6
953#define DRV_GEN_EV_DECODE 7
954#define WHOLE_EVENT_LBN 0
955#define WHOLE_EVENT_WIDTH 64
956
957/* Receive events */
958#define RX_EV_PKT_OK_LBN 56
959#define RX_EV_PKT_OK_WIDTH 1
960#define RX_EV_PAUSE_FRM_ERR_LBN 55
961#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
962#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
963#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
964#define RX_EV_IF_FRAG_ERR_LBN 53
965#define RX_EV_IF_FRAG_ERR_WIDTH 1
966#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
967#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
968#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
969#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
970#define RX_EV_ETH_CRC_ERR_LBN 50
971#define RX_EV_ETH_CRC_ERR_WIDTH 1
972#define RX_EV_FRM_TRUNC_LBN 49
973#define RX_EV_FRM_TRUNC_WIDTH 1
974#define RX_EV_DRIB_NIB_LBN 48
975#define RX_EV_DRIB_NIB_WIDTH 1
976#define RX_EV_TOBE_DISC_LBN 47
977#define RX_EV_TOBE_DISC_WIDTH 1
978#define RX_EV_PKT_TYPE_LBN 44
979#define RX_EV_PKT_TYPE_WIDTH 3
980#define RX_EV_PKT_TYPE_ETH_DECODE 0
981#define RX_EV_PKT_TYPE_LLC_DECODE 1
982#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
983#define RX_EV_PKT_TYPE_VLAN_DECODE 3
984#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
985#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
986#define RX_EV_HDR_TYPE_LBN 42
987#define RX_EV_HDR_TYPE_WIDTH 2
988#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
989#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
990#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
991#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
992#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
993 ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
994#define RX_EV_MCAST_HASH_MATCH_LBN 40
995#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
996#define RX_EV_MCAST_PKT_LBN 39
997#define RX_EV_MCAST_PKT_WIDTH 1
998#define RX_EV_Q_LABEL_LBN 32
999#define RX_EV_Q_LABEL_WIDTH 5
1000#define RX_EV_JUMBO_CONT_LBN 31
1001#define RX_EV_JUMBO_CONT_WIDTH 1
1002#define RX_EV_BYTE_CNT_LBN 16
1003#define RX_EV_BYTE_CNT_WIDTH 14
1004#define RX_EV_SOP_LBN 15
1005#define RX_EV_SOP_WIDTH 1
1006#define RX_EV_DESC_PTR_LBN 0
1007#define RX_EV_DESC_PTR_WIDTH 12
1008
1009/* Transmit events */
1010#define TX_EV_PKT_ERR_LBN 38
1011#define TX_EV_PKT_ERR_WIDTH 1
1012#define TX_EV_Q_LABEL_LBN 32
1013#define TX_EV_Q_LABEL_WIDTH 5
1014#define TX_EV_WQ_FF_FULL_LBN 15
1015#define TX_EV_WQ_FF_FULL_WIDTH 1
1016#define TX_EV_COMP_LBN 12
1017#define TX_EV_COMP_WIDTH 1
1018#define TX_EV_DESC_PTR_LBN 0
1019#define TX_EV_DESC_PTR_WIDTH 12
1020
1021/* Driver events */
1022#define DRIVER_EV_SUB_CODE_LBN 56
1023#define DRIVER_EV_SUB_CODE_WIDTH 4
1024#define DRIVER_EV_SUB_DATA_LBN 0
1025#define DRIVER_EV_SUB_DATA_WIDTH 14
1026#define TX_DESCQ_FLS_DONE_EV_DECODE 0
1027#define RX_DESCQ_FLS_DONE_EV_DECODE 1
1028#define EVQ_INIT_DONE_EV_DECODE 2
1029#define EVQ_NOT_EN_EV_DECODE 3
1030#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
1031#define SRM_UPD_DONE_EV_DECODE 5
1032#define WAKE_UP_EV_DECODE 6
1033#define TX_PKT_NON_TCP_UDP_DECODE 9
1034#define TIMER_EV_DECODE 10
1035#define RX_RECOVERY_EV_DECODE 11
1036#define RX_DSC_ERROR_EV_DECODE 14
1037#define TX_DSC_ERROR_EV_DECODE 15
1038#define DRIVER_EV_TX_DESCQ_ID_LBN 0
1039#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
1040#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
1041#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
1042#define DRIVER_EV_RX_DESCQ_ID_LBN 0
1043#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
1044#define SRM_CLR_EV_DECODE 0
1045#define SRM_UPD_EV_DECODE 1
1046#define SRM_ILLCLR_EV_DECODE 2
1047
1048/* Global events */
1049#define RX_RECOVERY_B0_LBN 12
1050#define RX_RECOVERY_B0_WIDTH 1
1051#define XG_MNT_INTR_B0_LBN 11
1052#define XG_MNT_INTR_B0_WIDTH 1
1053#define RX_RECOVERY_A1_LBN 11
1054#define RX_RECOVERY_A1_WIDTH 1
1055#define XFP_PHY_INTR_LBN 10
1056#define XFP_PHY_INTR_WIDTH 1
1057#define XG_PHY_INTR_LBN 9
1058#define XG_PHY_INTR_WIDTH 1
1059#define G_PHY1_INTR_LBN 8
1060#define G_PHY1_INTR_WIDTH 1
1061#define G_PHY0_INTR_LBN 7
1062#define G_PHY0_INTR_WIDTH 1
1063
1064/* Driver-generated test events */
1065#define EVQ_MAGIC_LBN 0
1066#define EVQ_MAGIC_WIDTH 32
1067
1068/**************************************************************************
1069 *
1070 * Falcon MAC stats
1071 *
1072 **************************************************************************
1073 *
1074 */
1075
1076#define GRxGoodOct_offset 0x0
1077#define GRxGoodOct_WIDTH 48
1078#define GRxBadOct_offset 0x8
1079#define GRxBadOct_WIDTH 48
1080#define GRxMissPkt_offset 0x10
1081#define GRxMissPkt_WIDTH 32
1082#define GRxFalseCRS_offset 0x14
1083#define GRxFalseCRS_WIDTH 32
1084#define GRxPausePkt_offset 0x18
1085#define GRxPausePkt_WIDTH 32
1086#define GRxBadPkt_offset 0x1C
1087#define GRxBadPkt_WIDTH 32
1088#define GRxUcastPkt_offset 0x20
1089#define GRxUcastPkt_WIDTH 32
1090#define GRxMcastPkt_offset 0x24
1091#define GRxMcastPkt_WIDTH 32
1092#define GRxBcastPkt_offset 0x28
1093#define GRxBcastPkt_WIDTH 32
1094#define GRxGoodLt64Pkt_offset 0x2C
1095#define GRxGoodLt64Pkt_WIDTH 32
1096#define GRxBadLt64Pkt_offset 0x30
1097#define GRxBadLt64Pkt_WIDTH 32
1098#define GRx64Pkt_offset 0x34
1099#define GRx64Pkt_WIDTH 32
1100#define GRx65to127Pkt_offset 0x38
1101#define GRx65to127Pkt_WIDTH 32
1102#define GRx128to255Pkt_offset 0x3C
1103#define GRx128to255Pkt_WIDTH 32
1104#define GRx256to511Pkt_offset 0x40
1105#define GRx256to511Pkt_WIDTH 32
1106#define GRx512to1023Pkt_offset 0x44
1107#define GRx512to1023Pkt_WIDTH 32
1108#define GRx1024to15xxPkt_offset 0x48
1109#define GRx1024to15xxPkt_WIDTH 32
1110#define GRx15xxtoJumboPkt_offset 0x4C
1111#define GRx15xxtoJumboPkt_WIDTH 32
1112#define GRxGtJumboPkt_offset 0x50
1113#define GRxGtJumboPkt_WIDTH 32
1114#define GRxFcsErr64to15xxPkt_offset 0x54
1115#define GRxFcsErr64to15xxPkt_WIDTH 32
1116#define GRxFcsErr15xxtoJumboPkt_offset 0x58
1117#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
1118#define GRxFcsErrGtJumboPkt_offset 0x5C
1119#define GRxFcsErrGtJumboPkt_WIDTH 32
1120#define GTxGoodBadOct_offset 0x80
1121#define GTxGoodBadOct_WIDTH 48
1122#define GTxGoodOct_offset 0x88
1123#define GTxGoodOct_WIDTH 48
1124#define GTxSglColPkt_offset 0x90
1125#define GTxSglColPkt_WIDTH 32
1126#define GTxMultColPkt_offset 0x94
1127#define GTxMultColPkt_WIDTH 32
1128#define GTxExColPkt_offset 0x98
1129#define GTxExColPkt_WIDTH 32
1130#define GTxDefPkt_offset 0x9C
1131#define GTxDefPkt_WIDTH 32
1132#define GTxLateCol_offset 0xA0
1133#define GTxLateCol_WIDTH 32
1134#define GTxExDefPkt_offset 0xA4
1135#define GTxExDefPkt_WIDTH 32
1136#define GTxPausePkt_offset 0xA8
1137#define GTxPausePkt_WIDTH 32
1138#define GTxBadPkt_offset 0xAC
1139#define GTxBadPkt_WIDTH 32
1140#define GTxUcastPkt_offset 0xB0
1141#define GTxUcastPkt_WIDTH 32
1142#define GTxMcastPkt_offset 0xB4
1143#define GTxMcastPkt_WIDTH 32
1144#define GTxBcastPkt_offset 0xB8
1145#define GTxBcastPkt_WIDTH 32
1146#define GTxLt64Pkt_offset 0xBC
1147#define GTxLt64Pkt_WIDTH 32
1148#define GTx64Pkt_offset 0xC0
1149#define GTx64Pkt_WIDTH 32
1150#define GTx65to127Pkt_offset 0xC4
1151#define GTx65to127Pkt_WIDTH 32
1152#define GTx128to255Pkt_offset 0xC8
1153#define GTx128to255Pkt_WIDTH 32
1154#define GTx256to511Pkt_offset 0xCC
1155#define GTx256to511Pkt_WIDTH 32
1156#define GTx512to1023Pkt_offset 0xD0
1157#define GTx512to1023Pkt_WIDTH 32
1158#define GTx1024to15xxPkt_offset 0xD4
1159#define GTx1024to15xxPkt_WIDTH 32
1160#define GTx15xxtoJumboPkt_offset 0xD8
1161#define GTx15xxtoJumboPkt_WIDTH 32
1162#define GTxGtJumboPkt_offset 0xDC
1163#define GTxGtJumboPkt_WIDTH 32
1164#define GTxNonTcpUdpPkt_offset 0xE0
1165#define GTxNonTcpUdpPkt_WIDTH 16
1166#define GTxMacSrcErrPkt_offset 0xE4
1167#define GTxMacSrcErrPkt_WIDTH 16
1168#define GTxIpSrcErrPkt_offset 0xE8
1169#define GTxIpSrcErrPkt_WIDTH 16
1170#define GDmaDone_offset 0xEC
1171#define GDmaDone_WIDTH 32
1172
1173#define XgRxOctets_offset 0x0
1174#define XgRxOctets_WIDTH 48
1175#define XgRxOctetsOK_offset 0x8
1176#define XgRxOctetsOK_WIDTH 48
1177#define XgRxPkts_offset 0x10
1178#define XgRxPkts_WIDTH 32
1179#define XgRxPktsOK_offset 0x14
1180#define XgRxPktsOK_WIDTH 32
1181#define XgRxBroadcastPkts_offset 0x18
1182#define XgRxBroadcastPkts_WIDTH 32
1183#define XgRxMulticastPkts_offset 0x1C
1184#define XgRxMulticastPkts_WIDTH 32
1185#define XgRxUnicastPkts_offset 0x20
1186#define XgRxUnicastPkts_WIDTH 32
1187#define XgRxUndersizePkts_offset 0x24
1188#define XgRxUndersizePkts_WIDTH 32
1189#define XgRxOversizePkts_offset 0x28
1190#define XgRxOversizePkts_WIDTH 32
1191#define XgRxJabberPkts_offset 0x2C
1192#define XgRxJabberPkts_WIDTH 32
1193#define XgRxUndersizeFCSerrorPkts_offset 0x30
1194#define XgRxUndersizeFCSerrorPkts_WIDTH 32
1195#define XgRxDropEvents_offset 0x34
1196#define XgRxDropEvents_WIDTH 32
1197#define XgRxFCSerrorPkts_offset 0x38
1198#define XgRxFCSerrorPkts_WIDTH 32
1199#define XgRxAlignError_offset 0x3C
1200#define XgRxAlignError_WIDTH 32
1201#define XgRxSymbolError_offset 0x40
1202#define XgRxSymbolError_WIDTH 32
1203#define XgRxInternalMACError_offset 0x44
1204#define XgRxInternalMACError_WIDTH 32
1205#define XgRxControlPkts_offset 0x48
1206#define XgRxControlPkts_WIDTH 32
1207#define XgRxPausePkts_offset 0x4C
1208#define XgRxPausePkts_WIDTH 32
1209#define XgRxPkts64Octets_offset 0x50
1210#define XgRxPkts64Octets_WIDTH 32
1211#define XgRxPkts65to127Octets_offset 0x54
1212#define XgRxPkts65to127Octets_WIDTH 32
1213#define XgRxPkts128to255Octets_offset 0x58
1214#define XgRxPkts128to255Octets_WIDTH 32
1215#define XgRxPkts256to511Octets_offset 0x5C
1216#define XgRxPkts256to511Octets_WIDTH 32
1217#define XgRxPkts512to1023Octets_offset 0x60
1218#define XgRxPkts512to1023Octets_WIDTH 32
1219#define XgRxPkts1024to15xxOctets_offset 0x64
1220#define XgRxPkts1024to15xxOctets_WIDTH 32
1221#define XgRxPkts15xxtoMaxOctets_offset 0x68
1222#define XgRxPkts15xxtoMaxOctets_WIDTH 32
1223#define XgRxLengthError_offset 0x6C
1224#define XgRxLengthError_WIDTH 32
1225#define XgTxPkts_offset 0x80
1226#define XgTxPkts_WIDTH 32
1227#define XgTxOctets_offset 0x88
1228#define XgTxOctets_WIDTH 48
1229#define XgTxMulticastPkts_offset 0x90
1230#define XgTxMulticastPkts_WIDTH 32
1231#define XgTxBroadcastPkts_offset 0x94
1232#define XgTxBroadcastPkts_WIDTH 32
1233#define XgTxUnicastPkts_offset 0x98
1234#define XgTxUnicastPkts_WIDTH 32
1235#define XgTxControlPkts_offset 0x9C
1236#define XgTxControlPkts_WIDTH 32
1237#define XgTxPausePkts_offset 0xA0
1238#define XgTxPausePkts_WIDTH 32
1239#define XgTxPkts64Octets_offset 0xA4
1240#define XgTxPkts64Octets_WIDTH 32
1241#define XgTxPkts65to127Octets_offset 0xA8
1242#define XgTxPkts65to127Octets_WIDTH 32
1243#define XgTxPkts128to255Octets_offset 0xAC
1244#define XgTxPkts128to255Octets_WIDTH 32
1245#define XgTxPkts256to511Octets_offset 0xB0
1246#define XgTxPkts256to511Octets_WIDTH 32
1247#define XgTxPkts512to1023Octets_offset 0xB4
1248#define XgTxPkts512to1023Octets_WIDTH 32
1249#define XgTxPkts1024to15xxOctets_offset 0xB8
1250#define XgTxPkts1024to15xxOctets_WIDTH 32
1251#define XgTxPkts1519toMaxOctets_offset 0xBC
1252#define XgTxPkts1519toMaxOctets_WIDTH 32
1253#define XgTxUndersizePkts_offset 0xC0
1254#define XgTxUndersizePkts_WIDTH 32
1255#define XgTxOversizePkts_offset 0xC4
1256#define XgTxOversizePkts_WIDTH 32
1257#define XgTxNonTcpUdpPkt_offset 0xC8
1258#define XgTxNonTcpUdpPkt_WIDTH 16
1259#define XgTxMacSrcErrPkt_offset 0xCC
1260#define XgTxMacSrcErrPkt_WIDTH 16
1261#define XgTxIpSrcErrPkt_offset 0xD0
1262#define XgTxIpSrcErrPkt_WIDTH 16
1263#define XgDmaDone_offset 0xD4
1264
1265#define FALCON_STATS_NOT_DONE 0x00000000
1266#define FALCON_STATS_DONE 0xffffffff
1267
1268/* Interrupt status register bits */
1269#define FATAL_INT_LBN 64
1270#define FATAL_INT_WIDTH 1
1271#define INT_EVQS_LBN 40
1272#define INT_EVQS_WIDTH 4
1273
1274/**************************************************************************
1275 *
1276 * Falcon non-volatile configuration
1277 *
1278 **************************************************************************
1279 */
1280
1281/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
/* On-flash layout: multi-byte fields are little-endian and the struct is
 * __packed, so there is no compiler-inserted padding. */
1282struct falcon_nvconfig_board_v2 {
1283 __le16 nports; /* number of ports fitted (port0/port1 fields below) */
1284 u8 port0_phy_addr; /* presumably the MDIO address — confirm in PHY probe code */
1285 u8 port0_phy_type;
1286 u8 port1_phy_addr;
1287 u8 port1_phy_type;
1288 __le16 asic_sub_revision;
1289 __le16 board_revision;
1290} __packed;
1291
1292/* Board configuration v3 extra information */
/* Appended after the v2 section; __packed on-flash layout. */
1293struct falcon_nvconfig_board_v3 {
1294 __le32 spi_device_type[2]; /* decoded via the SPI_DEV_TYPE_* fields below */
1295} __packed;
1296
1297/* Bit numbers for spi_device_type */
1298#define SPI_DEV_TYPE_SIZE_LBN 0
1299#define SPI_DEV_TYPE_SIZE_WIDTH 5
1300#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
1301#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
1302#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
1303#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
1304#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
1305#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
1306#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
1307#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
/* Extract one field from a spi_device_type word. Presumably
 * EFX_LOW_BIT()/EFX_WIDTH() token-paste the _LBN/_WIDTH suffixes onto
 * 'field' — confirm against the driver's bitfield macros. */
1308#define SPI_DEV_TYPE_FIELD(type, field) \
1309 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
1310
1311#define NVCONFIG_OFFSET 0x300
1312
1313#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* Layout of the non-volatile configuration region, starting at flash
 * offset NVCONFIG_OFFSET (0x300). The trailing hex comments give each
 * field's absolute flash offset. board_magic_num must equal
 * NVCONFIG_BOARD_MAGIC_NUM for the board section to be valid —
 * presumably checked together with board_checksum by the nvconfig
 * reader; confirm there. */
1314struct falcon_nvconfig {
1315 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
1316 u8 mac_address[2][8]; /* 0x310 */
1317 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
1318 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
1319 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
1320 efx_oword_t hw_init_reg; /* 0x350 */
1321 efx_oword_t nic_stat_reg; /* 0x360 */
1322 efx_oword_t glb_ctl_reg; /* 0x370 */
1323 efx_oword_t srm_cfg_reg; /* 0x380 */
1324 efx_oword_t spare_reg; /* 0x390 */
1325 __le16 board_magic_num; /* 0x3A0 */
1326 __le16 board_struct_ver;
1327 __le16 board_checksum;
1328 struct falcon_nvconfig_board_v2 board_v2;
1329 efx_oword_t ee_base_page_reg; /* 0x3B0 */
1330 struct falcon_nvconfig_board_v3 board_v3;
1331} __packed;
1332
1333#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
deleted file mode 100644
index 8883092dae97..000000000000
--- a/drivers/net/sfc/falcon_io.h
+++ /dev/null
@@ -1,258 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_IO_H
12#define EFX_FALCON_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16
17/**************************************************************************
18 *
19 * Falcon hardware access
20 *
21 **************************************************************************
22 *
23 * Notes on locking strategy:
24 *
25 * Most Falcon registers require 16-byte (or 8-byte, for SRAM
26 * registers) atomic writes which necessitates locking.
27 * Under normal operation few writes to the Falcon BAR are made and these
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
29 * cased to allow 4-byte (hence lockless) accesses.
30 *
31 * It *is* safe to write to these 4-byte registers in the middle of an
32 * access to an 8-byte or 16-byte register. We therefore use a
33 * spinlock to protect accesses to the larger registers, but no locks
34 * for the 4-byte registers.
35 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
37 * due to the way the 16-byte registers are "collected" in the Falcon BIU
38 *
39 * We also lock when carrying out reads, to ensure consistency of the
40 * data (made possible since the BIU reads all 128 bits into a cache).
41 * Reads are very rare, so this isn't a significant performance
42 * impact. (Most data transferred from NIC to host is DMAed directly
43 * into host memory).
44 *
45 * I/O BAR access uses locks for both reads and writes (but is only provided
46 * for testing purposes).
47 */
48
49/* Special buffer descriptors (Falcon SRAM) */
50#define BUF_TBL_KER_A1 0x18000
51#define BUF_TBL_KER_B0 0x800000
52
53
54#if BITS_PER_LONG == 64
/* On 64-bit kernels a qword MMIO access is available, so the 8-byte
 * paths below can use single __raw_readq/__raw_writeq accesses instead
 * of pairs of 4-byte accesses. */
55#define FALCON_USE_QWORD_IO 1
56#endif
57
58#ifdef FALCON_USE_QWORD_IO
/* Raw 8-byte MMIO write of a little-endian value to BAR offset 'reg'.
 * No locking and no barriers here — callers supply both. */
59static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
60 unsigned int reg)
61{
62 __raw_writeq((__force u64)value, efx->membase + reg);
63}
/* Raw 8-byte MMIO read from BAR offset 'reg'; result is little-endian. */
64static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
65{
66 return (__force __le64)__raw_readq(efx->membase + reg);
67}
68#endif
69
/* Raw 4-byte MMIO write of a little-endian value to BAR offset 'reg'.
 * No locking and no barriers here — callers supply both. */
70static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
71 unsigned int reg)
72{
73 __raw_writel((__force u32)value, efx->membase + reg);
74}
/* Raw 4-byte MMIO read from BAR offset 'reg'; result is little-endian. */
75static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
76{
77 return (__force __le32)__raw_readl(efx->membase + reg);
78}
79
80/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
/* The 16 bytes must reach the BIU as one atomic write, so the whole
 * sequence is serialised on efx->biu_lock (see locking notes above). */
81static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
82 unsigned int reg)
83{
84 unsigned long flags;
85
86 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
87 EFX_OWORD_VAL(*value));
88
89 spin_lock_irqsave(&efx->biu_lock, flags);
90#ifdef FALCON_USE_QWORD_IO
91 _falcon_writeq(efx, value->u64[0], reg + 0);
 /* The high qword must land last: it completes the "collected"
 * 16-byte write in the Falcon BIU (see header comment). */
92 wmb();
93 _falcon_writeq(efx, value->u64[1], reg + 8);
94#else
95 _falcon_writel(efx, value->u32[0], reg + 0);
96 _falcon_writel(efx, value->u32[1], reg + 4);
97 _falcon_writel(efx, value->u32[2], reg + 8);
 /* DW3 must be written after DW0/1/2 — it triggers the collected
 * 16-byte write in the Falcon BIU. */
98 wmb();
99 _falcon_writel(efx, value->u32[3], reg + 12);
100#endif
 /* Order the MMIO writes before the unlock as seen by other CPUs. */
101 mmiowb();
102 spin_unlock_irqrestore(&efx->biu_lock, flags);
103}
104
105/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
/* 'index' is a buffer-table entry index; the register offset is derived
 * from the per-NIC-type buffer table base. The 8 bytes must be written
 * atomically, hence the biu_lock. */
106static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
107 unsigned int index)
108{
109 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
110 unsigned long flags;
111
112 EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
113 reg, EFX_QWORD_VAL(*value));
114
115 spin_lock_irqsave(&efx->biu_lock, flags);
116#ifdef FALCON_USE_QWORD_IO
117 _falcon_writeq(efx, value->u64[0], reg + 0);
118#else
119 _falcon_writel(efx, value->u32[0], reg + 0);
 /* High dword last — it completes the collected 8-byte write. */
120 wmb();
121 _falcon_writel(efx, value->u32[1], reg + 4);
122#endif
 /* Order the MMIO writes before the unlock as seen by other CPUs. */
123 mmiowb();
124 spin_unlock_irqrestore(&efx->biu_lock, flags);
125}
126
127/* Write dword to Falcon register that allows partial writes
128 *
129 * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
130 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
131 * for lockless writes.
132 */
/* Safe even while another CPU holds biu_lock for a 16-byte write —
 * see the locking notes at the top of this file. */
133static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
134 unsigned int reg)
135{
136 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
137 reg, EFX_DWORD_VAL(*value));
138
139 /* No lock required */
140 _falcon_writel(efx, value->u32[0], reg);
141}
142
143/* Read from a Falcon register
144 *
145 * This reads an entire 16-byte Falcon register in one go, locking as
146 * appropriate. It is essential to read the first dword first, as this
147 * prompts Falcon to load the current value into the shadow register.
148 */
149static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
150 unsigned int reg)
151{
152 unsigned long flags;
153
154 spin_lock_irqsave(&efx->biu_lock, flags);
155 value->u32[0] = _falcon_readl(efx, reg + 0);
 /* The dword-0 read latched the full value into the BIU shadow;
 * keep the remaining reads ordered after it. */
156 rmb();
157 value->u32[1] = _falcon_readl(efx, reg + 4);
158 value->u32[2] = _falcon_readl(efx, reg + 8);
159 value->u32[3] = _falcon_readl(efx, reg + 12);
160 spin_unlock_irqrestore(&efx->biu_lock, flags);
161
162 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
163 EFX_OWORD_VAL(*value));
164}
165
166/* This reads an 8-byte Falcon SRAM entry in one go. */
/* 'index' is a buffer-table entry index (same addressing as
 * falcon_write_sram()); the read is serialised on biu_lock. */
167static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
168 unsigned int index)
169{
170 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
171 unsigned long flags;
172
173 spin_lock_irqsave(&efx->biu_lock, flags);
174#ifdef FALCON_USE_QWORD_IO
175 value->u64[0] = _falcon_readq(efx, reg + 0);
176#else
177 value->u32[0] = _falcon_readl(efx, reg + 0);
 /* Keep the high-dword read ordered after the low-dword read. */
178 rmb();
179 value->u32[1] = _falcon_readl(efx, reg + 4);
180#endif
181 spin_unlock_irqrestore(&efx->biu_lock, flags);
182
183 EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
184 reg, EFX_QWORD_VAL(*value));
185}
186
187/* Read dword from Falcon register that allows partial writes (sic) */
/* Lockless, like falcon_writel() — a single 4-byte access needs no
 * BIU serialisation (see locking notes above). */
188static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
189 unsigned int reg)
190{
191 value->u32[0] = _falcon_readl(efx, reg);
192 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
193 reg, EFX_DWORD_VAL(*value));
194}
195
196/* Write to a register forming part of a table */
/* Entry 'index' lives at reg + index * sizeof(efx_oword_t); takes
 * biu_lock via falcon_write(). */
197static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
198 unsigned int reg, unsigned int index)
199{
200 falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
201}
202
203/* Read to a register forming part of a table */
/* Entry 'index' lives at reg + index * sizeof(efx_oword_t); takes
 * biu_lock via falcon_read(). */
204static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
205 unsigned int reg, unsigned int index)
206{
207 falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
208}
209
210/* Write to a dword register forming part of a table */
/* NOTE(review): the stride is still sizeof(efx_oword_t) — table entries
 * appear to be oword-spaced even though only one dword is written;
 * confirm against the table layout. Lockless via falcon_writel(). */
211static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
212 unsigned int reg, unsigned int index)
213{
214 falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
215}
216
217/* Page-mapped register block size */
218#define FALCON_PAGE_BLOCK_SIZE 0x2000
219
220/* Calculate offset to page-mapped register block */
/* Presumably each page repeats the page-0 register layout at a
 * FALCON_PAGE_BLOCK_SIZE stride — offset = page * block size + reg. */
221#define FALCON_PAGED_REG(page, reg) \
222 ((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
223
224/* As for falcon_write(), but for a page-mapped register. */
/* Takes biu_lock via falcon_write(). */
225static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
226 unsigned int reg, unsigned int page)
227{
228 falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
229}
230
231/* As for falcon_writel(), but for a page-mapped register. */
/* Lockless via falcon_writel(); see falcon_writel_page_locked() for the
 * page-0 variant that needs biu_lock. */
232static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
233 unsigned int reg, unsigned int page)
234{
235 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
236}
237
238/* Write dword to Falcon page-mapped register with an extra lock.
239 *
240 * As for falcon_writel_page(), but for a register that suffers from
241 * SFC bug 3181. If writing to page 0, take out a lock so the BIU
242 * collector cannot be confused.
243 */
244static inline void falcon_writel_page_locked(struct efx_nic *efx,
245 efx_dword_t *value,
246 unsigned int reg,
247 unsigned int page)
248{
 /* Initialised because lock and unlock are both conditional —
 * presumably also quiets "may be used uninitialized" warnings. */
249 unsigned long flags = 0;
250
251 if (page == 0)
252 spin_lock_irqsave(&efx->biu_lock, flags);
253 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
254 if (page == 0)
255 spin_unlock_irqrestore(&efx->biu_lock, flags);
256}
257
258#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index bec52ca37eee..7e57b4a54b37 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -12,12 +12,11 @@
12#include "net_driver.h" 12#include "net_driver.h"
13#include "efx.h" 13#include "efx.h"
14#include "falcon.h" 14#include "falcon.h"
15#include "falcon_hwdefs.h" 15#include "regs.h"
16#include "falcon_io.h" 16#include "io.h"
17#include "mac.h" 17#include "mac.h"
18#include "mdio_10g.h" 18#include "mdio_10g.h"
19#include "phy.h" 19#include "phy.h"
20#include "boards.h"
21#include "workarounds.h" 20#include "workarounds.h"
22 21
23/************************************************************************** 22/**************************************************************************
@@ -36,27 +35,27 @@ static void falcon_setup_xaui(struct efx_nic *efx)
36 if (efx->phy_type == PHY_TYPE_NONE) 35 if (efx->phy_type == PHY_TYPE_NONE)
37 return; 36 return;
38 37
39 falcon_read(efx, &sdctl, XX_SD_CTL_REG); 38 efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
40 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT); 39 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
41 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT); 40 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
42 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT); 41 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
43 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT); 42 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
44 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT); 43 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
45 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT); 44 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
46 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT); 45 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
47 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT); 46 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
48 falcon_write(efx, &sdctl, XX_SD_CTL_REG); 47 efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
49 48
50 EFX_POPULATE_OWORD_8(txdrv, 49 EFX_POPULATE_OWORD_8(txdrv,
51 XX_DEQD, XX_TXDRV_DEQ_DEFAULT, 50 FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
52 XX_DEQC, XX_TXDRV_DEQ_DEFAULT, 51 FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
53 XX_DEQB, XX_TXDRV_DEQ_DEFAULT, 52 FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
54 XX_DEQA, XX_TXDRV_DEQ_DEFAULT, 53 FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
55 XX_DTXD, XX_TXDRV_DTX_DEFAULT, 54 FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
56 XX_DTXC, XX_TXDRV_DTX_DEFAULT, 55 FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
57 XX_DTXB, XX_TXDRV_DTX_DEFAULT, 56 FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
58 XX_DTXA, XX_TXDRV_DTX_DEFAULT); 57 FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
59 falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG); 58 efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
60} 59}
61 60
62int falcon_reset_xaui(struct efx_nic *efx) 61int falcon_reset_xaui(struct efx_nic *efx)
@@ -65,14 +64,14 @@ int falcon_reset_xaui(struct efx_nic *efx)
65 int count; 64 int count;
66 65
67 /* Start reset sequence */ 66 /* Start reset sequence */
68 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); 67 EFX_POPULATE_DWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
69 falcon_write(efx, &reg, XX_PWR_RST_REG); 68 efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
70 69
71 /* Wait up to 10 ms for completion, then reinitialise */ 70 /* Wait up to 10 ms for completion, then reinitialise */
72 for (count = 0; count < 1000; count++) { 71 for (count = 0; count < 1000; count++) {
73 falcon_read(efx, &reg, XX_PWR_RST_REG); 72 efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
74 if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 && 73 if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
75 EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) { 74 EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
76 falcon_setup_xaui(efx); 75 falcon_setup_xaui(efx);
77 return 0; 76 return 0;
78 } 77 }
@@ -100,12 +99,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
100 99
101 /* Flush the ISR */ 100 /* Flush the ISR */
102 if (enable) 101 if (enable)
103 falcon_read(efx, &reg, XM_MGT_INT_REG_B0); 102 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
104 103
105 EFX_POPULATE_OWORD_2(reg, 104 EFX_POPULATE_OWORD_2(reg,
106 XM_MSK_RMTFLT, !enable, 105 FRF_AB_XM_MSK_RMTFLT, !enable,
107 XM_MSK_LCLFLT, !enable); 106 FRF_AB_XM_MSK_LCLFLT, !enable);
108 falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0); 107 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
109} 108}
110 109
111/* Get status of XAUI link */ 110/* Get status of XAUI link */
@@ -119,18 +118,18 @@ bool falcon_xaui_link_ok(struct efx_nic *efx)
119 return true; 118 return true;
120 119
121 /* Read link status */ 120 /* Read link status */
122 falcon_read(efx, &reg, XX_CORE_STAT_REG); 121 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
123 122
124 align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE); 123 align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
125 sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT); 124 sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
126 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED)) 125 if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
127 link_ok = true; 126 link_ok = true;
128 127
129 /* Clear link status ready for next read */ 128 /* Clear link status ready for next read */
130 EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET); 129 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
131 EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET); 130 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
132 EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); 131 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
133 falcon_write(efx, &reg, XX_CORE_STAT_REG); 132 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
134 133
135 /* If the link is up, then check the phy side of the xaui link */ 134 /* If the link is up, then check the phy side of the xaui link */
136 if (efx->link_up && link_ok) 135 if (efx->link_up && link_ok)
@@ -148,55 +147,49 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
148 147
149 /* Configure MAC - cut-thru mode is hard wired on */ 148 /* Configure MAC - cut-thru mode is hard wired on */
150 EFX_POPULATE_DWORD_3(reg, 149 EFX_POPULATE_DWORD_3(reg,
151 XM_RX_JUMBO_MODE, 1, 150 FRF_AB_XM_RX_JUMBO_MODE, 1,
152 XM_TX_STAT_EN, 1, 151 FRF_AB_XM_TX_STAT_EN, 1,
153 XM_RX_STAT_EN, 1); 152 FRF_AB_XM_RX_STAT_EN, 1);
154 falcon_write(efx, &reg, XM_GLB_CFG_REG); 153 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
155 154
156 /* Configure TX */ 155 /* Configure TX */
157 EFX_POPULATE_DWORD_6(reg, 156 EFX_POPULATE_DWORD_6(reg,
158 XM_TXEN, 1, 157 FRF_AB_XM_TXEN, 1,
159 XM_TX_PRMBL, 1, 158 FRF_AB_XM_TX_PRMBL, 1,
160 XM_AUTO_PAD, 1, 159 FRF_AB_XM_AUTO_PAD, 1,
161 XM_TXCRC, 1, 160 FRF_AB_XM_TXCRC, 1,
162 XM_FCNTL, 1, 161 FRF_AB_XM_FCNTL, 1,
163 XM_IPG, 0x3); 162 FRF_AB_XM_IPG, 0x3);
164 falcon_write(efx, &reg, XM_TX_CFG_REG); 163 efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
165 164
166 /* Configure RX */ 165 /* Configure RX */
167 EFX_POPULATE_DWORD_5(reg, 166 EFX_POPULATE_DWORD_5(reg,
168 XM_RXEN, 1, 167 FRF_AB_XM_RXEN, 1,
169 XM_AUTO_DEPAD, 0, 168 FRF_AB_XM_AUTO_DEPAD, 0,
170 XM_ACPT_ALL_MCAST, 1, 169 FRF_AB_XM_ACPT_ALL_MCAST, 1,
171 XM_ACPT_ALL_UCAST, efx->promiscuous, 170 FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
172 XM_PASS_CRC_ERR, 1); 171 FRF_AB_XM_PASS_CRC_ERR, 1);
173 falcon_write(efx, &reg, XM_RX_CFG_REG); 172 efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
174 173
175 /* Set frame length */ 174 /* Set frame length */
176 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 175 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
177 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len); 176 EFX_POPULATE_DWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
178 falcon_write(efx, &reg, XM_RX_PARAM_REG); 177 efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
179 EFX_POPULATE_DWORD_2(reg, 178 EFX_POPULATE_DWORD_2(reg,
180 XM_MAX_TX_FRM_SIZE, max_frame_len, 179 FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
181 XM_TX_JUMBO_MODE, 1); 180 FRF_AB_XM_TX_JUMBO_MODE, 1);
182 falcon_write(efx, &reg, XM_TX_PARAM_REG); 181 efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
183 182
184 EFX_POPULATE_DWORD_2(reg, 183 EFX_POPULATE_DWORD_2(reg,
185 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ 184 FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
186 XM_DIS_FCNTL, !rx_fc); 185 FRF_AB_XM_DIS_FCNTL, !rx_fc);
187 falcon_write(efx, &reg, XM_FC_REG); 186 efx_writeo(efx, &reg, FR_AB_XM_FC);
188 187
189 /* Set MAC address */ 188 /* Set MAC address */
190 EFX_POPULATE_DWORD_4(reg, 189 memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
191 XM_ADR_0, efx->net_dev->dev_addr[0], 190 efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
192 XM_ADR_1, efx->net_dev->dev_addr[1], 191 memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
193 XM_ADR_2, efx->net_dev->dev_addr[2], 192 efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
194 XM_ADR_3, efx->net_dev->dev_addr[3]);
195 falcon_write(efx, &reg, XM_ADR_LO_REG);
196 EFX_POPULATE_DWORD_2(reg,
197 XM_ADR_4, efx->net_dev->dev_addr[4],
198 XM_ADR_5, efx->net_dev->dev_addr[5]);
199 falcon_write(efx, &reg, XM_ADR_HI_REG);
200} 193}
201 194
202static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) 195static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
@@ -212,12 +205,13 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
212 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; 205 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
213 bool reset_xgxs; 206 bool reset_xgxs;
214 207
215 falcon_read(efx, &reg, XX_CORE_STAT_REG); 208 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
216 old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN); 209 old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
217 old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN); 210 old_xgmii_loopback =
211 EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
218 212
219 falcon_read(efx, &reg, XX_SD_CTL_REG); 213 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
220 old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA); 214 old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
221 215
222 /* The PHY driver may have turned XAUI off */ 216 /* The PHY driver may have turned XAUI off */
223 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || 217 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
@@ -228,20 +222,20 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
228 falcon_reset_xaui(efx); 222 falcon_reset_xaui(efx);
229 } 223 }
230 224
231 falcon_read(efx, &reg, XX_CORE_STAT_REG); 225 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
232 EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG, 226 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
233 (xgxs_loopback || xaui_loopback) ? 227 (xgxs_loopback || xaui_loopback) ?
234 XX_FORCE_SIG_DECODE_FORCED : 0); 228 FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
235 EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); 229 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
236 EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); 230 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
237 falcon_write(efx, &reg, XX_CORE_STAT_REG); 231 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
238 232
239 falcon_read(efx, &reg, XX_SD_CTL_REG); 233 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
240 EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback); 234 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
241 EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback); 235 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
242 EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback); 236 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
243 EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback); 237 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
244 falcon_write(efx, &reg, XX_SD_CTL_REG); 238 efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
245} 239}
246 240
247 241
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
deleted file mode 100644
index dfccaa7b573e..000000000000
--- a/drivers/net/sfc/gmii.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_GMII_H
12#define EFX_GMII_H
13
14/*
15 * GMII interface
16 */
17
18#include <linux/mii.h>
19
20/* GMII registers, excluding registers already defined as MII
21 * registers in mii.h
22 */
23#define GMII_IER 0x12 /* Interrupt enable register */
24#define GMII_ISR 0x13 /* Interrupt status register */
25
26/* Interrupt enable register */
27#define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
28#define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
29#define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
30#define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */
31#define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
32#define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */
33#define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */
34#define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
35#define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
36#define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
37#define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
38#define IER_ENERGY 0x0010 /* Bit 4 - energy detect */
39#define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
40#define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
41#define IER_JABBER 0x0001 /* Bit 0 - jabber */
42
43/* Interrupt status register */
44#define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
45#define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
46#define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
47#define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */
48#define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
49#define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */
50#define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */
51#define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
52#define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
53#define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
54#define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
55#define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */
56#define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
57#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
58#define ISR_JABBER 0x0001 /* Bit 0 - jabber */
59
60#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
new file mode 100644
index 000000000000..b89177c27f4a
--- /dev/null
+++ b/drivers/net/sfc/io.h
@@ -0,0 +1,256 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_IO_H
12#define EFX_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16
17/**************************************************************************
18 *
19 * NIC register I/O
20 *
21 **************************************************************************
22 *
23 * Notes on locking strategy:
24 *
25 * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes
26 * which necessitates locking.
27 * Under normal operation few writes to NIC registers are made and these
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
29 * cased to allow 4-byte (hence lockless) accesses.
30 *
31 * It *is* safe to write to these 4-byte registers in the middle of an
32 * access to an 8-byte or 16-byte register. We therefore use a
33 * spinlock to protect accesses to the larger registers, but no locks
34 * for the 4-byte registers.
35 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
37 * due to the way the 16byte registers are "collected" in the BIU.
38 *
39 * We also lock when carrying out reads, to ensure consistency of the
40 * data (made possible since the BIU reads all 128 bits into a cache).
41 * Reads are very rare, so this isn't a significant performance
42 * impact. (Most data transferred from NIC to host is DMAed directly
43 * into host memory).
44 *
45 * I/O BAR access uses locks for both reads and writes (but is only provided
46 * for testing purposes).
47 */
48
49#if BITS_PER_LONG == 64
50#define EFX_USE_QWORD_IO 1
51#endif
52
53#ifdef EFX_USE_QWORD_IO
54static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
55 unsigned int reg)
56{
57 __raw_writeq((__force u64)value, efx->membase + reg);
58}
59static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
60{
61 return (__force __le64)__raw_readq(efx->membase + reg);
62}
63#endif
64
65static inline void _efx_writed(struct efx_nic *efx, __le32 value,
66 unsigned int reg)
67{
68 __raw_writel((__force u32)value, efx->membase + reg);
69}
70static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
71{
72 return (__force __le32)__raw_readl(efx->membase + reg);
73}
74
75/* Writes to a normal 16-byte Efx register, locking as appropriate. */
76static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
77 unsigned int reg)
78{
79 unsigned long flags __attribute__ ((unused));
80
81 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
82 EFX_OWORD_VAL(*value));
83
84 spin_lock_irqsave(&efx->biu_lock, flags);
85#ifdef EFX_USE_QWORD_IO
86 _efx_writeq(efx, value->u64[0], reg + 0);
87 wmb();
88 _efx_writeq(efx, value->u64[1], reg + 8);
89#else
90 _efx_writed(efx, value->u32[0], reg + 0);
91 _efx_writed(efx, value->u32[1], reg + 4);
92 _efx_writed(efx, value->u32[2], reg + 8);
93 wmb();
94 _efx_writed(efx, value->u32[3], reg + 12);
95#endif
96 mmiowb();
97 spin_unlock_irqrestore(&efx->biu_lock, flags);
98}
99
100/* Write an 8-byte NIC SRAM entry through the supplied mapping,
101 * locking as appropriate. */
102static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
103 efx_qword_t *value, unsigned int index)
104{
105 unsigned int addr = index * sizeof(*value);
106 unsigned long flags __attribute__ ((unused));
107
108 EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n",
109 addr, EFX_QWORD_VAL(*value));
110
111 spin_lock_irqsave(&efx->biu_lock, flags);
112#ifdef EFX_USE_QWORD_IO
113 __raw_writeq((__force u64)value->u64[0], membase + addr);
114#else
115 __raw_writel((__force u32)value->u32[0], membase + addr);
116 wmb();
117 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
118#endif
119 mmiowb();
120 spin_unlock_irqrestore(&efx->biu_lock, flags);
121}
122
123/* Write dword to NIC register that allows partial writes
124 *
125 * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
126 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
127 * for lockless writes.
128 */
129static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
130 unsigned int reg)
131{
132 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
133 reg, EFX_DWORD_VAL(*value));
134
135 /* No lock required */
136 _efx_writed(efx, value->u32[0], reg);
137}
138
139/* Read from a NIC register
140 *
141 * This reads an entire 16-byte register in one go, locking as
142 * appropriate. It is essential to read the first dword first, as this
143 * prompts the NIC to load the current value into the shadow register.
144 */
145static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
146 unsigned int reg)
147{
148 unsigned long flags __attribute__ ((unused));
149
150 spin_lock_irqsave(&efx->biu_lock, flags);
151 value->u32[0] = _efx_readd(efx, reg + 0);
152 rmb();
153 value->u32[1] = _efx_readd(efx, reg + 4);
154 value->u32[2] = _efx_readd(efx, reg + 8);
155 value->u32[3] = _efx_readd(efx, reg + 12);
156 spin_unlock_irqrestore(&efx->biu_lock, flags);
157
158 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
159 EFX_OWORD_VAL(*value));
160}
161
162/* Read an 8-byte SRAM entry through supplied mapping,
163 * locking as appropriate. */
164static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
165 efx_qword_t *value, unsigned int index)
166{
167 unsigned int addr = index * sizeof(*value);
168 unsigned long flags __attribute__ ((unused));
169
170 spin_lock_irqsave(&efx->biu_lock, flags);
171#ifdef EFX_USE_QWORD_IO
172 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
173#else
174 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
175 rmb();
176 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
177#endif
178 spin_unlock_irqrestore(&efx->biu_lock, flags);
179
180 EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
181 addr, EFX_QWORD_VAL(*value));
182}
183
184/* Read dword from register that allows partial writes (sic) */
185static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
186 unsigned int reg)
187{
188 value->u32[0] = _efx_readd(efx, reg);
189 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
190 reg, EFX_DWORD_VAL(*value));
191}
192
193/* Write to a register forming part of a table */
194static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
195 unsigned int reg, unsigned int index)
196{
197 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
198}
199
200/* Read to a register forming part of a table */
201static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
202 unsigned int reg, unsigned int index)
203{
204 efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
205}
206
207/* Write to a dword register forming part of a table */
208static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
209 unsigned int reg, unsigned int index)
210{
211 efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
212}
213
214/* Page-mapped register block size */
215#define EFX_PAGE_BLOCK_SIZE 0x2000
216
217/* Calculate offset to page-mapped register block */
218#define EFX_PAGED_REG(page, reg) \
219 ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
220
221/* As for efx_writeo(), but for a page-mapped register. */
222static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
223 unsigned int reg, unsigned int page)
224{
225 efx_writeo(efx, value, EFX_PAGED_REG(page, reg));
226}
227
228/* As for efx_writed(), but for a page-mapped register. */
229static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
230 unsigned int reg, unsigned int page)
231{
232 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
233}
234
235/* Write dword to page-mapped register with an extra lock.
236 *
237 * As for efx_writed_page(), but for a register that suffers from
238 * SFC bug 3181. Take out a lock so the BIU collector cannot be
239 * confused. */
240static inline void efx_writed_page_locked(struct efx_nic *efx,
241 efx_dword_t *value,
242 unsigned int reg,
243 unsigned int page)
244{
245 unsigned long flags __attribute__ ((unused));
246
247 if (page == 0) {
248 spin_lock_irqsave(&efx->biu_lock, flags);
249 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
250 spin_unlock_irqrestore(&efx->biu_lock, flags);
251 } else {
252 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
253 }
254}
255
256#endif /* EFX_IO_H */
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 6c33459f9ea9..231e580acc9a 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -14,7 +14,6 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include "net_driver.h" 15#include "net_driver.h"
16#include "mdio_10g.h" 16#include "mdio_10g.h"
17#include "boards.h"
18#include "workarounds.h" 17#include "workarounds.h"
19 18
20unsigned efx_mdio_id_oui(u32 id) 19unsigned efx_mdio_id_oui(u32 id)
@@ -249,7 +248,7 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
249int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 248int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
250{ 249{
251 struct ethtool_cmd prev; 250 struct ethtool_cmd prev;
252 u32 required; 251 bool xnp;
253 int reg; 252 int reg;
254 253
255 efx->phy_op->get_settings(efx, &prev); 254 efx->phy_op->get_settings(efx, &prev);
@@ -266,86 +265,60 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
266 return -EINVAL; 265 return -EINVAL;
267 266
268 /* Check that PHY supports these settings */ 267 /* Check that PHY supports these settings */
269 if (ecmd->autoneg) { 268 if (!ecmd->autoneg ||
270 required = SUPPORTED_Autoneg; 269 (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
271 } else if (ecmd->duplex) {
272 switch (ecmd->speed) {
273 case SPEED_10: required = SUPPORTED_10baseT_Full; break;
274 case SPEED_100: required = SUPPORTED_100baseT_Full; break;
275 default: return -EINVAL;
276 }
277 } else {
278 switch (ecmd->speed) {
279 case SPEED_10: required = SUPPORTED_10baseT_Half; break;
280 case SPEED_100: required = SUPPORTED_100baseT_Half; break;
281 default: return -EINVAL;
282 }
283 }
284 required |= ecmd->advertising;
285 if (required & ~prev.supported)
286 return -EINVAL; 270 return -EINVAL;
287 271
288 if (ecmd->autoneg) { 272 xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
289 bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full 273 || EFX_WORKAROUND_13204(efx));
290 || EFX_WORKAROUND_13204(efx)); 274
291 275 /* Set up the base page */
292 /* Set up the base page */ 276 reg = ADVERTISE_CSMA;
293 reg = ADVERTISE_CSMA; 277 if (ecmd->advertising & ADVERTISED_10baseT_Half)
294 if (ecmd->advertising & ADVERTISED_10baseT_Half) 278 reg |= ADVERTISE_10HALF;
295 reg |= ADVERTISE_10HALF; 279 if (ecmd->advertising & ADVERTISED_10baseT_Full)
296 if (ecmd->advertising & ADVERTISED_10baseT_Full) 280 reg |= ADVERTISE_10FULL;
297 reg |= ADVERTISE_10FULL; 281 if (ecmd->advertising & ADVERTISED_100baseT_Half)
298 if (ecmd->advertising & ADVERTISED_100baseT_Half) 282 reg |= ADVERTISE_100HALF;
299 reg |= ADVERTISE_100HALF; 283 if (ecmd->advertising & ADVERTISED_100baseT_Full)
300 if (ecmd->advertising & ADVERTISED_100baseT_Full) 284 reg |= ADVERTISE_100FULL;
301 reg |= ADVERTISE_100FULL; 285 if (xnp)
302 if (xnp) 286 reg |= ADVERTISE_RESV;
303 reg |= ADVERTISE_RESV; 287 else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
304 else if (ecmd->advertising & (ADVERTISED_1000baseT_Half | 288 ADVERTISED_1000baseT_Full))
305 ADVERTISED_1000baseT_Full)) 289 reg |= ADVERTISE_NPAGE;
306 reg |= ADVERTISE_NPAGE; 290 reg |= mii_advertise_flowctrl(efx->wanted_fc);
307 reg |= mii_advertise_flowctrl(efx->wanted_fc); 291 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
308 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); 292
309 293 /* Set up the (extended) next page if necessary */
310 /* Set up the (extended) next page if necessary */ 294 if (efx->phy_op->set_npage_adv)
311 if (efx->phy_op->set_npage_adv) 295 efx->phy_op->set_npage_adv(efx, ecmd->advertising);
312 efx->phy_op->set_npage_adv(efx, ecmd->advertising); 296
313 297 /* Enable and restart AN */
314 /* Enable and restart AN */ 298 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
315 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1); 299 reg |= MDIO_AN_CTRL1_ENABLE;
316 reg |= MDIO_AN_CTRL1_ENABLE; 300 if (!(EFX_WORKAROUND_15195(efx) &&
317 if (!(EFX_WORKAROUND_15195(efx) && 301 LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
318 LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)) 302 reg |= MDIO_AN_CTRL1_RESTART;
319 reg |= MDIO_AN_CTRL1_RESTART; 303 if (xnp)
320 if (xnp) 304 reg |= MDIO_AN_CTRL1_XNP;
321 reg |= MDIO_AN_CTRL1_XNP; 305 else
322 else 306 reg &= ~MDIO_AN_CTRL1_XNP;
323 reg &= ~MDIO_AN_CTRL1_XNP; 307 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
324 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
325 } else {
326 /* Disable AN */
327 efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_CTRL1,
328 MDIO_AN_CTRL1_ENABLE, false);
329
330 /* Set the basic control bits */
331 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1);
332 reg &= ~(MDIO_CTRL1_SPEEDSEL | MDIO_CTRL1_FULLDPLX);
333 if (ecmd->speed == SPEED_100)
334 reg |= MDIO_PMA_CTRL1_SPEED100;
335 if (ecmd->duplex)
336 reg |= MDIO_CTRL1_FULLDPLX;
337 efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, reg);
338 }
339 308
340 return 0; 309 return 0;
341} 310}
342 311
343enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx) 312enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
344{ 313{
345 int lpa; 314 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
346 315
347 if (!(efx->phy_op->mmds & MDIO_DEVS_AN)) 316 if (!(efx->wanted_fc & EFX_FC_AUTO))
348 return efx->wanted_fc; 317 return efx->wanted_fc;
349 lpa = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA); 318
350 return efx_fc_resolve(efx->wanted_fc, lpa); 319 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
320
321 return mii_resolve_flowctrl_fdx(
322 mii_advertise_flowctrl(efx->wanted_fc),
323 efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
351} 324}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 6b14421a7444..75b37f101231 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -17,7 +17,6 @@
17 */ 17 */
18 18
19#include "efx.h" 19#include "efx.h"
20#include "boards.h"
21 20
22static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; } 21static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
23static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; } 22static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 298566da638b..bb3d258bd5e8 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -327,7 +327,7 @@ enum efx_rx_alloc_method {
327 * @used_flags: Channel is used by net driver 327 * @used_flags: Channel is used by net driver
328 * @enabled: Channel enabled indicator 328 * @enabled: Channel enabled indicator
329 * @irq: IRQ number (MSI and MSI-X only) 329 * @irq: IRQ number (MSI and MSI-X only)
330 * @irq_moderation: IRQ moderation value (in us) 330 * @irq_moderation: IRQ moderation value (in hardware ticks)
331 * @napi_dev: Net device used with NAPI 331 * @napi_dev: Net device used with NAPI
332 * @napi_str: NAPI control structure 332 * @napi_str: NAPI control structure
333 * @reset_work: Scheduled reset work thread 333 * @reset_work: Scheduled reset work thread
@@ -389,19 +389,6 @@ struct efx_channel {
389}; 389};
390 390
391/** 391/**
392 * struct efx_blinker - S/W LED blinking context
393 * @state: Current state - on or off
394 * @resubmit: Timer resubmission flag
395 * @timer: Control timer for blinking
396 */
397struct efx_blinker {
398 bool state;
399 bool resubmit;
400 struct timer_list timer;
401};
402
403
404/**
405 * struct efx_board - board information 392 * struct efx_board - board information
406 * @type: Board model type 393 * @type: Board model type
407 * @major: Major rev. ('A', 'B' ...) 394 * @major: Major rev. ('A', 'B' ...)
@@ -412,7 +399,9 @@ struct efx_blinker {
412 * @blink: Starts/stops blinking 399 * @blink: Starts/stops blinking
413 * @monitor: Board-specific health check function 400 * @monitor: Board-specific health check function
414 * @fini: Cleanup function 401 * @fini: Cleanup function
415 * @blinker: used to blink LEDs in software 402 * @blink_state: Current blink state
403 * @blink_resubmit: Blink timer resubmission flag
404 * @blink_timer: Blink timer
416 * @hwmon_client: I2C client for hardware monitor 405 * @hwmon_client: I2C client for hardware monitor
417 * @ioexp_client: I2C client for power/port control 406 * @ioexp_client: I2C client for power/port control
418 */ 407 */
@@ -429,7 +418,9 @@ struct efx_board {
429 int (*monitor) (struct efx_nic *nic); 418 int (*monitor) (struct efx_nic *nic);
430 void (*blink) (struct efx_nic *efx, bool start); 419 void (*blink) (struct efx_nic *efx, bool start);
431 void (*fini) (struct efx_nic *nic); 420 void (*fini) (struct efx_nic *nic);
432 struct efx_blinker blinker; 421 bool blink_state;
422 bool blink_resubmit;
423 struct timer_list blink_timer;
433 struct i2c_client *hwmon_client, *ioexp_client; 424 struct i2c_client *hwmon_client, *ioexp_client;
434}; 425};
435 426
@@ -506,17 +497,6 @@ enum efx_mac_type {
506 EFX_XMAC = 2, 497 EFX_XMAC = 2,
507}; 498};
508 499
509static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc,
510 unsigned int lpa)
511{
512 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
513
514 if (!(wanted_fc & EFX_FC_AUTO))
515 return wanted_fc;
516
517 return mii_resolve_flowctrl_fdx(mii_advertise_flowctrl(wanted_fc), lpa);
518}
519
520/** 500/**
521 * struct efx_mac_operations - Efx MAC operations table 501 * struct efx_mac_operations - Efx MAC operations table
522 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock 502 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
@@ -537,7 +517,6 @@ struct efx_mac_operations {
537 * @fini: Shut down PHY 517 * @fini: Shut down PHY
538 * @reconfigure: Reconfigure PHY (e.g. for new link parameters) 518 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
539 * @clear_interrupt: Clear down interrupt 519 * @clear_interrupt: Clear down interrupt
540 * @blink: Blink LEDs
541 * @poll: Poll for hardware state. Serialised by the mac_lock. 520 * @poll: Poll for hardware state. Serialised by the mac_lock.
542 * @get_settings: Get ethtool settings. Serialised by the mac_lock. 521 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
543 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 522 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
@@ -697,10 +676,13 @@ union efx_multicast_hash {
697 * @tx_queue: TX DMA queues 676 * @tx_queue: TX DMA queues
698 * @rx_queue: RX DMA queues 677 * @rx_queue: RX DMA queues
699 * @channel: Channels 678 * @channel: Channels
679 * @next_buffer_table: First available buffer table id
700 * @n_rx_queues: Number of RX queues 680 * @n_rx_queues: Number of RX queues
701 * @n_channels: Number of channels in use 681 * @n_channels: Number of channels in use
702 * @rx_buffer_len: RX buffer length 682 * @rx_buffer_len: RX buffer length
703 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 683 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
684 * @int_error_count: Number of internal errors seen recently
685 * @int_error_expire: Time at which error count will be expired
704 * @irq_status: Interrupt status buffer 686 * @irq_status: Interrupt status buffer
705 * @last_irq_cpu: Last CPU to handle interrupt. 687 * @last_irq_cpu: Last CPU to handle interrupt.
706 * This register is written with the SMP processor ID whenever an 688 * This register is written with the SMP processor ID whenever an
@@ -784,11 +766,15 @@ struct efx_nic {
784 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; 766 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
785 struct efx_channel channel[EFX_MAX_CHANNELS]; 767 struct efx_channel channel[EFX_MAX_CHANNELS];
786 768
769 unsigned next_buffer_table;
787 int n_rx_queues; 770 int n_rx_queues;
788 int n_channels; 771 int n_channels;
789 unsigned int rx_buffer_len; 772 unsigned int rx_buffer_len;
790 unsigned int rx_buffer_order; 773 unsigned int rx_buffer_order;
791 774
775 unsigned int_error_count;
776 unsigned long int_error_expire;
777
792 struct efx_buffer irq_status; 778 struct efx_buffer irq_status;
793 volatile signed int last_irq_cpu; 779 volatile signed int last_irq_cpu;
794 780
@@ -869,14 +855,7 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
869 * @buf_tbl_base: Buffer table base address 855 * @buf_tbl_base: Buffer table base address
870 * @evq_ptr_tbl_base: Event queue pointer table base address 856 * @evq_ptr_tbl_base: Event queue pointer table base address
871 * @evq_rptr_tbl_base: Event queue read-pointer table base address 857 * @evq_rptr_tbl_base: Event queue read-pointer table base address
872 * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
873 * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
874 * @evq_size: Event queue size (must be a power of two)
875 * @max_dma_mask: Maximum possible DMA mask 858 * @max_dma_mask: Maximum possible DMA mask
876 * @tx_dma_mask: TX DMA mask
877 * @bug5391_mask: Address mask for bug 5391 workaround
878 * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
879 * @rx_xon_thresh: RX FIFO XON watermark (bytes)
880 * @rx_buffer_padding: Padding added to each RX buffer 859 * @rx_buffer_padding: Padding added to each RX buffer
881 * @max_interrupt_mode: Highest capability interrupt mode supported 860 * @max_interrupt_mode: Highest capability interrupt mode supported
882 * from &enum efx_init_mode. 861 * from &enum efx_init_mode.
@@ -892,15 +871,8 @@ struct efx_nic_type {
892 unsigned int evq_ptr_tbl_base; 871 unsigned int evq_ptr_tbl_base;
893 unsigned int evq_rptr_tbl_base; 872 unsigned int evq_rptr_tbl_base;
894 873
895 unsigned int txd_ring_mask;
896 unsigned int rxd_ring_mask;
897 unsigned int evq_size;
898 u64 max_dma_mask; 874 u64 max_dma_mask;
899 unsigned int tx_dma_mask;
900 unsigned bug5391_mask;
901 875
902 int rx_xoff_thresh;
903 int rx_xon_thresh;
904 unsigned int rx_buffer_padding; 876 unsigned int rx_buffer_padding;
905 unsigned int max_interrupt_mode; 877 unsigned int max_interrupt_mode;
906 unsigned int phys_addr_channels; 878 unsigned int phys_addr_channels;
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index c1cff9c0c173..b5150f3bca31 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -23,9 +23,9 @@ extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
23extern int sft9001_wait_boot(struct efx_nic *efx); 23extern int sft9001_wait_boot(struct efx_nic *efx);
24 24
25/**************************************************************************** 25/****************************************************************************
26 * AMCC/Quake QT20xx PHYs 26 * AMCC/Quake QT202x PHYs
27 */ 27 */
28extern struct efx_phy_operations falcon_xfp_phy_ops; 28extern struct efx_phy_operations falcon_qt202x_phy_ops;
29 29
30/* These PHYs provide various H/W control states for LEDs */ 30/* These PHYs provide various H/W control states for LEDs */
31#define QUAKE_LED_LINK_INVAL (0) 31#define QUAKE_LED_LINK_INVAL (0)
@@ -39,6 +39,6 @@ extern struct efx_phy_operations falcon_xfp_phy_ops;
39#define QUAKE_LED_TXLINK (0) 39#define QUAKE_LED_TXLINK (0)
40#define QUAKE_LED_RXLINK (8) 40#define QUAKE_LED_RXLINK (8)
41 41
42extern void xfp_set_led(struct efx_nic *p, int led, int state); 42extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
43 43
44#endif 44#endif
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/qt202x_phy.c
index e6b3d5eaddba..560eb18280e1 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -7,8 +7,7 @@
7 * by the Free Software Foundation, incorporated herein by reference. 7 * by the Free Software Foundation, incorporated herein by reference.
8 */ 8 */
9/* 9/*
10 * Driver for SFP+ and XFP optical PHYs plus some support specific to the 10 * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
11 * AMCC QT20xx adapters; see www.amcc.com for details
12 */ 11 */
13 12
14#include <linux/timer.h> 13#include <linux/timer.h>
@@ -18,13 +17,13 @@
18#include "phy.h" 17#include "phy.h"
19#include "falcon.h" 18#include "falcon.h"
20 19
21#define XFP_REQUIRED_DEVS (MDIO_DEVS_PCS | \ 20#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \
22 MDIO_DEVS_PMAPMD | \ 21 MDIO_DEVS_PMAPMD | \
23 MDIO_DEVS_PHYXS) 22 MDIO_DEVS_PHYXS)
24 23
25#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ 24#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \
26 (1 << LOOPBACK_PMAPMD) | \ 25 (1 << LOOPBACK_PMAPMD) | \
27 (1 << LOOPBACK_NETWORK)) 26 (1 << LOOPBACK_NETWORK))
28 27
29/****************************************************************************/ 28/****************************************************************************/
30/* Quake-specific MDIO registers */ 29/* Quake-specific MDIO registers */
@@ -45,18 +44,18 @@
45#define PCS_VEND1_REG 0xc000 44#define PCS_VEND1_REG 0xc000
46#define PCS_VEND1_LBTXD_LBN 5 45#define PCS_VEND1_LBTXD_LBN 5
47 46
48void xfp_set_led(struct efx_nic *p, int led, int mode) 47void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
49{ 48{
50 int addr = MDIO_QUAKE_LED0_REG + led; 49 int addr = MDIO_QUAKE_LED0_REG + led;
51 efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode); 50 efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
52} 51}
53 52
54struct xfp_phy_data { 53struct qt202x_phy_data {
55 enum efx_phy_mode phy_mode; 54 enum efx_phy_mode phy_mode;
56}; 55};
57 56
58#define XFP_MAX_RESET_TIME 500 57#define QT2022C2_MAX_RESET_TIME 500
59#define XFP_RESET_WAIT 10 58#define QT2022C2_RESET_WAIT 10
60 59
61static int qt2025c_wait_reset(struct efx_nic *efx) 60static int qt2025c_wait_reset(struct efx_nic *efx)
62{ 61{
@@ -97,7 +96,7 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
97 return 0; 96 return 0;
98} 97}
99 98
100static int xfp_reset_phy(struct efx_nic *efx) 99static int qt202x_reset_phy(struct efx_nic *efx)
101{ 100{
102 int rc; 101 int rc;
103 102
@@ -111,8 +110,9 @@ static int xfp_reset_phy(struct efx_nic *efx)
111 /* Reset the PHYXS MMD. This is documented as doing 110 /* Reset the PHYXS MMD. This is documented as doing
112 * a complete soft reset. */ 111 * a complete soft reset. */
113 rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS, 112 rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
114 XFP_MAX_RESET_TIME / XFP_RESET_WAIT, 113 QT2022C2_MAX_RESET_TIME /
115 XFP_RESET_WAIT); 114 QT2022C2_RESET_WAIT,
115 QT2022C2_RESET_WAIT);
116 if (rc < 0) 116 if (rc < 0)
117 goto fail; 117 goto fail;
118 } 118 }
@@ -122,7 +122,7 @@ static int xfp_reset_phy(struct efx_nic *efx)
122 122
123 /* Check that all the MMDs we expect are present and responding. We 123 /* Check that all the MMDs we expect are present and responding. We
124 * expect faults on some if the link is down, but not on the PHY XS */ 124 * expect faults on some if the link is down, but not on the PHY XS */
125 rc = efx_mdio_check_mmds(efx, XFP_REQUIRED_DEVS, MDIO_DEVS_PHYXS); 125 rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
126 if (rc < 0) 126 if (rc < 0)
127 goto fail; 127 goto fail;
128 128
@@ -135,13 +135,13 @@ static int xfp_reset_phy(struct efx_nic *efx)
135 return rc; 135 return rc;
136} 136}
137 137
138static int xfp_phy_init(struct efx_nic *efx) 138static int qt202x_phy_init(struct efx_nic *efx)
139{ 139{
140 struct xfp_phy_data *phy_data; 140 struct qt202x_phy_data *phy_data;
141 u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); 141 u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
142 int rc; 142 int rc;
143 143
144 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 144 phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
145 if (!phy_data) 145 if (!phy_data)
146 return -ENOMEM; 146 return -ENOMEM;
147 efx->phy_data = phy_data; 147 efx->phy_data = phy_data;
@@ -152,7 +152,7 @@ static int xfp_phy_init(struct efx_nic *efx)
152 152
153 phy_data->phy_mode = efx->phy_mode; 153 phy_data->phy_mode = efx->phy_mode;
154 154
155 rc = xfp_reset_phy(efx); 155 rc = qt202x_reset_phy(efx);
156 156
157 EFX_INFO(efx, "PHY init %s.\n", 157 EFX_INFO(efx, "PHY init %s.\n",
158 rc ? "failed" : "successful"); 158 rc ? "failed" : "successful");
@@ -167,28 +167,28 @@ static int xfp_phy_init(struct efx_nic *efx)
167 return rc; 167 return rc;
168} 168}
169 169
170static void xfp_phy_clear_interrupt(struct efx_nic *efx) 170static void qt202x_phy_clear_interrupt(struct efx_nic *efx)
171{ 171{
172 /* Read to clear link status alarm */ 172 /* Read to clear link status alarm */
173 efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT); 173 efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT);
174} 174}
175 175
176static int xfp_link_ok(struct efx_nic *efx) 176static int qt202x_link_ok(struct efx_nic *efx)
177{ 177{
178 return efx_mdio_links_ok(efx, XFP_REQUIRED_DEVS); 178 return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
179} 179}
180 180
181static void xfp_phy_poll(struct efx_nic *efx) 181static void qt202x_phy_poll(struct efx_nic *efx)
182{ 182{
183 int link_up = xfp_link_ok(efx); 183 int link_up = qt202x_link_ok(efx);
184 /* Simulate a PHY event if link state has changed */ 184 /* Simulate a PHY event if link state has changed */
185 if (link_up != efx->link_up) 185 if (link_up != efx->link_up)
186 falcon_sim_phy_event(efx); 186 falcon_sim_phy_event(efx);
187} 187}
188 188
189static void xfp_phy_reconfigure(struct efx_nic *efx) 189static void qt202x_phy_reconfigure(struct efx_nic *efx)
190{ 190{
191 struct xfp_phy_data *phy_data = efx->phy_data; 191 struct qt202x_phy_data *phy_data = efx->phy_data;
192 192
193 if (efx->phy_type == PHY_TYPE_QT2025C) { 193 if (efx->phy_type == PHY_TYPE_QT2025C) {
194 /* There are several different register bits which can 194 /* There are several different register bits which can
@@ -207,7 +207,7 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
207 /* Reset the PHY when moving from tx off to tx on */ 207 /* Reset the PHY when moving from tx off to tx on */
208 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) && 208 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
209 (phy_data->phy_mode & PHY_MODE_TX_DISABLED)) 209 (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
210 xfp_reset_phy(efx); 210 qt202x_reset_phy(efx);
211 211
212 efx_mdio_transmit_disable(efx); 212 efx_mdio_transmit_disable(efx);
213 } 213 }
@@ -215,18 +215,18 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
215 efx_mdio_phy_reconfigure(efx); 215 efx_mdio_phy_reconfigure(efx);
216 216
217 phy_data->phy_mode = efx->phy_mode; 217 phy_data->phy_mode = efx->phy_mode;
218 efx->link_up = xfp_link_ok(efx); 218 efx->link_up = qt202x_link_ok(efx);
219 efx->link_speed = 10000; 219 efx->link_speed = 10000;
220 efx->link_fd = true; 220 efx->link_fd = true;
221 efx->link_fc = efx->wanted_fc; 221 efx->link_fc = efx->wanted_fc;
222} 222}
223 223
224static void xfp_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 224static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
225{ 225{
226 mdio45_ethtool_gset(&efx->mdio, ecmd); 226 mdio45_ethtool_gset(&efx->mdio, ecmd);
227} 227}
228 228
229static void xfp_phy_fini(struct efx_nic *efx) 229static void qt202x_phy_fini(struct efx_nic *efx)
230{ 230{
231 /* Clobber the LED if it was blinking */ 231 /* Clobber the LED if it was blinking */
232 efx->board_info.blink(efx, false); 232 efx->board_info.blink(efx, false);
@@ -236,15 +236,15 @@ static void xfp_phy_fini(struct efx_nic *efx)
236 efx->phy_data = NULL; 236 efx->phy_data = NULL;
237} 237}
238 238
239struct efx_phy_operations falcon_xfp_phy_ops = { 239struct efx_phy_operations falcon_qt202x_phy_ops = {
240 .macs = EFX_XMAC, 240 .macs = EFX_XMAC,
241 .init = xfp_phy_init, 241 .init = qt202x_phy_init,
242 .reconfigure = xfp_phy_reconfigure, 242 .reconfigure = qt202x_phy_reconfigure,
243 .poll = xfp_phy_poll, 243 .poll = qt202x_phy_poll,
244 .fini = xfp_phy_fini, 244 .fini = qt202x_phy_fini,
245 .clear_interrupt = xfp_phy_clear_interrupt, 245 .clear_interrupt = qt202x_phy_clear_interrupt,
246 .get_settings = xfp_phy_get_settings, 246 .get_settings = qt202x_phy_get_settings,
247 .set_settings = efx_mdio_set_settings, 247 .set_settings = efx_mdio_set_settings,
248 .mmds = XFP_REQUIRED_DEVS, 248 .mmds = QT202X_REQUIRED_DEVS,
249 .loopbacks = XFP_LOOPBACKS, 249 .loopbacks = QT202X_LOOPBACKS,
250}; 250};
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
new file mode 100644
index 000000000000..f336d83d5fa0
--- /dev/null
+++ b/drivers/net/sfc/regs.h
@@ -0,0 +1,3180 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_REGS_H
12#define EFX_REGS_H
13
14/*
15 * Falcon hardware architecture definitions have a name prefix following
16 * the format:
17 *
18 * F<type>_<min-rev><max-rev>_
19 *
20 * The following <type> strings are used:
21 *
22 * MMIO register MC register Host memory structure
23 * -------------------------------------------------------------
24 * Address R MCR
25 * Bitfield RF MCRF SF
26 * Enumerator FE MCFE SE
27 *
28 * <min-rev> is the first revision to which the definition applies:
29 *
30 * A: Falcon A1 (SFC4000AB)
31 * B: Falcon B0 (SFC4000BA)
32 * C: Siena A0 (SFL9021AA)
33 *
34 * If the definition has been changed or removed in later revisions
35 * then <max-rev> is the last revision to which the definition applies;
36 * otherwise it is "Z".
37 */
38
39/**************************************************************************
40 *
41 * Falcon/Siena registers and descriptors
42 *
43 **************************************************************************
44 */
45
46/* ADR_REGION_REG: Address region register */
47#define FR_AZ_ADR_REGION 0x00000000
48#define FRF_AZ_ADR_REGION3_LBN 96
49#define FRF_AZ_ADR_REGION3_WIDTH 18
50#define FRF_AZ_ADR_REGION2_LBN 64
51#define FRF_AZ_ADR_REGION2_WIDTH 18
52#define FRF_AZ_ADR_REGION1_LBN 32
53#define FRF_AZ_ADR_REGION1_WIDTH 18
54#define FRF_AZ_ADR_REGION0_LBN 0
55#define FRF_AZ_ADR_REGION0_WIDTH 18
56
57/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
58#define FR_AZ_INT_EN_KER 0x00000010
59#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
60#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
61#define FRF_AZ_KER_INT_CHAR_LBN 4
62#define FRF_AZ_KER_INT_CHAR_WIDTH 1
63#define FRF_AZ_KER_INT_KER_LBN 3
64#define FRF_AZ_KER_INT_KER_WIDTH 1
65#define FRF_AZ_DRV_INT_EN_KER_LBN 0
66#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
67
68/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
69#define FR_BZ_INT_EN_CHAR 0x00000020
70#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
71#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
72#define FRF_BZ_CHAR_INT_CHAR_LBN 4
73#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1
74#define FRF_BZ_CHAR_INT_KER_LBN 3
75#define FRF_BZ_CHAR_INT_KER_WIDTH 1
76#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0
77#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
78
79/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
80#define FR_AZ_INT_ADR_KER 0x00000030
81#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
82#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
83#define FRF_AZ_INT_ADR_KER_LBN 0
84#define FRF_AZ_INT_ADR_KER_WIDTH 64
85
86/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
87#define FR_BZ_INT_ADR_CHAR 0x00000040
88#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
89#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
90#define FRF_BZ_INT_ADR_CHAR_LBN 0
91#define FRF_BZ_INT_ADR_CHAR_WIDTH 64
92
93/* INT_ACK_KER: Kernel interrupt acknowledge register */
94#define FR_AA_INT_ACK_KER 0x00000050
95#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
96#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
97
98/* INT_ISR0_REG: Function 0 Interrupt Acknowlege Status register */
99#define FR_BZ_INT_ISR0 0x00000090
100#define FRF_BZ_INT_ISR_REG_LBN 0
101#define FRF_BZ_INT_ISR_REG_WIDTH 64
102
103/* HW_INIT_REG: Hardware initialization register */
104#define FR_AZ_HW_INIT 0x000000c0
105#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
106#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
107#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
108#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
109#define FRF_CZ_TX_MRG_TAGS_LBN 120
110#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
111#define FRF_AB_TRGT_MASK_ALL_LBN 100
112#define FRF_AB_TRGT_MASK_ALL_WIDTH 1
113#define FRF_AZ_DOORBELL_DROP_LBN 92
114#define FRF_AZ_DOORBELL_DROP_WIDTH 8
115#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
116#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
117#define FRF_AB_PE_EIDLE_DIS_LBN 75
118#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
119#define FRF_AA_FC_BLOCKING_EN_LBN 45
120#define FRF_AA_FC_BLOCKING_EN_WIDTH 1
121#define FRF_BZ_B2B_REQ_EN_LBN 45
122#define FRF_BZ_B2B_REQ_EN_WIDTH 1
123#define FRF_AA_B2B_REQ_EN_LBN 44
124#define FRF_AA_B2B_REQ_EN_WIDTH 1
125#define FRF_BB_FC_BLOCKING_EN_LBN 44
126#define FRF_BB_FC_BLOCKING_EN_WIDTH 1
127#define FRF_AZ_POST_WR_MASK_LBN 40
128#define FRF_AZ_POST_WR_MASK_WIDTH 4
129#define FRF_AZ_TLP_TC_LBN 34
130#define FRF_AZ_TLP_TC_WIDTH 3
131#define FRF_AZ_TLP_ATTR_LBN 32
132#define FRF_AZ_TLP_ATTR_WIDTH 2
133#define FRF_AB_INTB_VEC_LBN 24
134#define FRF_AB_INTB_VEC_WIDTH 5
135#define FRF_AB_INTA_VEC_LBN 16
136#define FRF_AB_INTA_VEC_WIDTH 5
137#define FRF_AZ_WD_TIMER_LBN 8
138#define FRF_AZ_WD_TIMER_WIDTH 8
139#define FRF_AZ_US_DISABLE_LBN 5
140#define FRF_AZ_US_DISABLE_WIDTH 1
141#define FRF_AZ_TLP_EP_LBN 4
142#define FRF_AZ_TLP_EP_WIDTH 1
143#define FRF_AZ_ATTR_SEL_LBN 3
144#define FRF_AZ_ATTR_SEL_WIDTH 1
145#define FRF_AZ_TD_SEL_LBN 1
146#define FRF_AZ_TD_SEL_WIDTH 1
147#define FRF_AZ_TLP_TD_LBN 0
148#define FRF_AZ_TLP_TD_WIDTH 1
149
150/* EE_SPI_HCMD_REG: SPI host command register */
151#define FR_AB_EE_SPI_HCMD 0x00000100
152#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
153#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
154#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
155#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
156#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
157#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
158#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
159#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
160#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
161#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
162#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
163#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
164#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
165#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
166#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
167#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
168
169/* USR_EV_CFG: User Level Event Configuration register */
170#define FR_CZ_USR_EV_CFG 0x00000100
171#define FRF_CZ_USREV_DIS_LBN 16
172#define FRF_CZ_USREV_DIS_WIDTH 1
173#define FRF_CZ_DFLT_EVQ_LBN 0
174#define FRF_CZ_DFLT_EVQ_WIDTH 10
175
176/* EE_SPI_HADR_REG: SPI host address register */
177#define FR_AB_EE_SPI_HADR 0x00000110
178#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
179#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
180#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
181#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
182
183/* EE_SPI_HDATA_REG: SPI host data register */
184#define FR_AB_EE_SPI_HDATA 0x00000120
185#define FRF_AB_EE_SPI_HDATA3_LBN 96
186#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
187#define FRF_AB_EE_SPI_HDATA2_LBN 64
188#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
189#define FRF_AB_EE_SPI_HDATA1_LBN 32
190#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
191#define FRF_AB_EE_SPI_HDATA0_LBN 0
192#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
193
194/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
195#define FR_AB_EE_BASE_PAGE 0x00000130
196#define FRF_AB_EE_EXPROM_MASK_LBN 16
197#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
198#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
199#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
200
201/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
202#define FR_AB_EE_VPD_CFG0 0x00000140
203#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
204#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
205#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
206#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
207#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
208#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
209#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
210#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
211#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
212#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
213#define FRF_AB_EE_VPDW_LENGTH_LBN 80
214#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
215#define FRF_AB_EE_VPDW_BASE_LBN 64
216#define FRF_AB_EE_VPDW_BASE_WIDTH 15
217#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
218#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
219#define FRF_AB_EE_VPD_BASE_LBN 32
220#define FRF_AB_EE_VPD_BASE_WIDTH 24
221#define FRF_AB_EE_VPD_LENGTH_LBN 16
222#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
223#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
224#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
225#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
226#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
227#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
228#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
229#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
230#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
231#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
232#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
233#define FRF_AB_EE_VPD_EN_LBN 0
234#define FRF_AB_EE_VPD_EN_WIDTH 1
235
236/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
237#define FR_AB_EE_VPD_SW_CNTL 0x00000150
238#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
239#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
240#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
241#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
242#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
243#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
244
245/* EE_VPD_SW_DATA_REG: VPD access SW data register */
246#define FR_AB_EE_VPD_SW_DATA 0x00000160
247#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
248#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
249
250/* PBMX_DBG_IADDR_REG: Capture Module address register */
251#define FR_CZ_PBMX_DBG_IADDR 0x000001f0
252#define FRF_CZ_PBMX_DBG_IADDR_LBN 0
253#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
254
255/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
256#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0
257#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
258#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
259#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
260#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
261#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
262#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
263
264/* PBMX_DBG_IDATA_REG: Capture Module data register */
265#define FR_CZ_PBMX_DBG_IDATA 0x000001f8
266#define FRF_CZ_PBMX_DBG_IDATA_LBN 0
267#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
268
269/* NIC_STAT_REG: NIC status register */
270#define FR_AB_NIC_STAT 0x00000200
271#define FRF_BB_AER_DIS_LBN 34
272#define FRF_BB_AER_DIS_WIDTH 1
273#define FRF_BB_EE_STRAP_EN_LBN 31
274#define FRF_BB_EE_STRAP_EN_WIDTH 1
275#define FRF_BB_EE_STRAP_LBN 24
276#define FRF_BB_EE_STRAP_WIDTH 4
277#define FRF_BB_REVISION_ID_LBN 17
278#define FRF_BB_REVISION_ID_WIDTH 7
279#define FRF_AB_ONCHIP_SRAM_LBN 16
280#define FRF_AB_ONCHIP_SRAM_WIDTH 1
281#define FRF_AB_SF_PRST_LBN 9
282#define FRF_AB_SF_PRST_WIDTH 1
283#define FRF_AB_EE_PRST_LBN 8
284#define FRF_AB_EE_PRST_WIDTH 1
285#define FRF_AB_ATE_MODE_LBN 3
286#define FRF_AB_ATE_MODE_WIDTH 1
287#define FRF_AB_STRAP_PINS_LBN 0
288#define FRF_AB_STRAP_PINS_WIDTH 3
289
290/* GPIO_CTL_REG: GPIO control register */
291#define FR_AB_GPIO_CTL 0x00000210
292#define FRF_AB_GPIO_OUT3_LBN 112
293#define FRF_AB_GPIO_OUT3_WIDTH 16
294#define FRF_AB_GPIO_IN3_LBN 104
295#define FRF_AB_GPIO_IN3_WIDTH 8
296#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
297#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
298#define FRF_AB_GPIO_OUT2_LBN 80
299#define FRF_AB_GPIO_OUT2_WIDTH 16
300#define FRF_AB_GPIO_IN2_LBN 72
301#define FRF_AB_GPIO_IN2_WIDTH 8
302#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
303#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
304#define FRF_AB_GPIO15_OEN_LBN 63
305#define FRF_AB_GPIO15_OEN_WIDTH 1
306#define FRF_AB_GPIO14_OEN_LBN 62
307#define FRF_AB_GPIO14_OEN_WIDTH 1
308#define FRF_AB_GPIO13_OEN_LBN 61
309#define FRF_AB_GPIO13_OEN_WIDTH 1
310#define FRF_AB_GPIO12_OEN_LBN 60
311#define FRF_AB_GPIO12_OEN_WIDTH 1
312#define FRF_AB_GPIO11_OEN_LBN 59
313#define FRF_AB_GPIO11_OEN_WIDTH 1
314#define FRF_AB_GPIO10_OEN_LBN 58
315#define FRF_AB_GPIO10_OEN_WIDTH 1
316#define FRF_AB_GPIO9_OEN_LBN 57
317#define FRF_AB_GPIO9_OEN_WIDTH 1
318#define FRF_AB_GPIO8_OEN_LBN 56
319#define FRF_AB_GPIO8_OEN_WIDTH 1
320#define FRF_AB_GPIO15_OUT_LBN 55
321#define FRF_AB_GPIO15_OUT_WIDTH 1
322#define FRF_AB_GPIO14_OUT_LBN 54
323#define FRF_AB_GPIO14_OUT_WIDTH 1
324#define FRF_AB_GPIO13_OUT_LBN 53
325#define FRF_AB_GPIO13_OUT_WIDTH 1
326#define FRF_AB_GPIO12_OUT_LBN 52
327#define FRF_AB_GPIO12_OUT_WIDTH 1
328#define FRF_AB_GPIO11_OUT_LBN 51
329#define FRF_AB_GPIO11_OUT_WIDTH 1
330#define FRF_AB_GPIO10_OUT_LBN 50
331#define FRF_AB_GPIO10_OUT_WIDTH 1
332#define FRF_AB_GPIO9_OUT_LBN 49
333#define FRF_AB_GPIO9_OUT_WIDTH 1
334#define FRF_AB_GPIO8_OUT_LBN 48
335#define FRF_AB_GPIO8_OUT_WIDTH 1
336#define FRF_AB_GPIO15_IN_LBN 47
337#define FRF_AB_GPIO15_IN_WIDTH 1
338#define FRF_AB_GPIO14_IN_LBN 46
339#define FRF_AB_GPIO14_IN_WIDTH 1
340#define FRF_AB_GPIO13_IN_LBN 45
341#define FRF_AB_GPIO13_IN_WIDTH 1
342#define FRF_AB_GPIO12_IN_LBN 44
343#define FRF_AB_GPIO12_IN_WIDTH 1
344#define FRF_AB_GPIO11_IN_LBN 43
345#define FRF_AB_GPIO11_IN_WIDTH 1
346#define FRF_AB_GPIO10_IN_LBN 42
347#define FRF_AB_GPIO10_IN_WIDTH 1
348#define FRF_AB_GPIO9_IN_LBN 41
349#define FRF_AB_GPIO9_IN_WIDTH 1
350#define FRF_AB_GPIO8_IN_LBN 40
351#define FRF_AB_GPIO8_IN_WIDTH 1
352#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
353#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
354#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
355#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
356#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
357#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
358#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
359#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
360#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
361#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
362#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
363#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
364#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
365#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
366#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
367#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
368#define FRF_AB_CLK156_OUT_EN_LBN 31
369#define FRF_AB_CLK156_OUT_EN_WIDTH 1
370#define FRF_AB_USE_NIC_CLK_LBN 30
371#define FRF_AB_USE_NIC_CLK_WIDTH 1
372#define FRF_AB_GPIO5_OEN_LBN 29
373#define FRF_AB_GPIO5_OEN_WIDTH 1
374#define FRF_AB_GPIO4_OEN_LBN 28
375#define FRF_AB_GPIO4_OEN_WIDTH 1
376#define FRF_AB_GPIO3_OEN_LBN 27
377#define FRF_AB_GPIO3_OEN_WIDTH 1
378#define FRF_AB_GPIO2_OEN_LBN 26
379#define FRF_AB_GPIO2_OEN_WIDTH 1
380#define FRF_AB_GPIO1_OEN_LBN 25
381#define FRF_AB_GPIO1_OEN_WIDTH 1
382#define FRF_AB_GPIO0_OEN_LBN 24
383#define FRF_AB_GPIO0_OEN_WIDTH 1
384#define FRF_AB_GPIO7_OUT_LBN 23
385#define FRF_AB_GPIO7_OUT_WIDTH 1
386#define FRF_AB_GPIO6_OUT_LBN 22
387#define FRF_AB_GPIO6_OUT_WIDTH 1
388#define FRF_AB_GPIO5_OUT_LBN 21
389#define FRF_AB_GPIO5_OUT_WIDTH 1
390#define FRF_AB_GPIO4_OUT_LBN 20
391#define FRF_AB_GPIO4_OUT_WIDTH 1
392#define FRF_AB_GPIO3_OUT_LBN 19
393#define FRF_AB_GPIO3_OUT_WIDTH 1
394#define FRF_AB_GPIO2_OUT_LBN 18
395#define FRF_AB_GPIO2_OUT_WIDTH 1
396#define FRF_AB_GPIO1_OUT_LBN 17
397#define FRF_AB_GPIO1_OUT_WIDTH 1
398#define FRF_AB_GPIO0_OUT_LBN 16
399#define FRF_AB_GPIO0_OUT_WIDTH 1
400#define FRF_AB_GPIO7_IN_LBN 15
401#define FRF_AB_GPIO7_IN_WIDTH 1
402#define FRF_AB_GPIO6_IN_LBN 14
403#define FRF_AB_GPIO6_IN_WIDTH 1
404#define FRF_AB_GPIO5_IN_LBN 13
405#define FRF_AB_GPIO5_IN_WIDTH 1
406#define FRF_AB_GPIO4_IN_LBN 12
407#define FRF_AB_GPIO4_IN_WIDTH 1
408#define FRF_AB_GPIO3_IN_LBN 11
409#define FRF_AB_GPIO3_IN_WIDTH 1
410#define FRF_AB_GPIO2_IN_LBN 10
411#define FRF_AB_GPIO2_IN_WIDTH 1
412#define FRF_AB_GPIO1_IN_LBN 9
413#define FRF_AB_GPIO1_IN_WIDTH 1
414#define FRF_AB_GPIO0_IN_LBN 8
415#define FRF_AB_GPIO0_IN_WIDTH 1
416#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
417#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
418#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
419#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
420#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
421#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
422#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
423#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
424#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
425#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
426#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
427#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
428#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
429#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
430#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
431#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
432
/* GLB_CTL_REG: Global control register */
/*
 * Naming convention used throughout this generated register map:
 * FR_*  = register address, FRF_*_LBN/_WIDTH = field lowest bit number
 * and bit width, FFE_* = field enumeration value.  The AA/AB/BB/AZ/BZ/CZ
 * infixes appear to select which controller revisions define the field
 * (NOTE(review): inferred from the prefix pattern — confirm against the
 * hardware register specification).
 */
#define FR_AB_GLB_CTL 0x00000220
#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
#define FRF_AA_PCIX_RST_CTL_LBN 60
#define FRF_AA_PCIX_RST_CTL_WIDTH 1
#define FRF_BB_BIU_RST_CTL_LBN 60
#define FRF_BB_BIU_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
#define FRF_AB_XGRX_RST_CTL_LBN 56
#define FRF_AB_XGRX_RST_CTL_WIDTH 1
#define FRF_AB_XGTX_RST_CTL_LBN 55
#define FRF_AB_XGTX_RST_CTL_WIDTH 1
#define FRF_AB_EM_RST_CTL_LBN 54
#define FRF_AB_EM_RST_CTL_WIDTH 1
#define FRF_AB_EV_RST_CTL_LBN 53
#define FRF_AB_EV_RST_CTL_WIDTH 1
#define FRF_AB_SR_RST_CTL_LBN 52
#define FRF_AB_SR_RST_CTL_WIDTH 1
#define FRF_AB_RX_RST_CTL_LBN 51
#define FRF_AB_RX_RST_CTL_WIDTH 1
#define FRF_AB_TX_RST_CTL_LBN 50
#define FRF_AB_TX_RST_CTL_WIDTH 1
#define FRF_AB_EE_RST_CTL_LBN 49
#define FRF_AB_EE_RST_CTL_WIDTH 1
#define FRF_AB_CS_RST_CTL_LBN 48
#define FRF_AB_CS_RST_CTL_WIDTH 1
#define FRF_AB_HOT_RST_CTL_LBN 40
#define FRF_AB_HOT_RST_CTL_WIDTH 2
#define FRF_AB_RST_EXT_PHY_LBN 31
#define FRF_AB_RST_EXT_PHY_WIDTH 1
#define FRF_AB_RST_XAUI_SD_LBN 30
#define FRF_AB_RST_XAUI_SD_WIDTH 1
#define FRF_AB_RST_PCIE_SD_LBN 29
#define FRF_AB_RST_PCIE_SD_WIDTH 1
#define FRF_AA_RST_PCIX_LBN 28
#define FRF_AA_RST_PCIX_WIDTH 1
#define FRF_BB_RST_BIU_LBN 28
#define FRF_BB_RST_BIU_WIDTH 1
#define FRF_AB_RST_PCIE_STKY_LBN 27
#define FRF_AB_RST_PCIE_STKY_WIDTH 1
#define FRF_AB_RST_PCIE_NSTKY_LBN 26
#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
#define FRF_AB_RST_PCIE_CORE_LBN 25
#define FRF_AB_RST_PCIE_CORE_WIDTH 1
#define FRF_AB_RST_XGRX_LBN 24
#define FRF_AB_RST_XGRX_WIDTH 1
#define FRF_AB_RST_XGTX_LBN 23
#define FRF_AB_RST_XGTX_WIDTH 1
#define FRF_AB_RST_EM_LBN 22
#define FRF_AB_RST_EM_WIDTH 1
#define FRF_AB_RST_EV_LBN 21
#define FRF_AB_RST_EV_WIDTH 1
#define FRF_AB_RST_SR_LBN 20
#define FRF_AB_RST_SR_WIDTH 1
#define FRF_AB_RST_RX_LBN 19
#define FRF_AB_RST_RX_WIDTH 1
#define FRF_AB_RST_TX_LBN 18
#define FRF_AB_RST_TX_WIDTH 1
#define FRF_AB_RST_SF_LBN 17
#define FRF_AB_RST_SF_WIDTH 1
#define FRF_AB_RST_CS_LBN 16
#define FRF_AB_RST_CS_WIDTH 1
#define FRF_AB_INT_RST_DUR_LBN 4
#define FRF_AB_INT_RST_DUR_WIDTH 3
#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
#define FFE_AB_EXT_PHY_RST_DUR_640US 3
#define FFE_AB_EXT_PHY_RST_DUR_320US 2
#define FFE_AB_EXT_PHY_RST_DUR_160US 1
#define FFE_AB_EXT_PHY_RST_DUR_80US 0
#define FRF_AB_SWRST_LBN 0
#define FRF_AB_SWRST_WIDTH 1
519
/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
/* Upper half (bits 32+) are *_EN enable bits; lower half are the
 * corresponding interrupt status bits at the same relative positions. */
#define FR_AZ_FATAL_INTR_KER 0x00000230
#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
578
/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
/* Char-driver counterpart of FATAL_INTR_REG_KER: identical field layout,
 * BB/BZ/CZ revisions only. */
#define FR_BZ_FATAL_INTR_CHAR 0x00000240
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
637
/* DP_CTRL_REG: Datapath control register */
#define FR_BZ_DP_CTRL 0x00000250
#define FRF_BZ_FLS_EVQ_ID_LBN 0
#define FRF_BZ_FLS_EVQ_ID_WIDTH 12

/* MEM_STAT_REG: Memory status register */
#define FR_AZ_MEM_STAT 0x00000260
#define FRF_AB_MEM_PERR_VEC_LBN 53
#define FRF_AB_MEM_PERR_VEC_WIDTH 38
#define FRF_AB_MBIST_CORR_LBN 38
#define FRF_AB_MBIST_CORR_WIDTH 15
#define FRF_AB_MBIST_ERR_LBN 0
/* NOTE(review): LBN 0 + WIDTH 40 spans bits 0-39, which overlaps
 * FRF_AB_MBIST_CORR (bits 38-52) within the same AB revision — verify
 * this width against the hardware register specification. */
#define FRF_AB_MBIST_ERR_WIDTH 40
#define FRF_CZ_MEM_PERR_VEC_LBN 0
#define FRF_CZ_MEM_PERR_VEC_WIDTH 35

/* CS_DEBUG_REG: Debug register */
#define FR_AZ_CS_DEBUG 0x00000270
#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
#define FRF_CZ_CS_PORT_NUM_LBN 40
#define FRF_CZ_CS_PORT_NUM_WIDTH 2
#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
#define FRF_CZ_CS_PORT_FPE_LBN 1
#define FRF_CZ_CS_PORT_FPE_WIDTH 35
#define FRF_AB_EM_DEBUG_ADDR_LBN 26
#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
#define FRF_AB_SR_DEBUG_ADDR_LBN 21
#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
#define FRF_AB_EV_DEBUG_ADDR_LBN 16
#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
#define FRF_AB_RX_DEBUG_ADDR_LBN 11
#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
#define FRF_AB_TX_DEBUG_ADDR_LBN 6
#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
#define FRF_AZ_CS_DEBUG_EN_LBN 0
#define FRF_AZ_CS_DEBUG_EN_WIDTH 1

/* DRIVER_REG: Driver scratch register [0-7] */
#define FR_AZ_DRIVER 0x00000280
#define FR_AZ_DRIVER_STEP 16
#define FR_AZ_DRIVER_ROWS 8
#define FRF_AZ_DRIVER_DW0_LBN 0
#define FRF_AZ_DRIVER_DW0_WIDTH 32

/* ALTERA_BUILD_REG: Altera build register */
#define FR_AZ_ALTERA_BUILD 0x00000300
#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32

/* CSR_SPARE_REG: Spare register */
#define FR_AZ_CSR_SPARE 0x00000310
#define FRF_AB_MEM_PERR_EN_LBN 64
#define FRF_AB_MEM_PERR_EN_WIDTH 38
#define FRF_CZ_MEM_PERR_EN_LBN 64
#define FRF_CZ_MEM_PERR_EN_WIDTH 35
#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
#define FRF_AZ_CSR_SPARE_BITS_LBN 0
#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
709
/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
#define FR_AB_PCIE_SD_CTL0123 0x00000320
#define FRF_AB_PCIE_TESTSIG_H_LBN 96
#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
#define FRF_AB_PCIE_TESTSIG_L_LBN 64
#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
#define FRF_AB_PCIE_OFFSET_LBN 56
#define FRF_AB_PCIE_OFFSET_WIDTH 8
#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
#define FRF_AB_PCIE_HIVMODE_H_LBN 53
#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
#define FRF_AB_PCIE_HIVMODE_L_LBN 52
#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
#define FRF_AB_PCIE_PARRESET_H_LBN 51
#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
#define FRF_AB_PCIE_PARRESET_L_LBN 50
#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
#define FRF_AB_PCIE_LPBK_LBN 40
#define FRF_AB_PCIE_LPBK_WIDTH 8
#define FRF_AB_PCIE_PARLPBK_LBN 32
#define FRF_AB_PCIE_PARLPBK_WIDTH 8
#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
#define FFE_AB_PCIE_RXEQCTL_OFF 2
#define FFE_AB_PCIE_RXEQCTL_MIN 1
#define FFE_AB_PCIE_RXEQCTL_MAX 0
#define FRF_AB_PCIE_HIDRV_LBN 8
#define FRF_AB_PCIE_HIDRV_WIDTH 8
#define FRF_AB_PCIE_LODRV_LBN 0
#define FRF_AB_PCIE_LODRV_WIDTH 8

/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
#define FR_AB_PCIE_SD_CTL45 0x00000330
#define FRF_AB_PCIE_DTX7_LBN 60
#define FRF_AB_PCIE_DTX7_WIDTH 4
#define FRF_AB_PCIE_DTX6_LBN 56
#define FRF_AB_PCIE_DTX6_WIDTH 4
#define FRF_AB_PCIE_DTX5_LBN 52
#define FRF_AB_PCIE_DTX5_WIDTH 4
#define FRF_AB_PCIE_DTX4_LBN 48
#define FRF_AB_PCIE_DTX4_WIDTH 4
#define FRF_AB_PCIE_DTX3_LBN 44
#define FRF_AB_PCIE_DTX3_WIDTH 4
#define FRF_AB_PCIE_DTX2_LBN 40
#define FRF_AB_PCIE_DTX2_WIDTH 4
#define FRF_AB_PCIE_DTX1_LBN 36
#define FRF_AB_PCIE_DTX1_WIDTH 4
#define FRF_AB_PCIE_DTX0_LBN 32
#define FRF_AB_PCIE_DTX0_WIDTH 4
#define FRF_AB_PCIE_DEQ7_LBN 28
#define FRF_AB_PCIE_DEQ7_WIDTH 4
#define FRF_AB_PCIE_DEQ6_LBN 24
#define FRF_AB_PCIE_DEQ6_WIDTH 4
#define FRF_AB_PCIE_DEQ5_LBN 20
#define FRF_AB_PCIE_DEQ5_WIDTH 4
#define FRF_AB_PCIE_DEQ4_LBN 16
#define FRF_AB_PCIE_DEQ4_WIDTH 4
#define FRF_AB_PCIE_DEQ3_LBN 12
#define FRF_AB_PCIE_DEQ3_WIDTH 4
#define FRF_AB_PCIE_DEQ2_LBN 8
#define FRF_AB_PCIE_DEQ2_WIDTH 4
#define FRF_AB_PCIE_DEQ1_LBN 4
#define FRF_AB_PCIE_DEQ1_WIDTH 4
#define FRF_AB_PCIE_DEQ0_LBN 0
#define FRF_AB_PCIE_DEQ0_WIDTH 4

/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
#define FRF_AB_PCIE_PRBSERR_LBN 40
#define FRF_AB_PCIE_PRBSERR_WIDTH 8
#define FRF_AB_PCIE_PRBSERRH0_LBN 32
#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
#define FRF_AB_PCIE_FASTINIT_H_LBN 15
#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
#define FRF_AB_PCIE_FASTINIT_L_LBN 14
#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
#define FRF_AB_PCIE_PRBSSEL_LBN 0
#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
830
/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
#define FR_BB_DEBUG_DATA_OUT 0x00000350
#define FRF_BB_DEBUG2_PORT_LBN 25
#define FRF_BB_DEBUG2_PORT_WIDTH 15
#define FRF_BB_DEBUG1_PORT_LBN 0
#define FRF_BB_DEBUG1_PORT_WIDTH 25

/* EVQ_RPTR_REGP0: Event queue read pointer register */
#define FR_BZ_EVQ_RPTR_P0 0x00000400
#define FR_BZ_EVQ_RPTR_P0_STEP 8192
#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
/* EVQ_RPTR_REG_KER: Event queue read pointer register */
#define FR_AA_EVQ_RPTR_KER 0x00011b00
#define FR_AA_EVQ_RPTR_KER_STEP 4
#define FR_AA_EVQ_RPTR_KER_ROWS 4
/* EVQ_RPTR_REG: Event queue read pointer register */
#define FR_BZ_EVQ_RPTR 0x00fa0000
#define FR_BZ_EVQ_RPTR_STEP 16
#define FR_BB_EVQ_RPTR_ROWS 4096
#define FR_CZ_EVQ_RPTR_ROWS 1024
/* EVQ_RPTR_REGP123: Event queue read pointer register */
#define FR_BB_EVQ_RPTR_P123 0x01000400
#define FR_BB_EVQ_RPTR_P123_STEP 8192
#define FR_BB_EVQ_RPTR_P123_ROWS 3072
#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
#define FRF_AZ_EVQ_RPTR_LBN 0
#define FRF_AZ_EVQ_RPTR_WIDTH 15

/* TIMER_COMMAND_REGP0: Timer Command Registers */
#define FR_BZ_TIMER_COMMAND_P0 0x00000420
#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
/* TIMER_COMMAND_REG_KER: Timer Command Registers */
#define FR_AA_TIMER_COMMAND_KER 0x00000420
#define FR_AA_TIMER_COMMAND_KER_STEP 8192
#define FR_AA_TIMER_COMMAND_KER_ROWS 4
/* TIMER_COMMAND_REGP123: Timer Command Registers */
#define FR_BB_TIMER_COMMAND_P123 0x01000420
#define FR_BB_TIMER_COMMAND_P123_STEP 8192
#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
#define FRF_CZ_TC_TIMER_MODE_LBN 14
#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
#define FRF_AB_TC_TIMER_MODE_LBN 12
#define FRF_AB_TC_TIMER_MODE_WIDTH 2
#define FRF_CZ_TC_TIMER_VAL_LBN 0
#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
#define FRF_AB_TC_TIMER_VAL_LBN 0
#define FRF_AB_TC_TIMER_VAL_WIDTH 12

/* DRV_EV_REG: Driver generated event register */
#define FR_AZ_DRV_EV 0x00000440
#define FRF_AZ_DRV_EV_QID_LBN 64
#define FRF_AZ_DRV_EV_QID_WIDTH 12
#define FRF_AZ_DRV_EV_DATA_LBN 0
#define FRF_AZ_DRV_EV_DATA_WIDTH 64

/* EVQ_CTL_REG: Event queue control register */
#define FR_AZ_EVQ_CTL 0x00000450
#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7

/* EVQ_CNT1_REG: Event counter 1 register */
#define FR_AZ_EVQ_CNT1 0x00000460
#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20

/* EVQ_CNT2_REG: Event counter 2 register */
#define FR_AZ_EVQ_CNT2 0x00000470
#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_RDY_CNT_LBN 80
#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20

/* USR_EV_REG: Event mailbox register */
#define FR_CZ_USR_EV 0x00000540
#define FR_CZ_USR_EV_STEP 8192
#define FR_CZ_USR_EV_ROWS 1024
#define FRF_CZ_USR_EV_DATA_LBN 0
#define FRF_CZ_USR_EV_DATA_WIDTH 32
941
/* BUF_TBL_CFG_REG: Buffer table configuration register */
#define FR_AZ_BUF_TBL_CFG 0x00000600
#define FRF_AZ_BUF_TBL_MODE_LBN 3
#define FRF_AZ_BUF_TBL_MODE_WIDTH 1

/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
#define FR_AZ_SRM_RX_DC_CFG 0x00000610
#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21

/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
#define FR_AZ_SRM_TX_DC_CFG 0x00000620
#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21

/* SRM_CFG_REG: SRAM configuration register */
#define FR_AZ_SRM_CFG 0x00000630
#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
#define FRF_AZ_SRM_INIT_EN_LBN 3
#define FRF_AZ_SRM_INIT_EN_WIDTH 1
#define FRF_AZ_SRM_NUM_BANK_LBN 2
#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
#define FRF_AZ_SRM_BANK_SIZE_LBN 0
#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2

/* BUF_TBL_UPD_REG: Buffer table update register */
#define FR_AZ_BUF_TBL_UPD 0x00000650
#define FRF_AZ_BUF_UPD_CMD_LBN 63
#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
#define FRF_AZ_BUF_CLR_CMD_LBN 62
#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
#define FRF_AZ_BUF_CLR_END_ID_LBN 32
#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
#define FRF_AZ_BUF_CLR_START_ID_LBN 0
#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20

/* SRM_UPD_EVQ_REG: Buffer table update register */
/* NOTE(review): description duplicated from BUF_TBL_UPD_REG above; the
 * name suggests "SRAM update event queue register" — confirm against the
 * hardware documentation. */
#define FR_AZ_SRM_UPD_EVQ 0x00000660
#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12

/* SRAM_PARITY_REG: SRAM parity register. */
#define FR_AZ_SRAM_PARITY 0x00000670
#define FRF_CZ_BYPASS_ECC_LBN 3
#define FRF_CZ_BYPASS_ECC_WIDTH 1
#define FRF_CZ_SEC_INT_LBN 2
#define FRF_CZ_SEC_INT_WIDTH 1
#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
#define FRF_AB_FORCE_SRAM_PERR_LBN 0
#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
1000
/* RX_CFG_REG: Receive configuration register */
/* Fields are listed per revision (AA vs BZ/CZ); the same logical field
 * moves position between revisions, hence the interleaved defines. */
#define FR_AZ_RX_CFG 0x00000800
#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
#define FRF_BZ_RX_TCP_SUP_LBN 48
#define FRF_BZ_RX_TCP_SUP_WIDTH 1
#define FRF_BZ_RX_INGR_EN_LBN 47
#define FRF_BZ_RX_INGR_EN_WIDTH 1
#define FRF_BZ_RX_IP_HASH_LBN 46
#define FRF_BZ_RX_IP_HASH_WIDTH 1
#define FRF_BZ_RX_HASH_ALG_LBN 45
#define FRF_BZ_RX_HASH_ALG_WIDTH 1
#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
#define FRF_BZ_RX_OWNERR_CTL_LBN 38
#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
#define FRF_BZ_RX_XON_TX_TH_LBN 33
#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
#define FRF_AA_RX_OWNERR_CTL_LBN 30
#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
#define FRF_AA_RX_XON_TX_TH_LBN 25
#define FRF_AA_RX_XON_TX_TH_WIDTH 5
#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
#define FRF_AA_RX_XOFF_TX_TH_LBN 20
#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
#define FRF_BZ_RX_XON_MAC_TH_LBN 10
#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
#define FRF_AA_RX_XON_MAC_TH_LBN 6
#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1

/* RX_FILTER_CTL_REG: Receive filter control registers */
#define FR_BZ_RX_FILTER_CTL 0x00000810
#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
#define FRF_BZ_NUM_KER_LBN 24
#define FRF_BZ_NUM_KER_WIDTH 2
#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
1096
/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12

/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
#define FR_BZ_RX_DESC_UPD_P0 0x00000830
#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
#define FR_AA_RX_DESC_UPD_KER 0x00000830
#define FR_AA_RX_DESC_UPD_KER_STEP 8192
#define FR_AA_RX_DESC_UPD_KER_ROWS 4
/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
#define FR_BB_RX_DESC_UPD_P123 0x01000830
#define FR_BB_RX_DESC_UPD_P123_STEP 8192
#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
#define FRF_AZ_RX_DESC_WPTR_LBN 96
#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
#define FRF_AZ_RX_DESC_LBN 0
#define FRF_AZ_RX_DESC_WIDTH 64

/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
#define FR_AZ_RX_DC_CFG 0x00000840
#define FRF_AB_RX_MAX_PF_LBN 2
#define FRF_AB_RX_MAX_PF_WIDTH 2
#define FRF_AZ_RX_DC_SIZE_LBN 0
#define FRF_AZ_RX_DC_SIZE_WIDTH 2
#define FFE_AZ_RX_DC_SIZE_64 3
#define FFE_AZ_RX_DC_SIZE_32 2
#define FFE_AZ_RX_DC_SIZE_16 1
#define FFE_AZ_RX_DC_SIZE_8 0

/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
#define FR_AZ_RX_DC_PF_WM 0x00000850
#define FRF_AZ_RX_DC_PF_HWM_LBN 6
#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
#define FRF_AZ_RX_DC_PF_LWM_LBN 0
#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6

/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
#define FR_BZ_RX_RSS_TKEY 0x00000860
#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64

/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
#define FR_AZ_RX_NODESC_DROP 0x00000880
#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16

/* RX_SELF_RST_REG: Receive self reset register */
#define FR_AA_RX_SELF_RST 0x00000890
#define FRF_AA_RX_ISCSI_DIS_LBN 17
#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
#define FRF_AA_RX_SW_RST_REG_LBN 16
#define FRF_AA_RX_SW_RST_REG_WIDTH 1
#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
#define FRF_AA_RX_SELF_RST_EN_LBN 8
#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
#define FRF_AA_RX_MAX_PF_LAT_LBN 4
#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
#define FRF_AA_RX_MAX_LU_LAT_LBN 0
#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4

/* RX_DEBUG_REG: undocumented register */
#define FR_AZ_RX_DEBUG 0x000008a0
#define FRF_AZ_RX_DEBUG_LBN 0
#define FRF_AZ_RX_DEBUG_WIDTH 64

/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
#define FR_AZ_RX_PUSH_DROP 0x000008b0
#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32

/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128

/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128

/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
1200
/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12

/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
#define FR_AA_TX_DESC_UPD_KER 0x00000a10
#define FR_AA_TX_DESC_UPD_KER_STEP 8192
#define FR_AA_TX_DESC_UPD_KER_ROWS 8
/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
#define FR_BB_TX_DESC_UPD_P123 0x01000a10
#define FR_BB_TX_DESC_UPD_P123_STEP 8192
#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
#define FRF_AZ_TX_DESC_WPTR_LBN 96
#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
#define FRF_AZ_TX_DESC_LBN 0
#define FRF_AZ_TX_DESC_WIDTH 95

/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
#define FR_AZ_TX_DC_CFG 0x00000a20
#define FRF_AZ_TX_DC_SIZE_LBN 0
#define FRF_AZ_TX_DC_SIZE_WIDTH 2
#define FFE_AZ_TX_DC_SIZE_32 2
#define FFE_AZ_TX_DC_SIZE_16 1
#define FFE_AZ_TX_DC_SIZE_8 0

/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
#define FR_AA_TX_CHKSM_CFG 0x00000a30
#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
1245
1246/* TX_CFG_REG: Transmit configuration register */
1247#define FR_AZ_TX_CFG 0x00000a50
1248#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
1249#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
1250#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
1251#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
1252#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
1253#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
1254#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
1255#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
1256#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
1257#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
1258#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
1259#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
1260#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
1261#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
1262#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
1263#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
1264#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
1265#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
1266#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
1267#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
1268#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
1269#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
1270#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
1271#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
1272#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
1273#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
1274#define FRF_AZ_TX_P1_PRI_EN_LBN 4
1275#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
1276#define FRF_AZ_TX_OWNERR_CTL_LBN 2
1277#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
1278#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
1279#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
1280#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
1281#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
1282
1283/* TX_PUSH_DROP_REG: Transmit push dropped register */
1284#define FR_AZ_TX_PUSH_DROP 0x00000a60
1285#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
1286#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
1287
1288/* TX_RESERVED_REG: Transmit configuration register */
1289#define FR_AZ_TX_RESERVED 0x00000a80
1290#define FRF_AZ_TX_EVT_CNT_LBN 121
1291#define FRF_AZ_TX_EVT_CNT_WIDTH 7
1292#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
1293#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
1294#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
1295#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
1296#define FRF_AZ_TX_PUSH_EN_LBN 89
1297#define FRF_AZ_TX_PUSH_EN_WIDTH 1
1298#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
1299#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
1300#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
1301#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
1302#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
1303#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
1304#define FRF_AZ_TX_DMAQ_ST_LBN 78
1305#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
1306#define FRF_AZ_TX_RX_SPACER_LBN 64
1307#define FRF_AZ_TX_RX_SPACER_WIDTH 8
1308#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
1309#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
1310#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
1311#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
1312#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
1313#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
1314#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
1315#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
1316#define FRF_AZ_TX_XP_TIMER_LBN 52
1317#define FRF_AZ_TX_XP_TIMER_WIDTH 5
1318#define FRF_AZ_TX_PREF_SPACER_LBN 44
1319#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
1320#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
1321#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
1322#define FRF_AZ_TX_ONLY1TAG_LBN 21
1323#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
1324#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
1325#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
1326#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
1327#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
1328#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
1329#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
1330#define FRF_AA_TX_DMA_FF_THR_LBN 16
1331#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
1332#define FRF_AZ_TX_DMA_SPACER_LBN 8
1333#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
1334#define FRF_AA_TX_TCP_DIS_LBN 7
1335#define FRF_AA_TX_TCP_DIS_WIDTH 1
1336#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
1337#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
1338#define FRF_AA_TX_IP_DIS_LBN 6
1339#define FRF_AA_TX_IP_DIS_WIDTH 1
1340#define FRF_AZ_TX_MAX_CPL_LBN 2
1341#define FRF_AZ_TX_MAX_CPL_WIDTH 2
1342#define FFE_AZ_TX_MAX_CPL_16 3
1343#define FFE_AZ_TX_MAX_CPL_8 2
1344#define FFE_AZ_TX_MAX_CPL_4 1
1345#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
1346#define FRF_AZ_TX_MAX_PREF_LBN 0
1347#define FRF_AZ_TX_MAX_PREF_WIDTH 2
1348#define FFE_AZ_TX_MAX_PREF_32 3
1349#define FFE_AZ_TX_MAX_PREF_16 2
1350#define FFE_AZ_TX_MAX_PREF_8 1
1351#define FFE_AZ_TX_MAX_PREF_OFF 0
1352
1353/* TX_PACE_REG: Transmit pace control register */
1354#define FR_BZ_TX_PACE 0x00000a90
1355#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
1356#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
1357#define FRF_BZ_TX_PACE_SB_AF_LBN 9
1358#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
1359#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
1360#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
1361#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
1362#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
1363
1364/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
1365#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
1366#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
1367#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
1368
1369/* TX_VLAN_REG: Transmit VLAN tag register */
1370#define FR_BB_TX_VLAN 0x00000ae0
1371#define FRF_BB_TX_VLAN_EN_LBN 127
1372#define FRF_BB_TX_VLAN_EN_WIDTH 1
1373#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
1374#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
1375#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
1376#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
1377#define FRF_BB_TX_VLAN7_LBN 112
1378#define FRF_BB_TX_VLAN7_WIDTH 12
1379#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
1380#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
1381#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
1382#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
1383#define FRF_BB_TX_VLAN6_LBN 96
1384#define FRF_BB_TX_VLAN6_WIDTH 12
1385#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
1386#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
1387#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
1388#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
1389#define FRF_BB_TX_VLAN5_LBN 80
1390#define FRF_BB_TX_VLAN5_WIDTH 12
1391#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
1392#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
1393#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
1394#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
1395#define FRF_BB_TX_VLAN4_LBN 64
1396#define FRF_BB_TX_VLAN4_WIDTH 12
1397#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
1398#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
1399#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
1400#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
1401#define FRF_BB_TX_VLAN3_LBN 48
1402#define FRF_BB_TX_VLAN3_WIDTH 12
1403#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
1404#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
1405#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
1406#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
1407#define FRF_BB_TX_VLAN2_LBN 32
1408#define FRF_BB_TX_VLAN2_WIDTH 12
1409#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
1410#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
1411#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
1412#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
1413#define FRF_BB_TX_VLAN1_LBN 16
1414#define FRF_BB_TX_VLAN1_WIDTH 12
1415#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
1416#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
1417#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
1418#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
1419#define FRF_BB_TX_VLAN0_LBN 0
1420#define FRF_BB_TX_VLAN0_WIDTH 12
1421
1422/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
1423#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
1424#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
1425#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
1426#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
1427#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
1428#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
1429#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
1430#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
1431#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
1432#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
1433#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
1434#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
1435#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
1436#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
1437#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
1438#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
1439#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
1440#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
1441#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
1442#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
1443#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
1444#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
1445#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
1446#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
1447#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
1448#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
1449#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
1450#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
1451#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
1452#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
1453#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
1454#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
1455#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
1456#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
1457#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
1458#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
1459#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
1460#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
1461#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
1462#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
1463#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
1464#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
1465#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
1466#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
1467#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
1468#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
1469#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
1470#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
1471#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
1472#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
1473#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
1474#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
1475#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
1476#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
1477#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
1478#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
1479#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
1480#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
1481#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
1482#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
1483#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
1484#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
1485#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
1486#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
1487#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
1488#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
1489#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
1490
1491/* TX_IPFIL_TBL: Transmit IP source address filter table */
1492#define FR_BB_TX_IPFIL_TBL 0x00000b00
1493#define FR_BB_TX_IPFIL_TBL_STEP 16
1494#define FR_BB_TX_IPFIL_TBL_ROWS 16
1495#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
1496#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
1497#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
1498#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
1499#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
1500#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
1501#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
1502#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
1503
1504/* MD_TXD_REG: PHY management transmit data register */
1505#define FR_AB_MD_TXD 0x00000c00
1506#define FRF_AB_MD_TXD_LBN 0
1507#define FRF_AB_MD_TXD_WIDTH 16
1508
1509/* MD_RXD_REG: PHY management receive data register */
1510#define FR_AB_MD_RXD 0x00000c10
1511#define FRF_AB_MD_RXD_LBN 0
1512#define FRF_AB_MD_RXD_WIDTH 16
1513
1514/* MD_CS_REG: PHY management configuration & status register */
1515#define FR_AB_MD_CS 0x00000c20
1516#define FRF_AB_MD_RD_EN_CMD_LBN 15
1517#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
1518#define FRF_AB_MD_WR_EN_CMD_LBN 14
1519#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
1520#define FRF_AB_MD_ADDR_CMD_LBN 13
1521#define FRF_AB_MD_ADDR_CMD_WIDTH 1
1522#define FRF_AB_MD_PT_LBN 7
1523#define FRF_AB_MD_PT_WIDTH 3
1524#define FRF_AB_MD_PL_LBN 6
1525#define FRF_AB_MD_PL_WIDTH 1
1526#define FRF_AB_MD_INT_CLR_LBN 5
1527#define FRF_AB_MD_INT_CLR_WIDTH 1
1528#define FRF_AB_MD_GC_LBN 4
1529#define FRF_AB_MD_GC_WIDTH 1
1530#define FRF_AB_MD_PRSP_LBN 3
1531#define FRF_AB_MD_PRSP_WIDTH 1
1532#define FRF_AB_MD_RIC_LBN 2
1533#define FRF_AB_MD_RIC_WIDTH 1
1534#define FRF_AB_MD_RDC_LBN 1
1535#define FRF_AB_MD_RDC_WIDTH 1
1536#define FRF_AB_MD_WRC_LBN 0
1537#define FRF_AB_MD_WRC_WIDTH 1
1538
1539/* MD_PHY_ADR_REG: PHY management PHY address register */
1540#define FR_AB_MD_PHY_ADR 0x00000c30
1541#define FRF_AB_MD_PHY_ADR_LBN 0
1542#define FRF_AB_MD_PHY_ADR_WIDTH 16
1543
1544/* MD_ID_REG: PHY management ID register */
1545#define FR_AB_MD_ID 0x00000c40
1546#define FRF_AB_MD_PRT_ADR_LBN 11
1547#define FRF_AB_MD_PRT_ADR_WIDTH 5
1548#define FRF_AB_MD_DEV_ADR_LBN 6
1549#define FRF_AB_MD_DEV_ADR_WIDTH 5
1550
1551/* MD_STAT_REG: PHY management status & mask register */
1552#define FR_AB_MD_STAT 0x00000c50
1553#define FRF_AB_MD_PINT_LBN 4
1554#define FRF_AB_MD_PINT_WIDTH 1
1555#define FRF_AB_MD_DONE_LBN 3
1556#define FRF_AB_MD_DONE_WIDTH 1
1557#define FRF_AB_MD_BSERR_LBN 2
1558#define FRF_AB_MD_BSERR_WIDTH 1
1559#define FRF_AB_MD_LNFL_LBN 1
1560#define FRF_AB_MD_LNFL_WIDTH 1
1561#define FRF_AB_MD_BSY_LBN 0
1562#define FRF_AB_MD_BSY_WIDTH 1
1563
1564/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
1565#define FR_AB_MAC_STAT_DMA 0x00000c60
1566#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
1567#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
1568#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
1569#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
1570
1571/* MAC_CTRL_REG: Port MAC control register */
1572#define FR_AB_MAC_CTRL 0x00000c80
1573#define FRF_AB_MAC_XOFF_VAL_LBN 16
1574#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
1575#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
1576#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
1577#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
1578#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
1579#define FRF_AB_MAC_BCAD_ACPT_LBN 4
1580#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
1581#define FRF_AB_MAC_UC_PROM_LBN 3
1582#define FRF_AB_MAC_UC_PROM_WIDTH 1
1583#define FRF_AB_MAC_LINK_STATUS_LBN 2
1584#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
1585#define FRF_AB_MAC_SPEED_LBN 0
1586#define FRF_AB_MAC_SPEED_WIDTH 2
1587#define FFE_AB_MAC_SPEED_10G 3
1588#define FFE_AB_MAC_SPEED_1G 2
1589#define FFE_AB_MAC_SPEED_100M 1
1590#define FFE_AB_MAC_SPEED_10M 0
1591
1592/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
1593#define FR_BB_GEN_MODE 0x00000c90
1594#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
1595#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
1596#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
1597#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
1598#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
1599#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
1600#define FRF_BB_XG_PHY_INT_MASK_LBN 0
1601#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
1602
1603/* MAC_MC_HASH_REG0: Multicast address hash table */
1604#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
1605#define FRF_AB_MAC_MCAST_HASH0_LBN 0
1606#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
1607
1608/* MAC_MC_HASH_REG1: Multicast address hash table */
1609#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
1610#define FRF_AB_MAC_MCAST_HASH1_LBN 0
1611#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
1612
1613/* GM_CFG1_REG: GMAC configuration register 1 */
1614#define FR_AB_GM_CFG1 0x00000e00
1615#define FRF_AB_GM_SW_RST_LBN 31
1616#define FRF_AB_GM_SW_RST_WIDTH 1
1617#define FRF_AB_GM_SIM_RST_LBN 30
1618#define FRF_AB_GM_SIM_RST_WIDTH 1
1619#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
1620#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
1621#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
1622#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
1623#define FRF_AB_GM_RST_RX_FUNC_LBN 17
1624#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
1625#define FRF_AB_GM_RST_TX_FUNC_LBN 16
1626#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
1627#define FRF_AB_GM_LOOP_LBN 8
1628#define FRF_AB_GM_LOOP_WIDTH 1
1629#define FRF_AB_GM_RX_FC_EN_LBN 5
1630#define FRF_AB_GM_RX_FC_EN_WIDTH 1
1631#define FRF_AB_GM_TX_FC_EN_LBN 4
1632#define FRF_AB_GM_TX_FC_EN_WIDTH 1
1633#define FRF_AB_GM_SYNC_RXEN_LBN 3
1634#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
1635#define FRF_AB_GM_RX_EN_LBN 2
1636#define FRF_AB_GM_RX_EN_WIDTH 1
1637#define FRF_AB_GM_SYNC_TXEN_LBN 1
1638#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
1639#define FRF_AB_GM_TX_EN_LBN 0
1640#define FRF_AB_GM_TX_EN_WIDTH 1
1641
1642/* GM_CFG2_REG: GMAC configuration register 2 */
1643#define FR_AB_GM_CFG2 0x00000e10
1644#define FRF_AB_GM_PAMBL_LEN_LBN 12
1645#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
1646#define FRF_AB_GM_IF_MODE_LBN 8
1647#define FRF_AB_GM_IF_MODE_WIDTH 2
1648#define FFE_AB_IF_MODE_BYTE_MODE 2
1649#define FFE_AB_IF_MODE_NIBBLE_MODE 1
1650#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
1651#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
1652#define FRF_AB_GM_LEN_CHK_LBN 4
1653#define FRF_AB_GM_LEN_CHK_WIDTH 1
1654#define FRF_AB_GM_PAD_CRC_EN_LBN 2
1655#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
1656#define FRF_AB_GM_CRC_EN_LBN 1
1657#define FRF_AB_GM_CRC_EN_WIDTH 1
1658#define FRF_AB_GM_FD_LBN 0
1659#define FRF_AB_GM_FD_WIDTH 1
1660
1661/* GM_IPG_REG: GMAC IPG register */
1662#define FR_AB_GM_IPG 0x00000e20
1663#define FRF_AB_GM_NONB2B_IPG1_LBN 24
1664#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
1665#define FRF_AB_GM_NONB2B_IPG2_LBN 16
1666#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
1667#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
1668#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
1669#define FRF_AB_GM_B2B_IPG_LBN 0
1670#define FRF_AB_GM_B2B_IPG_WIDTH 7
1671
1672/* GM_HD_REG: GMAC half duplex register */
1673#define FR_AB_GM_HD 0x00000e30
1674#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
1675#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
1676#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
1677#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
1678#define FRF_AB_GM_BP_NO_BOFF_LBN 18
1679#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
1680#define FRF_AB_GM_DIS_BOFF_LBN 17
1681#define FRF_AB_GM_DIS_BOFF_WIDTH 1
1682#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
1683#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
1684#define FRF_AB_GM_RTRY_LIMIT_LBN 12
1685#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
1686#define FRF_AB_GM_COL_WIN_LBN 0
1687#define FRF_AB_GM_COL_WIN_WIDTH 10
1688
1689/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
1690#define FR_AB_GM_MAX_FLEN 0x00000e40
1691#define FRF_AB_GM_MAX_FLEN_LBN 0
1692#define FRF_AB_GM_MAX_FLEN_WIDTH 16
1693
1694/* GM_TEST_REG: GMAC test register */
1695#define FR_AB_GM_TEST 0x00000e70
1696#define FRF_AB_GM_MAX_BOFF_LBN 3
1697#define FRF_AB_GM_MAX_BOFF_WIDTH 1
1698#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
1699#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
1700#define FRF_AB_GM_TEST_PAUSE_LBN 1
1701#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
1702#define FRF_AB_GM_SHORT_SLOT_LBN 0
1703#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
1704
1705/* GM_ADR1_REG: GMAC station address register 1 */
1706#define FR_AB_GM_ADR1 0x00000f00
1707#define FRF_AB_GM_ADR_B0_LBN 24
1708#define FRF_AB_GM_ADR_B0_WIDTH 8
1709#define FRF_AB_GM_ADR_B1_LBN 16
1710#define FRF_AB_GM_ADR_B1_WIDTH 8
1711#define FRF_AB_GM_ADR_B2_LBN 8
1712#define FRF_AB_GM_ADR_B2_WIDTH 8
1713#define FRF_AB_GM_ADR_B3_LBN 0
1714#define FRF_AB_GM_ADR_B3_WIDTH 8
1715
1716/* GM_ADR2_REG: GMAC station address register 2 */
1717#define FR_AB_GM_ADR2 0x00000f10
1718#define FRF_AB_GM_ADR_B4_LBN 24
1719#define FRF_AB_GM_ADR_B4_WIDTH 8
1720#define FRF_AB_GM_ADR_B5_LBN 16
1721#define FRF_AB_GM_ADR_B5_WIDTH 8
1722
1723/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
1724#define FR_AB_GMF_CFG0 0x00000f20
1725#define FRF_AB_GMF_FTFENRPLY_LBN 20
1726#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
1727#define FRF_AB_GMF_STFENRPLY_LBN 19
1728#define FRF_AB_GMF_STFENRPLY_WIDTH 1
1729#define FRF_AB_GMF_FRFENRPLY_LBN 18
1730#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
1731#define FRF_AB_GMF_SRFENRPLY_LBN 17
1732#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
1733#define FRF_AB_GMF_WTMENRPLY_LBN 16
1734#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
1735#define FRF_AB_GMF_FTFENREQ_LBN 12
1736#define FRF_AB_GMF_FTFENREQ_WIDTH 1
1737#define FRF_AB_GMF_STFENREQ_LBN 11
1738#define FRF_AB_GMF_STFENREQ_WIDTH 1
1739#define FRF_AB_GMF_FRFENREQ_LBN 10
1740#define FRF_AB_GMF_FRFENREQ_WIDTH 1
1741#define FRF_AB_GMF_SRFENREQ_LBN 9
1742#define FRF_AB_GMF_SRFENREQ_WIDTH 1
1743#define FRF_AB_GMF_WTMENREQ_LBN 8
1744#define FRF_AB_GMF_WTMENREQ_WIDTH 1
1745#define FRF_AB_GMF_HSTRSTFT_LBN 4
1746#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
1747#define FRF_AB_GMF_HSTRSTST_LBN 3
1748#define FRF_AB_GMF_HSTRSTST_WIDTH 1
1749#define FRF_AB_GMF_HSTRSTFR_LBN 2
1750#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
1751#define FRF_AB_GMF_HSTRSTSR_LBN 1
1752#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
1753#define FRF_AB_GMF_HSTRSTWT_LBN 0
1754#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
1755
1756/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
1757#define FR_AB_GMF_CFG1 0x00000f30
1758#define FRF_AB_GMF_CFGFRTH_LBN 16
1759#define FRF_AB_GMF_CFGFRTH_WIDTH 5
1760#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
1761#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
1762
1763/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
1764#define FR_AB_GMF_CFG2 0x00000f40
1765#define FRF_AB_GMF_CFGHWM_LBN 16
1766#define FRF_AB_GMF_CFGHWM_WIDTH 6
1767#define FRF_AB_GMF_CFGLWM_LBN 0
1768#define FRF_AB_GMF_CFGLWM_WIDTH 6
1769
1770/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
1771#define FR_AB_GMF_CFG3 0x00000f50
1772#define FRF_AB_GMF_CFGHWMFT_LBN 16
1773#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
1774#define FRF_AB_GMF_CFGFTTH_LBN 0
1775#define FRF_AB_GMF_CFGFTTH_WIDTH 6
1776
1777/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
1778#define FR_AB_GMF_CFG4 0x00000f60
1779#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
1780#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
1781
1782/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
1783#define FR_AB_GMF_CFG5 0x00000f70
1784#define FRF_AB_GMF_CFGHDPLX_LBN 22
1785#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
1786#define FRF_AB_GMF_SRFULL_LBN 21
1787#define FRF_AB_GMF_SRFULL_WIDTH 1
1788#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
1789#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
1790#define FRF_AB_GMF_CFGBYTMODE_LBN 19
1791#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
1792#define FRF_AB_GMF_HSTDRPLT64_LBN 18
1793#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
1794#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
1795#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
1796
1797/* TX_SRC_MAC_TBL: Transmit IP source address filter table */
1798#define FR_BB_TX_SRC_MAC_TBL 0x00001000
1799#define FR_BB_TX_SRC_MAC_TBL_STEP 16
1800#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
1801#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
1802#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
1803#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
1804#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
1805
1806/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
1807#define FR_BB_TX_SRC_MAC_CTL 0x00001100
1808#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
1809#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
1810#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
1811#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
1812#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
1813#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
1814#define FRF_BB_TX_MAC_QID_SEL_LBN 0
1815#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
1816
1817/* XM_ADR_LO_REG: XGMAC address register low */
1818#define FR_AB_XM_ADR_LO 0x00001200
1819#define FRF_AB_XM_ADR_LO_LBN 0
1820#define FRF_AB_XM_ADR_LO_WIDTH 32
1821
1822/* XM_ADR_HI_REG: XGMAC address register high */
1823#define FR_AB_XM_ADR_HI 0x00001210
1824#define FRF_AB_XM_ADR_HI_LBN 0
1825#define FRF_AB_XM_ADR_HI_WIDTH 16
1826
1827/* XM_GLB_CFG_REG: XGMAC global configuration */
1828#define FR_AB_XM_GLB_CFG 0x00001220
1829#define FRF_AB_XM_RMTFLT_GEN_LBN 17
1830#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
1831#define FRF_AB_XM_DEBUG_MODE_LBN 16
1832#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
1833#define FRF_AB_XM_RX_STAT_EN_LBN 11
1834#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
1835#define FRF_AB_XM_TX_STAT_EN_LBN 10
1836#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
1837#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
1838#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
1839#define FRF_AB_XM_WAN_MODE_LBN 5
1840#define FRF_AB_XM_WAN_MODE_WIDTH 1
1841#define FRF_AB_XM_INTCLR_MODE_LBN 3
1842#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
1843#define FRF_AB_XM_CORE_RST_LBN 0
1844#define FRF_AB_XM_CORE_RST_WIDTH 1
1845
1846/* XM_TX_CFG_REG: XGMAC transmit configuration */
1847#define FR_AB_XM_TX_CFG 0x00001230
1848#define FRF_AB_XM_TX_PROG_LBN 24
1849#define FRF_AB_XM_TX_PROG_WIDTH 1
1850#define FRF_AB_XM_IPG_LBN 16
1851#define FRF_AB_XM_IPG_WIDTH 4
1852#define FRF_AB_XM_FCNTL_LBN 10
1853#define FRF_AB_XM_FCNTL_WIDTH 1
1854#define FRF_AB_XM_TXCRC_LBN 8
1855#define FRF_AB_XM_TXCRC_WIDTH 1
1856#define FRF_AB_XM_EDRC_LBN 6
1857#define FRF_AB_XM_EDRC_WIDTH 1
1858#define FRF_AB_XM_AUTO_PAD_LBN 5
1859#define FRF_AB_XM_AUTO_PAD_WIDTH 1
1860#define FRF_AB_XM_TX_PRMBL_LBN 2
1861#define FRF_AB_XM_TX_PRMBL_WIDTH 1
1862#define FRF_AB_XM_TXEN_LBN 1
1863#define FRF_AB_XM_TXEN_WIDTH 1
1864#define FRF_AB_XM_TX_RST_LBN 0
1865#define FRF_AB_XM_TX_RST_WIDTH 1
1866
1867/* XM_RX_CFG_REG: XGMAC receive configuration */
1868#define FR_AB_XM_RX_CFG 0x00001240
1869#define FRF_AB_XM_PASS_LENERR_LBN 26
1870#define FRF_AB_XM_PASS_LENERR_WIDTH 1
1871#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
1872#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
1873#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
1874#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
1875#define FRF_AB_XM_REJ_BCAST_LBN 20
1876#define FRF_AB_XM_REJ_BCAST_WIDTH 1
1877#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
1878#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
1879#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
1880#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
1881#define FRF_AB_XM_AUTO_DEPAD_LBN 8
1882#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
1883#define FRF_AB_XM_RXCRC_LBN 3
1884#define FRF_AB_XM_RXCRC_WIDTH 1
1885#define FRF_AB_XM_RX_PRMBL_LBN 2
1886#define FRF_AB_XM_RX_PRMBL_WIDTH 1
1887#define FRF_AB_XM_RXEN_LBN 1
1888#define FRF_AB_XM_RXEN_WIDTH 1
1889#define FRF_AB_XM_RX_RST_LBN 0
1890#define FRF_AB_XM_RX_RST_WIDTH 1
1891
1892/* XM_MGT_INT_MASK: documentation to be written for sum_XM_MGT_INT_MASK */
1893#define FR_AB_XM_MGT_INT_MASK 0x00001250
1894#define FRF_AB_XM_MSK_STA_INTR_LBN 16
1895#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
1896#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
1897#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
1898#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
1899#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
1900#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
1901#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
1902#define FRF_AB_XM_MSK_RMTFLT_LBN 1
1903#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
1904#define FRF_AB_XM_MSK_LCLFLT_LBN 0
1905#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
1906
1907/* XM_FC_REG: XGMAC flow control register */
1908#define FR_AB_XM_FC 0x00001270
1909#define FRF_AB_XM_PAUSE_TIME_LBN 16
1910#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
1911#define FRF_AB_XM_RX_MAC_STAT_LBN 11
1912#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
1913#define FRF_AB_XM_TX_MAC_STAT_LBN 10
1914#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
1915#define FRF_AB_XM_MCNTL_PASS_LBN 8
1916#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
1917#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
1918#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
1919#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
1920#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
1921#define FRF_AB_XM_ZPAUSE_LBN 2
1922#define FRF_AB_XM_ZPAUSE_WIDTH 1
1923#define FRF_AB_XM_XMIT_PAUSE_LBN 1
1924#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
1925#define FRF_AB_XM_DIS_FCNTL_LBN 0
1926#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
1927
1928/* XM_PAUSE_TIME_REG: XGMAC pause time register */
1929#define FR_AB_XM_PAUSE_TIME 0x00001290
1930#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
1931#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
1932#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
1933#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
1934
1935/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
1936#define FR_AB_XM_TX_PARAM 0x000012d0
1937#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
1938#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
1939#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
1940#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
1941#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
1942#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
1943#define FRF_AB_XM_PAD_CHAR_LBN 0
1944#define FRF_AB_XM_PAD_CHAR_WIDTH 8
1945
1946/* XM_RX_PARAM_REG: XGMAC receive parameter register */
1947#define FR_AB_XM_RX_PARAM 0x000012e0
1948#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
1949#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
1950#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
1951#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
1952
1953/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
1954#define FR_AB_XM_MGT_INT_MSK 0x000012f0
1955#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
1956#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
1957#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
1958#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
1959#define FRF_AB_XM_PRMBLE_ERR_LBN 2
1960#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
1961#define FRF_AB_XM_RMTFLT_LBN 1
1962#define FRF_AB_XM_RMTFLT_WIDTH 1
1963#define FRF_AB_XM_LCLFLT_LBN 0
1964#define FRF_AB_XM_LCLFLT_WIDTH 1
1965
/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register.
 * Per-lane (A-D) powerdown and reset request bits plus their enables. */
#define FR_AB_XX_PWR_RST 0x00001300
#define FRF_AB_XX_PWRDND_SIG_LBN 31
#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
#define FRF_AB_XX_PWRDNC_SIG_LBN 30
#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
#define FRF_AB_XX_PWRDNB_SIG_LBN 29
#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
#define FRF_AB_XX_PWRDNA_SIG_LBN 28
#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
#define FRF_AB_XX_SIM_MODE_LBN 27
#define FRF_AB_XX_SIM_MODE_WIDTH 1
#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
#define FRF_AB_XX_RESETD_SIG_LBN 23
#define FRF_AB_XX_RESETD_SIG_WIDTH 1
#define FRF_AB_XX_RESETC_SIG_LBN 22
#define FRF_AB_XX_RESETC_SIG_WIDTH 1
#define FRF_AB_XX_RESETB_SIG_LBN 21
#define FRF_AB_XX_RESETB_SIG_WIDTH 1
#define FRF_AB_XX_RESETA_SIG_LBN 20
#define FRF_AB_XX_RESETA_SIG_WIDTH 1
#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
#define FRF_AB_XX_SD_RST_ACT_LBN 16
#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
#define FRF_AB_XX_PWRDND_EN_LBN 15
#define FRF_AB_XX_PWRDND_EN_WIDTH 1
#define FRF_AB_XX_PWRDNC_EN_LBN 14
#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
#define FRF_AB_XX_PWRDNB_EN_LBN 13
#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
#define FRF_AB_XX_PWRDNA_EN_LBN 12
#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
#define FRF_AB_XX_RESETD_EN_LBN 7
#define FRF_AB_XX_RESETD_EN_WIDTH 1
#define FRF_AB_XX_RESETC_EN_LBN 6
#define FRF_AB_XX_RESETC_EN_WIDTH 1
#define FRF_AB_XX_RESETB_EN_LBN 5
#define FRF_AB_XX_RESETB_EN_WIDTH 1
#define FRF_AB_XX_RESETA_EN_LBN 4
#define FRF_AB_XX_RESETA_EN_WIDTH 1
#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
#define FRF_AB_XX_RST_XX_EN_LBN 0
#define FRF_AB_XX_RST_XX_EN_WIDTH 1

/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register.
 * SerDes termination adjust, drive strength and per-lane loopback. */
#define FR_AB_XX_SD_CTL 0x00001310
#define FRF_AB_XX_TERMADJ1_LBN 17
#define FRF_AB_XX_TERMADJ1_WIDTH 1
#define FRF_AB_XX_TERMADJ0_LBN 16
#define FRF_AB_XX_TERMADJ0_WIDTH 1
#define FRF_AB_XX_HIDRVD_LBN 15
#define FRF_AB_XX_HIDRVD_WIDTH 1
#define FRF_AB_XX_LODRVD_LBN 14
#define FRF_AB_XX_LODRVD_WIDTH 1
#define FRF_AB_XX_HIDRVC_LBN 13
#define FRF_AB_XX_HIDRVC_WIDTH 1
#define FRF_AB_XX_LODRVC_LBN 12
#define FRF_AB_XX_LODRVC_WIDTH 1
#define FRF_AB_XX_HIDRVB_LBN 11
#define FRF_AB_XX_HIDRVB_WIDTH 1
#define FRF_AB_XX_LODRVB_LBN 10
#define FRF_AB_XX_LODRVB_WIDTH 1
#define FRF_AB_XX_HIDRVA_LBN 9
#define FRF_AB_XX_HIDRVA_WIDTH 1
#define FRF_AB_XX_LODRVA_LBN 8
#define FRF_AB_XX_LODRVA_WIDTH 1
#define FRF_AB_XX_LPBKD_LBN 3
#define FRF_AB_XX_LPBKD_WIDTH 1
#define FRF_AB_XX_LPBKC_LBN 2
#define FRF_AB_XX_LPBKC_WIDTH 1
#define FRF_AB_XX_LPBKB_LBN 1
#define FRF_AB_XX_LPBKB_WIDTH 1
#define FRF_AB_XX_LPBKA_LBN 0
#define FRF_AB_XX_LPBKA_WIDTH 1

/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register.
 * 4-bit de-emphasis (DEQ) and drive (DTX) settings per lane A-D. */
#define FR_AB_XX_TXDRV_CTL 0x00001320
#define FRF_AB_XX_DEQD_LBN 28
#define FRF_AB_XX_DEQD_WIDTH 4
#define FRF_AB_XX_DEQC_LBN 24
#define FRF_AB_XX_DEQC_WIDTH 4
#define FRF_AB_XX_DEQB_LBN 20
#define FRF_AB_XX_DEQB_WIDTH 4
#define FRF_AB_XX_DEQA_LBN 16
#define FRF_AB_XX_DEQA_WIDTH 4
#define FRF_AB_XX_DTXD_LBN 12
#define FRF_AB_XX_DTXD_WIDTH 4
#define FRF_AB_XX_DTXC_LBN 8
#define FRF_AB_XX_DTXC_WIDTH 4
#define FRF_AB_XX_DTXB_LBN 4
#define FRF_AB_XX_DTXB_WIDTH 4
#define FRF_AB_XX_DTXA_LBN 0
#define FRF_AB_XX_DTXA_WIDTH 4
/* XX_PRBS_CTL_REG: XAUI PRBS control register.
 * Field names indicate per-channel RX/TX PRBS pattern select, invert and
 * checker enable — presumably the PRBS generator/checker control; the
 * vendor placeholder comment ("documentation to be written") gave no
 * authoritative description, so confirm against the Falcon datasheet. */
#define FR_AB_XX_PRBS_CTL 0x00001330
#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1

/* XX_PRBS_CHK_REG: XAUI PRBS checker status register.
 * Field names suggest per-channel degrade-detect, LFSR lock and error
 * check status — vendor documentation was a placeholder; verify against
 * the datasheet before relying on exact semantics. */
#define FR_AB_XX_PRBS_CHK 0x00001340
#define FRF_AB_XX_REV_LB_EN_LBN 16
#define FRF_AB_XX_REV_LB_EN_WIDTH 1
#define FRF_AB_XX_CH3_DEG_DET_LBN 15
#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
#define FRF_AB_XX_CH2_DEG_DET_LBN 11
#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
#define FRF_AB_XX_CH1_DEG_DET_LBN 7
#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
#define FRF_AB_XX_CH0_DEG_DET_LBN 3
#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1

/* XX_PRBS_ERR_REG: XAUI PRBS error counters.
 * One 8-bit error count per channel (field names; vendor documentation
 * was a placeholder — confirm against the datasheet). */
#define FR_AB_XX_PRBS_ERR 0x00001350
#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
/* XX_CORE_STAT_REG: XAUI XGXS core status register.
 * Signal-force overrides, loopback enables, and per-lane sync/align/
 * comma-detect/error status bits. */
#define FR_AB_XX_CORE_STAT 0x00001360
#define FRF_AB_XX_FORCE_SIG3_LBN 31
#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
#define FRF_AB_XX_FORCE_SIG2_LBN 29
#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
#define FRF_AB_XX_FORCE_SIG1_LBN 27
#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
#define FRF_AB_XX_FORCE_SIG0_LBN 25
#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
#define FRF_AB_XX_XGXS_LB_EN_LBN 23
#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
#define FRF_AB_XX_XGMII_LB_EN_LBN 22
#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
#define FRF_AB_XX_MATCH_FAULT_LBN 21
#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
#define FRF_AB_XX_ALIGN_DONE_LBN 20
#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
#define FRF_AB_XX_SYNC_STAT3_LBN 19
#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
#define FRF_AB_XX_SYNC_STAT2_LBN 18
#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
#define FRF_AB_XX_SYNC_STAT1_LBN 17
#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
#define FRF_AB_XX_SYNC_STAT0_LBN 16
#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
#define FRF_AB_XX_DISPERR_CH3_LBN 3
#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
#define FRF_AB_XX_DISPERR_CH2_LBN 2
#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
#define FRF_AB_XX_DISPERR_CH1_LBN 1
#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
#define FRF_AB_XX_DISPERR_CH0_LBN 0
#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table (A-series,
 * kernel-access window: 4 rows only). */
#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
/* RX_DESC_PTR_TBL: Receive descriptor pointer table (B-series and later).
 * Row counts differ by silicon revision (BB vs CZ). */
#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
#define FRF_CZ_RX_HDR_SPLIT_LBN 90
#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
#define FRF_AA_RX_RESET_LBN 89
#define FRF_AA_RX_RESET_WIDTH 1
#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
/* Encodings for RX_DESCQ_SIZE (queue length in descriptors) */
#define FFE_AZ_RX_DESCQ_SIZE_4K 3
#define FFE_AZ_RX_DESCQ_SIZE_2K 2
#define FFE_AZ_RX_DESCQ_SIZE_1K 1
#define FFE_AZ_RX_DESCQ_SIZE_512 0
#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
#define FRF_AZ_RX_DESCQ_EN_LBN 0
#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer table (A-series,
 * kernel-access window: 8 rows). */
#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
/* TX_DESC_PTR_TBL: Transmit descriptor pointer table (B-series and later). */
#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000
#define FR_BZ_TX_DESC_PTR_TBL_STEP 16
#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096
#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
#define FRF_AZ_TX_DESCQ_EN_LBN 88
#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
/* Encodings for TX_DESCQ_SIZE (queue length in descriptors) */
#define FFE_AZ_TX_DESCQ_SIZE_4K 3
#define FFE_AZ_TX_DESCQ_SIZE_2K 2
#define FFE_AZ_TX_DESCQ_SIZE_1K 1
#define FFE_AZ_TX_DESCQ_SIZE_512 0
#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
/* EVQ_PTR_TBL_KER: Event queue pointer table (A-series kernel window). */
#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00
#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
/* EVQ_PTR_TBL: Event queue pointer table (B-series and later). */
#define FR_BZ_EVQ_PTR_TBL 0x00f60000
#define FR_BZ_EVQ_PTR_TBL_STEP 16
#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
#define FR_BB_EVQ_PTR_TBL_ROWS 4096
#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
/* Bit 39 is revision-dependent: wakeup/interrupt enable on A/B,
 * DoS protection enable on C-series. */
#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
#define FRF_AZ_EVQ_EN_LBN 23
#define FRF_AZ_EVQ_EN_WIDTH 1
#define FRF_AZ_EVQ_SIZE_LBN 20
#define FRF_AZ_EVQ_SIZE_WIDTH 3
/* Encodings for EVQ_SIZE (queue length in events) */
#define FFE_AZ_EVQ_SIZE_32K 6
#define FFE_AZ_EVQ_SIZE_16K 5
#define FFE_AZ_EVQ_SIZE_8K 4
#define FFE_AZ_EVQ_SIZE_4K 3
#define FFE_AZ_EVQ_SIZE_2K 2
#define FFE_AZ_EVQ_SIZE_1K 1
#define FFE_AZ_EVQ_SIZE_512 0
#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode, direct access
 * by driver (A-series window). Each row packs two (odd/even) entries. */
#define FR_AA_BUF_HALF_TBL_KER 0x00018000
#define FR_AA_BUF_HALF_TBL_KER_STEP 8
#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
/* BUF_HALF_TBL: Buffer table in half buffer table mode, direct access by
 * driver (B-series and later). */
#define FR_BZ_BUF_HALF_TBL 0x00800000
#define FR_BZ_BUF_HALF_TBL_STEP 8
#define FR_CZ_BUF_HALF_TBL_ROWS 147456
#define FR_BB_BUF_HALF_TBL_ROWS 524288
#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12

/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode, direct access
 * by driver (A-series window). Note: same base address as the half-table
 * view — the mode selects the interpretation. */
#define FR_AA_BUF_FULL_TBL_KER 0x00018000
#define FR_AA_BUF_FULL_TBL_KER_STEP 8
#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
/* BUF_FULL_TBL: Buffer table in full buffer table mode, direct access by
 * driver (B-series and later). */
#define FR_BZ_BUF_FULL_TBL 0x00800000
#define FR_BZ_BUF_FULL_TBL_STEP 8
#define FR_CZ_BUF_FULL_TBL_ROWS 147456
#define FR_BB_BUF_FULL_TBL_ROWS 917504
#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
#define FRF_AZ_BUF_ADR_REGION_LBN 48
#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
/* Encodings for BUF_ADR_REGION */
#define FFE_AZ_BUF_ADR_REGN3 3
#define FFE_AZ_BUF_ADR_REGN2 2
#define FFE_AZ_BUF_ADR_REGN1 1
#define FFE_AZ_BUF_ADR_REGN0 0
#define FRF_AZ_BUF_ADR_FBUF_LBN 14
#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
/* RX_FILTER_TBL0: TCP/IPv4 receive filter table. */
#define FR_BZ_RX_FILTER_TBL0 0x00f00000
#define FR_BZ_RX_FILTER_TBL0_STEP 32
#define FR_BZ_RX_FILTER_TBL0_ROWS 8192
/* RX_FILTER_TBL1: TCP/IPv4 receive filter table (B-series second bank,
 * offset 0x10 into each 32-byte row). */
#define FR_BB_RX_FILTER_TBL1 0x00f00010
#define FR_BB_RX_FILTER_TBL1_STEP 32
#define FR_BB_RX_FILTER_TBL1_ROWS 8192
#define FRF_BZ_RSS_EN_LBN 110
#define FRF_BZ_RSS_EN_WIDTH 1
#define FRF_BZ_SCATTER_EN_LBN 109
#define FRF_BZ_SCATTER_EN_WIDTH 1
#define FRF_BZ_TCP_UDP_LBN 108
#define FRF_BZ_TCP_UDP_WIDTH 1
#define FRF_BZ_RXQ_ID_LBN 96
#define FRF_BZ_RXQ_ID_WIDTH 12
#define FRF_BZ_DEST_IP_LBN 64
#define FRF_BZ_DEST_IP_WIDTH 32
#define FRF_BZ_DEST_PORT_TCP_LBN 48
#define FRF_BZ_DEST_PORT_TCP_WIDTH 16
#define FRF_BZ_SRC_IP_LBN 16
#define FRF_BZ_SRC_IP_WIDTH 32
#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16

/* RX_MAC_FILTER_TBL0: Receive Ethernet (MAC/VLAN) filter table
 * (C-series; shares the 0x00f00010 offset used by RX_FILTER_TBL1 on B). */
#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
#define FRF_CZ_RMFT_RSS_EN_LBN 75
#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
#define FRF_CZ_RMFT_RXQ_ID_LBN 61
#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
#define FRF_CZ_RMFT_DEST_MAC_LBN 16
#define FRF_CZ_RMFT_DEST_MAC_WIDTH 44
#define FRF_CZ_RMFT_VLAN_ID_LBN 0
#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
/* TIMER_TBL: Per-queue interrupt-moderation timer table. */
#define FR_BZ_TIMER_TBL 0x00f70000
#define FR_BZ_TIMER_TBL_STEP 16
#define FR_CZ_TIMER_TBL_ROWS 1024
#define FR_BB_TIMER_TBL_ROWS 4096
#define FRF_CZ_TIMER_Q_EN_LBN 33
#define FRF_CZ_TIMER_Q_EN_WIDTH 1
#define FRF_CZ_INT_ARMD_LBN 32
#define FRF_CZ_INT_ARMD_WIDTH 1
#define FRF_CZ_INT_PEND_LBN 31
#define FRF_CZ_INT_PEND_WIDTH 1
#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
#define FRF_CZ_TIMER_MODE_LBN 14
#define FRF_CZ_TIMER_MODE_WIDTH 2
#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
#define FFE_CZ_TIMER_MODE_TRIG_START 2
#define FFE_CZ_TIMER_MODE_IMMED_START 1
#define FFE_CZ_TIMER_MODE_DIS 0
#define FRF_BB_TIMER_MODE_LBN 12
#define FRF_BB_TIMER_MODE_WIDTH 2
/* NOTE(review): FFE_BB_TIMER_MODE_INT_HLDOFF and _TRIG_START share the
 * encoding 2, unlike the C-series encodings above — values kept as-is;
 * confirm against the Falcon B datasheet before changing. */
#define FFE_BB_TIMER_MODE_INT_HLDOFF 2
#define FFE_BB_TIMER_MODE_TRIG_START 2
#define FFE_BB_TIMER_MODE_IMMED_START 1
#define FFE_BB_TIMER_MODE_DIS 0
#define FRF_CZ_TIMER_VAL_LBN 0
#define FRF_CZ_TIMER_VAL_WIDTH 14
#define FRF_BB_TIMER_VAL_LBN 0
#define FRF_BB_TIMER_VAL_WIDTH 12
/* TX_PACE_TBL: Transmit pacing table (per-queue pace value). */
#define FR_BZ_TX_PACE_TBL 0x00f80000
#define FR_BZ_TX_PACE_TBL_STEP 16
#define FR_CZ_TX_PACE_TBL_ROWS 1024
#define FR_BB_TX_PACE_TBL_ROWS 4096
#define FRF_BZ_TX_PACE_LBN 0
#define FRF_BZ_TX_PACE_WIDTH 5

/* RX_INDIRECTION_TBL: RSS RX indirection table (hash -> queue). */
#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
#define FRF_BZ_IT_QUEUE_LBN 0
#define FRF_BZ_IT_QUEUE_WIDTH 6

/* TX_FILTER_TBL0: TCP/IPv4 transmit filter table (C-series). */
#define FR_CZ_TX_FILTER_TBL0 0x00fc0000
#define FR_CZ_TX_FILTER_TBL0_STEP 16
#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
#define FRF_CZ_TIFT_TCP_UDP_LBN 108
#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
#define FRF_CZ_TIFT_TXQ_ID_LBN 96
#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
#define FRF_CZ_TIFT_DEST_IP_LBN 64
#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
#define FRF_CZ_TIFT_SRC_IP_LBN 16
#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16

/* TX_MAC_FILTER_TBL0: Transmit Ethernet (MAC/VLAN) filter table (C-series). */
#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
#define FRF_CZ_TMFT_TXQ_ID_LBN 61
#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
#define FRF_CZ_TMFT_SRC_MAC_LBN 16
#define FRF_CZ_TMFT_SRC_MAC_WIDTH 44
#define FRF_CZ_TMFT_VLAN_ID_LBN 0
#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
/* MC_TREG_SMEM: Management-controller shared memory window (C-series). */
#define FR_CZ_MC_TREG_SMEM 0x00ff0000
#define FR_CZ_MC_TREG_SMEM_STEP 4
#define FR_CZ_MC_TREG_SMEM_ROWS 512
#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32

/* MSIX_VECTOR_TABLE: MSI-X vector table (B-series location). */
#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
/* MSIX_VECTOR_TABLE: MSI-X vector table (C-series location; step is the
 * shared FR_BZ_MSIX_VECTOR_TABLE_STEP above). */
#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000
/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32

/* MSIX_PBA_TABLE: MSI-X pending bit array (B-series location). */
#define FR_BB_MSIX_PBA_TABLE 0x00ff2000
#define FR_BZ_MSIX_PBA_TABLE_STEP 4
#define FR_BB_MSIX_PBA_TABLE_ROWS 2
/* MSIX_PBA_TABLE: MSI-X pending bit array (C-series location; step is the
 * shared FR_BZ_MSIX_PBA_TABLE_STEP above). */
#define FR_CZ_MSIX_PBA_TABLE 0x00008000
/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32

/* SRM_DBG_REG: SRAM debug access window. */
#define FR_BZ_SRM_DBG 0x03000000
#define FR_BZ_SRM_DBG_STEP 8
#define FR_CZ_SRM_DBG_ROWS 262144
#define FR_BB_SRM_DBG_ROWS 2097152
#define FRF_BZ_SRM_DBG_LBN 0
#define FRF_BZ_SRM_DBG_WIDTH 64

/* TB_MSIX_PBA_TABLE: MSI-X pending bit array (test-bench view, C-series). */
#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
2582
/* DRIVER_EV: driver-generated event format — subcode + subdata. */
#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
/* DRIVER_EV subcode values */
#define FSE_BZ_TX_DSC_ERROR_EV 15
#define FSE_BZ_RX_DSC_ERROR_EV 14
#define FSE_AA_RX_RECOVER_EV 11
#define FSE_AZ_TIMER_EV 10
#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
#define FSE_AZ_WAKE_UP_EV 6
#define FSE_AZ_SRM_UPD_DONE_EV 5
#define FSE_AB_EVQ_NOT_EN_EV 3
#define FSE_AZ_EVQ_INIT_DONE_EV 2
#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14

/* EVENT_ENTRY: common event format — 4-bit code + 60-bit payload. */
#define FSF_AZ_EV_CODE_LBN 60
#define FSF_AZ_EV_CODE_WIDTH 4
/* EV_CODE values */
#define FSE_CZ_EV_CODE_MCDI_EV 12
#define FSE_CZ_EV_CODE_USER_EV 8
#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
#define FSE_AZ_EV_CODE_GLOBAL_EV 6
#define FSE_AZ_EV_CODE_DRIVER_EV 5
#define FSE_AZ_EV_CODE_TX_EV 2
#define FSE_AZ_EV_CODE_RX_EV 0
#define FSF_AZ_EV_DATA_LBN 0
#define FSF_AZ_EV_DATA_WIDTH 60

/* GLOBAL_EV: global event flags (bit positions vary by silicon revision —
 * note AA RX_RECOVERY and BB XG_MGT_INTR share bit 11). */
#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1

/* LEGACY_INT_VEC: legacy (INTx) interrupt status vector layout. */
#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1

/* MC_XGMAC_FLTR_RULE_DEF: MC XGMAC filter rule definition (byte-mask
 * layers plus mode and hash). */
#define FSF_CZ_MC_XFRC_MODE_LBN 416
#define FSF_CZ_MC_XFRC_MODE_WIDTH 1
#define FSE_CZ_MC_XFRC_MODE_LAYERED 1
#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0
#define FSF_CZ_MC_XFRC_HASH_LBN 384
#define FSF_CZ_MC_XFRC_HASH_WIDTH 32
#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
2652
/* RX_EV: receive completion event format — error flags, packet/header
 * type, queue label, byte count and descriptor pointer. */
#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
#define FSF_AZ_RX_EV_PKT_OK_LBN 56
#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
/* A-series reuses bit 49 for dribble-nibble indication */
#define FSF_AA_RX_EV_DRIB_NIB_LBN 49
#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
/* RX_EV_PKT_TYPE encodings */
#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
/* RX_EV_HDR_TYPE encodings (AB = IPv4 only, CZ = IPv4/IPv6) */
#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
#define FSF_AZ_RX_EV_PORT_LBN 30
#define FSF_AZ_RX_EV_PORT_WIDTH 1
#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
#define FSF_AZ_RX_EV_SOP_LBN 15
#define FSF_AZ_RX_EV_SOP_WIDTH 1
#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
2721
/* RX_KER_DESC: RX descriptor, physical-address ("kernel") mode. */
#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46

/* RX_USER_DESC: RX descriptor, buffer-table ("user") mode. */
#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
#define FSF_AZ_RX_USER_BUF_ID_LBN 0
#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20

/* TX_EV: transmit completion event format. */
#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
#define FSF_AZ_TX_EV_PORT_LBN 16
#define FSF_AZ_TX_EV_PORT_WIDTH 1
#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
#define FSF_AZ_TX_EV_COMP_LBN 12
#define FSF_AZ_TX_EV_COMP_WIDTH 1
#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12

/* TX_KER_DESC: TX descriptor, physical-address ("kernel") mode. */
#define FSF_AZ_TX_KER_CONT_LBN 62
#define FSF_AZ_TX_KER_CONT_WIDTH 1
#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46

/* TX_USER_DESC: TX descriptor, buffer-table ("user") mode. */
#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
#define FSF_AZ_TX_USER_CONT_LBN 46
#define FSF_AZ_TX_USER_CONT_WIDTH 1
#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
#define FSF_AZ_TX_USER_BUF_ID_LBN 13
#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13

/* USER_EV: user-generated event format (C-series). */
#define FSF_CZ_USER_QID_LBN 32
#define FSF_CZ_USER_QID_WIDTH 10
#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
2781
/**************************************************************************
 *
 * Falcon B0 PCIe core indirect registers
 *
 **************************************************************************
 */

/* PCIe capability: device control/status */
#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68

/* PCIe capability: link control/status */
#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70

/* Ack/replay timer limits */
#define FPCR_BB_ACK_RPL_TIMER 0x700
#define FPCRF_BB_ACK_TL_LBN 0
#define FPCRF_BB_ACK_TL_WIDTH 16
#define FPCRF_BB_RPL_TL_LBN 16
#define FPCRF_BB_RPL_TL_WIDTH 16

/* Ack frequency */
#define FPCR_BB_ACK_FREQ 0x70C
#define FPCRF_BB_ACK_FREQ_LBN 0
#define FPCRF_BB_ACK_FREQ_WIDTH 7
2802
/**************************************************************************
 *
 * Pseudo-registers and fields
 *
 **************************************************************************
 */

/* Interrupt acknowledge work-around register (A0/A1 only) */
#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070

/* EE_SPI_HCMD_REG: SPI host command register */
/* Values for the EE_SPI_HCMD_SF_SEL register field */
#define FFE_AB_SPI_DEVICE_EEPROM 0
#define FFE_AB_SPI_DEVICE_FLASH 1

/* NIC_STAT_REG: NIC status register */
#define FRF_AB_STRAP_10G_LBN 2
#define FRF_AB_STRAP_10G_WIDTH 1
#define FRF_AA_STRAP_PCIE_LBN 0
#define FRF_AA_STRAP_PCIE_WIDTH 1

/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
#define FRF_AZ_FATAL_INTR_LBN 0
#define FRF_AZ_FATAL_INTR_WIDTH 12

/* SRM_CFG_REG: SRAM configuration register */
/* We treat the number of SRAM banks and bank size as a single field
 * (relies on FRF_AZ_SRM_BANK_SIZE_* / FRF_AZ_SRM_NUM_BANK_WIDTH being
 * defined earlier in this header). */
#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
#define FRF_AZ_SRM_NB_SZ_WIDTH \
	(FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
#define FFE_AB_SRM_NB1_SZ2M 0
#define FFE_AB_SRM_NB1_SZ4M 1
#define FFE_AB_SRM_NB1_SZ8M 2
#define FFE_AB_SRM_NB_SZ_DEF 3
#define FFE_AB_SRM_NB2_SZ4M 4
#define FFE_AB_SRM_NB2_SZ8M 5
#define FFE_AB_SRM_NB2_SZ16M 6
#define FFE_AB_SRM_NB_SZ_RES 7

/* RX_DESC_UPD_REGP0: Receive descriptor update register.
 * We write just the last dword of these registers; the BUILD_BUG_ON_ZERO
 * check asserts at compile time that the A-series and B-series addresses
 * coincide, so one definition serves both. */
#define FR_AZ_RX_DESC_UPD_DWORD_P0 \
	(BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
	 FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH

/* TX_DESC_UPD_REGP0: Transmit descriptor update register.
 * Same last-dword trick and compile-time address check as for RX above. */
#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
	(BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
	 FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
2856
2857/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
2858#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
2859#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
2860
2861/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
2862#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
2863#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
2864
2865/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
2866#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
2867#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
2868 FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
2869
2870/* XM_RX_PARAM_REG: XGMAC receive parameter register */
2871#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
2872#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
2873 FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
2874
2875/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
2876/* Default values */
2877#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
2878#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
2879#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */
2880
2881/* XX_CORE_STAT_REG: XAUI XGXS core status register */
2882/* XGXS all-lanes status fields */
2883#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
2884#define FRF_AB_XX_SYNC_STAT_WIDTH 4
2885#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
2886#define FRF_AB_XX_COMMA_DET_WIDTH 4
2887#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
2888#define FRF_AB_XX_CHAR_ERR_WIDTH 4
2889#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
2890#define FRF_AB_XX_DISPERR_WIDTH 4
2891#define FFE_AB_XX_STAT_ALL_LANES 0xf
2892#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
2893#define FRF_AB_XX_FORCE_SIG_WIDTH 8
2894#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
2895
2896/* DRIVER_EV */
2897/* Sub-fields of an RX flush completion event */
2898#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
2899#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
2900#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
2901#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
2902
2903/* EVENT_ENTRY */
2904/* Magic number field for event test */
2905#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
2906#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
2907
2908/**************************************************************************
2909 *
2910 * Falcon MAC stats
2911 *
2912 **************************************************************************
2913 *
2914 */
2915
2916#define GRxGoodOct_offset 0x0
2917#define GRxGoodOct_WIDTH 48
2918#define GRxBadOct_offset 0x8
2919#define GRxBadOct_WIDTH 48
2920#define GRxMissPkt_offset 0x10
2921#define GRxMissPkt_WIDTH 32
2922#define GRxFalseCRS_offset 0x14
2923#define GRxFalseCRS_WIDTH 32
2924#define GRxPausePkt_offset 0x18
2925#define GRxPausePkt_WIDTH 32
2926#define GRxBadPkt_offset 0x1C
2927#define GRxBadPkt_WIDTH 32
2928#define GRxUcastPkt_offset 0x20
2929#define GRxUcastPkt_WIDTH 32
2930#define GRxMcastPkt_offset 0x24
2931#define GRxMcastPkt_WIDTH 32
2932#define GRxBcastPkt_offset 0x28
2933#define GRxBcastPkt_WIDTH 32
2934#define GRxGoodLt64Pkt_offset 0x2C
2935#define GRxGoodLt64Pkt_WIDTH 32
2936#define GRxBadLt64Pkt_offset 0x30
2937#define GRxBadLt64Pkt_WIDTH 32
2938#define GRx64Pkt_offset 0x34
2939#define GRx64Pkt_WIDTH 32
2940#define GRx65to127Pkt_offset 0x38
2941#define GRx65to127Pkt_WIDTH 32
2942#define GRx128to255Pkt_offset 0x3C
2943#define GRx128to255Pkt_WIDTH 32
2944#define GRx256to511Pkt_offset 0x40
2945#define GRx256to511Pkt_WIDTH 32
2946#define GRx512to1023Pkt_offset 0x44
2947#define GRx512to1023Pkt_WIDTH 32
2948#define GRx1024to15xxPkt_offset 0x48
2949#define GRx1024to15xxPkt_WIDTH 32
2950#define GRx15xxtoJumboPkt_offset 0x4C
2951#define GRx15xxtoJumboPkt_WIDTH 32
2952#define GRxGtJumboPkt_offset 0x50
2953#define GRxGtJumboPkt_WIDTH 32
2954#define GRxFcsErr64to15xxPkt_offset 0x54
2955#define GRxFcsErr64to15xxPkt_WIDTH 32
2956#define GRxFcsErr15xxtoJumboPkt_offset 0x58
2957#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
2958#define GRxFcsErrGtJumboPkt_offset 0x5C
2959#define GRxFcsErrGtJumboPkt_WIDTH 32
2960#define GTxGoodBadOct_offset 0x80
2961#define GTxGoodBadOct_WIDTH 48
2962#define GTxGoodOct_offset 0x88
2963#define GTxGoodOct_WIDTH 48
2964#define GTxSglColPkt_offset 0x90
2965#define GTxSglColPkt_WIDTH 32
2966#define GTxMultColPkt_offset 0x94
2967#define GTxMultColPkt_WIDTH 32
2968#define GTxExColPkt_offset 0x98
2969#define GTxExColPkt_WIDTH 32
2970#define GTxDefPkt_offset 0x9C
2971#define GTxDefPkt_WIDTH 32
2972#define GTxLateCol_offset 0xA0
2973#define GTxLateCol_WIDTH 32
2974#define GTxExDefPkt_offset 0xA4
2975#define GTxExDefPkt_WIDTH 32
2976#define GTxPausePkt_offset 0xA8
2977#define GTxPausePkt_WIDTH 32
2978#define GTxBadPkt_offset 0xAC
2979#define GTxBadPkt_WIDTH 32
2980#define GTxUcastPkt_offset 0xB0
2981#define GTxUcastPkt_WIDTH 32
2982#define GTxMcastPkt_offset 0xB4
2983#define GTxMcastPkt_WIDTH 32
2984#define GTxBcastPkt_offset 0xB8
2985#define GTxBcastPkt_WIDTH 32
2986#define GTxLt64Pkt_offset 0xBC
2987#define GTxLt64Pkt_WIDTH 32
2988#define GTx64Pkt_offset 0xC0
2989#define GTx64Pkt_WIDTH 32
2990#define GTx65to127Pkt_offset 0xC4
2991#define GTx65to127Pkt_WIDTH 32
2992#define GTx128to255Pkt_offset 0xC8
2993#define GTx128to255Pkt_WIDTH 32
2994#define GTx256to511Pkt_offset 0xCC
2995#define GTx256to511Pkt_WIDTH 32
2996#define GTx512to1023Pkt_offset 0xD0
2997#define GTx512to1023Pkt_WIDTH 32
2998#define GTx1024to15xxPkt_offset 0xD4
2999#define GTx1024to15xxPkt_WIDTH 32
3000#define GTx15xxtoJumboPkt_offset 0xD8
3001#define GTx15xxtoJumboPkt_WIDTH 32
3002#define GTxGtJumboPkt_offset 0xDC
3003#define GTxGtJumboPkt_WIDTH 32
3004#define GTxNonTcpUdpPkt_offset 0xE0
3005#define GTxNonTcpUdpPkt_WIDTH 16
3006#define GTxMacSrcErrPkt_offset 0xE4
3007#define GTxMacSrcErrPkt_WIDTH 16
3008#define GTxIpSrcErrPkt_offset 0xE8
3009#define GTxIpSrcErrPkt_WIDTH 16
3010#define GDmaDone_offset 0xEC
3011#define GDmaDone_WIDTH 32
3012
3013#define XgRxOctets_offset 0x0
3014#define XgRxOctets_WIDTH 48
3015#define XgRxOctetsOK_offset 0x8
3016#define XgRxOctetsOK_WIDTH 48
3017#define XgRxPkts_offset 0x10
3018#define XgRxPkts_WIDTH 32
3019#define XgRxPktsOK_offset 0x14
3020#define XgRxPktsOK_WIDTH 32
3021#define XgRxBroadcastPkts_offset 0x18
3022#define XgRxBroadcastPkts_WIDTH 32
3023#define XgRxMulticastPkts_offset 0x1C
3024#define XgRxMulticastPkts_WIDTH 32
3025#define XgRxUnicastPkts_offset 0x20
3026#define XgRxUnicastPkts_WIDTH 32
3027#define XgRxUndersizePkts_offset 0x24
3028#define XgRxUndersizePkts_WIDTH 32
3029#define XgRxOversizePkts_offset 0x28
3030#define XgRxOversizePkts_WIDTH 32
3031#define XgRxJabberPkts_offset 0x2C
3032#define XgRxJabberPkts_WIDTH 32
3033#define XgRxUndersizeFCSerrorPkts_offset 0x30
3034#define XgRxUndersizeFCSerrorPkts_WIDTH 32
3035#define XgRxDropEvents_offset 0x34
3036#define XgRxDropEvents_WIDTH 32
3037#define XgRxFCSerrorPkts_offset 0x38
3038#define XgRxFCSerrorPkts_WIDTH 32
3039#define XgRxAlignError_offset 0x3C
3040#define XgRxAlignError_WIDTH 32
3041#define XgRxSymbolError_offset 0x40
3042#define XgRxSymbolError_WIDTH 32
3043#define XgRxInternalMACError_offset 0x44
3044#define XgRxInternalMACError_WIDTH 32
3045#define XgRxControlPkts_offset 0x48
3046#define XgRxControlPkts_WIDTH 32
3047#define XgRxPausePkts_offset 0x4C
3048#define XgRxPausePkts_WIDTH 32
3049#define XgRxPkts64Octets_offset 0x50
3050#define XgRxPkts64Octets_WIDTH 32
3051#define XgRxPkts65to127Octets_offset 0x54
3052#define XgRxPkts65to127Octets_WIDTH 32
3053#define XgRxPkts128to255Octets_offset 0x58
3054#define XgRxPkts128to255Octets_WIDTH 32
3055#define XgRxPkts256to511Octets_offset 0x5C
3056#define XgRxPkts256to511Octets_WIDTH 32
3057#define XgRxPkts512to1023Octets_offset 0x60
3058#define XgRxPkts512to1023Octets_WIDTH 32
3059#define XgRxPkts1024to15xxOctets_offset 0x64
3060#define XgRxPkts1024to15xxOctets_WIDTH 32
3061#define XgRxPkts15xxtoMaxOctets_offset 0x68
3062#define XgRxPkts15xxtoMaxOctets_WIDTH 32
3063#define XgRxLengthError_offset 0x6C
3064#define XgRxLengthError_WIDTH 32
3065#define XgTxPkts_offset 0x80
3066#define XgTxPkts_WIDTH 32
3067#define XgTxOctets_offset 0x88
3068#define XgTxOctets_WIDTH 48
3069#define XgTxMulticastPkts_offset 0x90
3070#define XgTxMulticastPkts_WIDTH 32
3071#define XgTxBroadcastPkts_offset 0x94
3072#define XgTxBroadcastPkts_WIDTH 32
3073#define XgTxUnicastPkts_offset 0x98
3074#define XgTxUnicastPkts_WIDTH 32
3075#define XgTxControlPkts_offset 0x9C
3076#define XgTxControlPkts_WIDTH 32
3077#define XgTxPausePkts_offset 0xA0
3078#define XgTxPausePkts_WIDTH 32
3079#define XgTxPkts64Octets_offset 0xA4
3080#define XgTxPkts64Octets_WIDTH 32
3081#define XgTxPkts65to127Octets_offset 0xA8
3082#define XgTxPkts65to127Octets_WIDTH 32
3083#define XgTxPkts128to255Octets_offset 0xAC
3084#define XgTxPkts128to255Octets_WIDTH 32
3085#define XgTxPkts256to511Octets_offset 0xB0
3086#define XgTxPkts256to511Octets_WIDTH 32
3087#define XgTxPkts512to1023Octets_offset 0xB4
3088#define XgTxPkts512to1023Octets_WIDTH 32
3089#define XgTxPkts1024to15xxOctets_offset 0xB8
3090#define XgTxPkts1024to15xxOctets_WIDTH 32
3091#define XgTxPkts1519toMaxOctets_offset 0xBC
3092#define XgTxPkts1519toMaxOctets_WIDTH 32
3093#define XgTxUndersizePkts_offset 0xC0
3094#define XgTxUndersizePkts_WIDTH 32
3095#define XgTxOversizePkts_offset 0xC4
3096#define XgTxOversizePkts_WIDTH 32
3097#define XgTxNonTcpUdpPkt_offset 0xC8
3098#define XgTxNonTcpUdpPkt_WIDTH 16
3099#define XgTxMacSrcErrPkt_offset 0xCC
3100#define XgTxMacSrcErrPkt_WIDTH 16
3101#define XgTxIpSrcErrPkt_offset 0xD0
3102#define XgTxIpSrcErrPkt_WIDTH 16
3103#define XgDmaDone_offset 0xD4
3104#define XgDmaDone_WIDTH 32
3105
3106#define FALCON_STATS_NOT_DONE 0x00000000
3107#define FALCON_STATS_DONE 0xffffffff
3108
3109/* Interrupt status register bits */
3110#define FATAL_INT_LBN 64
3111#define FATAL_INT_WIDTH 1
3112#define INT_EVQS_LBN 40
3113#define INT_EVQS_WIDTH 4
3114#define INT_FLAG_LBN 32
3115#define INT_FLAG_WIDTH 1
3116#define EVQ_FIFO_HF_LBN 1
3117#define EVQ_FIFO_HF_WIDTH 1
3118#define EVQ_FIFO_AF_LBN 0
3119#define EVQ_FIFO_AF_WIDTH 1
3120
3121/**************************************************************************
3122 *
3123 * Falcon non-volatile configuration
3124 *
3125 **************************************************************************
3126 */
3127
3128/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
3129struct falcon_nvconfig_board_v2 {
3130 __le16 nports;
3131 u8 port0_phy_addr;
3132 u8 port0_phy_type;
3133 u8 port1_phy_addr;
3134 u8 port1_phy_type;
3135 __le16 asic_sub_revision;
3136 __le16 board_revision;
3137} __packed;
3138
3139/* Board configuration v3 extra information */
3140struct falcon_nvconfig_board_v3 {
3141 __le32 spi_device_type[2];
3142} __packed;
3143
3144/* Bit numbers for spi_device_type */
3145#define SPI_DEV_TYPE_SIZE_LBN 0
3146#define SPI_DEV_TYPE_SIZE_WIDTH 5
3147#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
3148#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
3149#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
3150#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
3151#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
3152#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
3153#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
3154#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
3155#define SPI_DEV_TYPE_FIELD(type, field) \
3156 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
3157
3158#define FALCON_NVCONFIG_OFFSET 0x300
3159
3160#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
3161struct falcon_nvconfig {
3162 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
3163 u8 mac_address[2][8]; /* 0x310 */
3164 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
3165 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
3166 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
3167 efx_oword_t hw_init_reg; /* 0x350 */
3168 efx_oword_t nic_stat_reg; /* 0x360 */
3169 efx_oword_t glb_ctl_reg; /* 0x370 */
3170 efx_oword_t srm_cfg_reg; /* 0x380 */
3171 efx_oword_t spare_reg; /* 0x390 */
3172 __le16 board_magic_num; /* 0x3A0 */
3173 __le16 board_struct_ver;
3174 __le16 board_checksum;
3175 struct falcon_nvconfig_board_v2 board_v2;
3176 efx_oword_t ee_base_page_reg; /* 0x3B0 */
3177 struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
3178} __packed;
3179
3180#endif /* EFX_REGS_H */
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 01f9432c31ef..a60c7188fdad 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -61,7 +61,7 @@
61 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? 61 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
62 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) 62 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
63 */ 63 */
64static int rx_alloc_method = RX_ALLOC_METHOD_PAGE; 64static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
65 65
66#define RX_ALLOC_LEVEL_LRO 0x2000 66#define RX_ALLOC_LEVEL_LRO 0x2000
67#define RX_ALLOC_LEVEL_MAX 0x3000 67#define RX_ALLOC_LEVEL_MAX 0x3000
@@ -293,8 +293,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
293 * fill anyway. 293 * fill anyway.
294 */ 294 */
295 fill_level = (rx_queue->added_count - rx_queue->removed_count); 295 fill_level = (rx_queue->added_count - rx_queue->removed_count);
296 EFX_BUG_ON_PARANOID(fill_level > 296 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
297 rx_queue->efx->type->rxd_ring_mask + 1);
298 297
299 /* Don't fill if we don't need to */ 298 /* Don't fill if we don't need to */
300 if (fill_level >= rx_queue->fast_fill_trigger) 299 if (fill_level >= rx_queue->fast_fill_trigger)
@@ -316,8 +315,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
316 retry: 315 retry:
317 /* Recalculate current fill level now that we have the lock */ 316 /* Recalculate current fill level now that we have the lock */
318 fill_level = (rx_queue->added_count - rx_queue->removed_count); 317 fill_level = (rx_queue->added_count - rx_queue->removed_count);
319 EFX_BUG_ON_PARANOID(fill_level > 318 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
320 rx_queue->efx->type->rxd_ring_mask + 1);
321 space = rx_queue->fast_fill_limit - fill_level; 319 space = rx_queue->fast_fill_limit - fill_level;
322 if (space < EFX_RX_BATCH) 320 if (space < EFX_RX_BATCH)
323 goto out_unlock; 321 goto out_unlock;
@@ -329,8 +327,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
329 327
330 do { 328 do {
331 for (i = 0; i < EFX_RX_BATCH; ++i) { 329 for (i = 0; i < EFX_RX_BATCH; ++i) {
332 index = (rx_queue->added_count & 330 index = rx_queue->added_count & EFX_RXQ_MASK;
333 rx_queue->efx->type->rxd_ring_mask);
334 rx_buf = efx_rx_buffer(rx_queue, index); 331 rx_buf = efx_rx_buffer(rx_queue, index);
335 rc = efx_init_rx_buffer(rx_queue, rx_buf); 332 rc = efx_init_rx_buffer(rx_queue, rx_buf);
336 if (unlikely(rc)) 333 if (unlikely(rc))
@@ -444,9 +441,11 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
444 * the appropriate LRO method 441 * the appropriate LRO method
445 */ 442 */
446static void efx_rx_packet_lro(struct efx_channel *channel, 443static void efx_rx_packet_lro(struct efx_channel *channel,
447 struct efx_rx_buffer *rx_buf) 444 struct efx_rx_buffer *rx_buf,
445 bool checksummed)
448{ 446{
449 struct napi_struct *napi = &channel->napi_str; 447 struct napi_struct *napi = &channel->napi_str;
448 gro_result_t gro_result;
450 449
451 /* Pass the skb/page into the LRO engine */ 450 /* Pass the skb/page into the LRO engine */
452 if (rx_buf->page) { 451 if (rx_buf->page) {
@@ -454,6 +453,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
454 453
455 if (!skb) { 454 if (!skb) {
456 put_page(rx_buf->page); 455 put_page(rx_buf->page);
456 gro_result = GRO_DROP;
457 goto out; 457 goto out;
458 } 458 }
459 459
@@ -466,19 +466,28 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
466 skb->len = rx_buf->len; 466 skb->len = rx_buf->len;
467 skb->data_len = rx_buf->len; 467 skb->data_len = rx_buf->len;
468 skb->truesize += rx_buf->len; 468 skb->truesize += rx_buf->len;
469 skb->ip_summed = CHECKSUM_UNNECESSARY; 469 skb->ip_summed =
470 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
470 471
471 napi_gro_frags(napi); 472 gro_result = napi_gro_frags(napi);
472 473
473out: 474out:
474 EFX_BUG_ON_PARANOID(rx_buf->skb); 475 EFX_BUG_ON_PARANOID(rx_buf->skb);
475 rx_buf->page = NULL; 476 rx_buf->page = NULL;
476 } else { 477 } else {
477 EFX_BUG_ON_PARANOID(!rx_buf->skb); 478 EFX_BUG_ON_PARANOID(!rx_buf->skb);
479 EFX_BUG_ON_PARANOID(!checksummed);
478 480
479 napi_gro_receive(napi, rx_buf->skb); 481 gro_result = napi_gro_receive(napi, rx_buf->skb);
480 rx_buf->skb = NULL; 482 rx_buf->skb = NULL;
481 } 483 }
484
485 if (gro_result == GRO_NORMAL) {
486 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
487 } else if (gro_result != GRO_DROP) {
488 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
489 channel->irq_mod_score += 2;
490 }
482} 491}
483 492
484void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 493void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
@@ -570,7 +579,7 @@ void __efx_rx_packet(struct efx_channel *channel,
570 } 579 }
571 580
572 if (likely(checksummed || rx_buf->page)) { 581 if (likely(checksummed || rx_buf->page)) {
573 efx_rx_packet_lro(channel, rx_buf); 582 efx_rx_packet_lro(channel, rx_buf, checksummed);
574 goto done; 583 goto done;
575 } 584 }
576 585
@@ -629,7 +638,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
629 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); 638 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
630 639
631 /* Allocate RX buffers */ 640 /* Allocate RX buffers */
632 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer); 641 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
633 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); 642 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
634 if (!rx_queue->buffer) 643 if (!rx_queue->buffer)
635 return -ENOMEM; 644 return -ENOMEM;
@@ -644,7 +653,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
644 653
645void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 654void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
646{ 655{
647 struct efx_nic *efx = rx_queue->efx;
648 unsigned int max_fill, trigger, limit; 656 unsigned int max_fill, trigger, limit;
649 657
650 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); 658 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -657,7 +665,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
657 rx_queue->min_overfill = -1U; 665 rx_queue->min_overfill = -1U;
658 666
659 /* Initialise limit fields */ 667 /* Initialise limit fields */
660 max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM; 668 max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
661 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 669 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
662 limit = max_fill * min(rx_refill_limit, 100U) / 100U; 670 limit = max_fill * min(rx_refill_limit, 100U) / 100U;
663 671
@@ -680,7 +688,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
680 688
681 /* Release RX buffers NB start at index 0 not current HW ptr */ 689 /* Release RX buffers NB start at index 0 not current HW ptr */
682 if (rx_queue->buffer) { 690 if (rx_queue->buffer) {
683 for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) { 691 for (i = 0; i <= EFX_RXQ_MASK; i++) {
684 rx_buf = efx_rx_buffer(rx_queue, i); 692 rx_buf = efx_rx_buffer(rx_queue, i);
685 efx_fini_rx_buffer(rx_queue, rx_buf); 693 efx_fini_rx_buffer(rx_queue, rx_buf);
686 } 694 }
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 817c7efc11e0..7a9386f97c42 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -24,10 +24,9 @@
24#include "efx.h" 24#include "efx.h"
25#include "falcon.h" 25#include "falcon.h"
26#include "selftest.h" 26#include "selftest.h"
27#include "boards.h"
28#include "workarounds.h" 27#include "workarounds.h"
29#include "spi.h" 28#include "spi.h"
30#include "falcon_io.h" 29#include "io.h"
31#include "mdio_10g.h" 30#include "mdio_10g.h"
32 31
33/* 32/*
@@ -527,7 +526,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
527 526
528 for (i = 0; i < 3; i++) { 527 for (i = 0; i < 3; i++) {
529 /* Determine how many packets to send */ 528 /* Determine how many packets to send */
530 state->packet_count = (efx->type->txd_ring_mask + 1) / 3; 529 state->packet_count = EFX_TXQ_SIZE / 3;
531 state->packet_count = min(1 << (i << 2), state->packet_count); 530 state->packet_count = min(1 << (i << 2), state->packet_count);
532 state->skbs = kzalloc(sizeof(state->skbs[0]) * 531 state->skbs = kzalloc(sizeof(state->skbs[0]) *
533 state->packet_count, GFP_KERNEL); 532 state->packet_count, GFP_KERNEL);
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 1a3495c676c0..390b27b5ace9 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -14,8 +14,7 @@
14#include "mdio_10g.h" 14#include "mdio_10g.h"
15#include "falcon.h" 15#include "falcon.h"
16#include "phy.h" 16#include "phy.h"
17#include "falcon_hwdefs.h" 17#include "regs.h"
18#include "boards.h"
19#include "workarounds.h" 18#include "workarounds.h"
20#include "selftest.h" 19#include "selftest.h"
21 20
@@ -752,6 +751,7 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
752 751
753 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); 752 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
754 753
754 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
755 if (efx->phy_type != PHY_TYPE_SFX7101) { 755 if (efx->phy_type != PHY_TYPE_SFX7101) {
756 ecmd->supported |= (SUPPORTED_100baseT_Full | 756 ecmd->supported |= (SUPPORTED_100baseT_Full |
757 SUPPORTED_1000baseT_Full); 757 SUPPORTED_1000baseT_Full);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 489c4de31447..303919a34df6 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -26,8 +26,7 @@
26 * The tx_queue descriptor ring fill-level must fall below this value 26 * The tx_queue descriptor ring fill-level must fall below this value
27 * before we restart the netif queue 27 * before we restart the netif queue
28 */ 28 */
29#define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \ 29#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
30 (_tx_queue->efx->type->txd_ring_mask / 2u)
31 30
32/* We want to be able to nest calls to netif_stop_queue(), since each 31/* We want to be able to nest calls to netif_stop_queue(), since each
33 * channel can have an individual stop on the queue. 32 * channel can have an individual stop on the queue.
@@ -125,6 +124,24 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
125} 124}
126 125
127 126
127static inline unsigned
128efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
129{
130 /* Depending on the NIC revision, we can use descriptor
131 * lengths up to 8K or 8K-1. However, since PCI Express
132 * devices must split read requests at 4K boundaries, there is
133 * little benefit from using descriptors that cross those
134 * boundaries and we keep things simple by not doing so.
135 */
136 unsigned len = (~dma_addr & 0xfff) + 1;
137
138 /* Work around hardware bug for unaligned buffers. */
139 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
140 len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
141
142 return len;
143}
144
128/* 145/*
129 * Add a socket buffer to a TX queue 146 * Add a socket buffer to a TX queue
130 * 147 *
@@ -147,7 +164,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
147 skb_frag_t *fragment; 164 skb_frag_t *fragment;
148 struct page *page; 165 struct page *page;
149 int page_offset; 166 int page_offset;
150 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; 167 unsigned int len, unmap_len = 0, fill_level, insert_ptr;
151 dma_addr_t dma_addr, unmap_addr = 0; 168 dma_addr_t dma_addr, unmap_addr = 0;
152 unsigned int dma_len; 169 unsigned int dma_len;
153 bool unmap_single; 170 bool unmap_single;
@@ -171,7 +188,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
171 } 188 }
172 189
173 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 190 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
174 q_space = efx->type->txd_ring_mask - 1 - fill_level; 191 q_space = EFX_TXQ_MASK - 1 - fill_level;
175 192
176 /* Map for DMA. Use pci_map_single rather than pci_map_page 193 /* Map for DMA. Use pci_map_single rather than pci_map_page
177 * since this is more efficient on machines with sparse 194 * since this is more efficient on machines with sparse
@@ -208,16 +225,14 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
208 &tx_queue->read_count; 225 &tx_queue->read_count;
209 fill_level = (tx_queue->insert_count 226 fill_level = (tx_queue->insert_count
210 - tx_queue->old_read_count); 227 - tx_queue->old_read_count);
211 q_space = (efx->type->txd_ring_mask - 1 - 228 q_space = EFX_TXQ_MASK - 1 - fill_level;
212 fill_level);
213 if (unlikely(q_space-- <= 0)) 229 if (unlikely(q_space-- <= 0))
214 goto stop; 230 goto stop;
215 smp_mb(); 231 smp_mb();
216 --tx_queue->stopped; 232 --tx_queue->stopped;
217 } 233 }
218 234
219 insert_ptr = (tx_queue->insert_count & 235 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
220 efx->type->txd_ring_mask);
221 buffer = &tx_queue->buffer[insert_ptr]; 236 buffer = &tx_queue->buffer[insert_ptr];
222 efx_tsoh_free(tx_queue, buffer); 237 efx_tsoh_free(tx_queue, buffer);
223 EFX_BUG_ON_PARANOID(buffer->tsoh); 238 EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -226,14 +241,10 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
226 EFX_BUG_ON_PARANOID(!buffer->continuation); 241 EFX_BUG_ON_PARANOID(!buffer->continuation);
227 EFX_BUG_ON_PARANOID(buffer->unmap_len); 242 EFX_BUG_ON_PARANOID(buffer->unmap_len);
228 243
229 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); 244 dma_len = efx_max_tx_len(efx, dma_addr);
230 if (likely(dma_len > len)) 245 if (likely(dma_len >= len))
231 dma_len = len; 246 dma_len = len;
232 247
233 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
234 if (misalign && dma_len + misalign > 512)
235 dma_len = 512 - misalign;
236
237 /* Fill out per descriptor fields */ 248 /* Fill out per descriptor fields */
238 buffer->len = dma_len; 249 buffer->len = dma_len;
239 buffer->dma_addr = dma_addr; 250 buffer->dma_addr = dma_addr;
@@ -289,7 +300,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
289 /* Work backwards until we hit the original insert pointer value */ 300 /* Work backwards until we hit the original insert pointer value */
290 while (tx_queue->insert_count != tx_queue->write_count) { 301 while (tx_queue->insert_count != tx_queue->write_count) {
291 --tx_queue->insert_count; 302 --tx_queue->insert_count;
292 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 303 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
293 buffer = &tx_queue->buffer[insert_ptr]; 304 buffer = &tx_queue->buffer[insert_ptr];
294 efx_dequeue_buffer(tx_queue, buffer); 305 efx_dequeue_buffer(tx_queue, buffer);
295 buffer->len = 0; 306 buffer->len = 0;
@@ -318,10 +329,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
318{ 329{
319 struct efx_nic *efx = tx_queue->efx; 330 struct efx_nic *efx = tx_queue->efx;
320 unsigned int stop_index, read_ptr; 331 unsigned int stop_index, read_ptr;
321 unsigned int mask = tx_queue->efx->type->txd_ring_mask;
322 332
323 stop_index = (index + 1) & mask; 333 stop_index = (index + 1) & EFX_TXQ_MASK;
324 read_ptr = tx_queue->read_count & mask; 334 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
325 335
326 while (read_ptr != stop_index) { 336 while (read_ptr != stop_index) {
327 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 337 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -338,7 +348,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
338 buffer->len = 0; 348 buffer->len = 0;
339 349
340 ++tx_queue->read_count; 350 ++tx_queue->read_count;
341 read_ptr = tx_queue->read_count & mask; 351 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
342 } 352 }
343} 353}
344 354
@@ -391,7 +401,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
391 unsigned fill_level; 401 unsigned fill_level;
392 struct efx_nic *efx = tx_queue->efx; 402 struct efx_nic *efx = tx_queue->efx;
393 403
394 EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask); 404 EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
395 405
396 efx_dequeue_buffers(tx_queue, index); 406 efx_dequeue_buffers(tx_queue, index);
397 407
@@ -401,7 +411,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
401 smp_mb(); 411 smp_mb();
402 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) { 412 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
403 fill_level = tx_queue->insert_count - tx_queue->read_count; 413 fill_level = tx_queue->insert_count - tx_queue->read_count;
404 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 414 if (fill_level < EFX_TXQ_THRESHOLD) {
405 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 415 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
406 416
407 /* Do this under netif_tx_lock(), to avoid racing 417 /* Do this under netif_tx_lock(), to avoid racing
@@ -425,11 +435,11 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
425 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); 435 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
426 436
427 /* Allocate software ring */ 437 /* Allocate software ring */
428 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); 438 txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
429 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); 439 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
430 if (!tx_queue->buffer) 440 if (!tx_queue->buffer)
431 return -ENOMEM; 441 return -ENOMEM;
432 for (i = 0; i <= efx->type->txd_ring_mask; ++i) 442 for (i = 0; i <= EFX_TXQ_MASK; ++i)
433 tx_queue->buffer[i].continuation = true; 443 tx_queue->buffer[i].continuation = true;
434 444
435 /* Allocate hardware ring */ 445 /* Allocate hardware ring */
@@ -468,8 +478,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
468 478
469 /* Free any buffers left in the ring */ 479 /* Free any buffers left in the ring */
470 while (tx_queue->read_count != tx_queue->write_count) { 480 while (tx_queue->read_count != tx_queue->write_count) {
471 buffer = &tx_queue->buffer[tx_queue->read_count & 481 buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
472 tx_queue->efx->type->txd_ring_mask];
473 efx_dequeue_buffer(tx_queue, buffer); 482 efx_dequeue_buffer(tx_queue, buffer);
474 buffer->continuation = true; 483 buffer->continuation = true;
475 buffer->len = 0; 484 buffer->len = 0;
@@ -708,14 +717,14 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
708{ 717{
709 struct efx_tx_buffer *buffer; 718 struct efx_tx_buffer *buffer;
710 struct efx_nic *efx = tx_queue->efx; 719 struct efx_nic *efx = tx_queue->efx;
711 unsigned dma_len, fill_level, insert_ptr, misalign; 720 unsigned dma_len, fill_level, insert_ptr;
712 int q_space; 721 int q_space;
713 722
714 EFX_BUG_ON_PARANOID(len <= 0); 723 EFX_BUG_ON_PARANOID(len <= 0);
715 724
716 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 725 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
717 /* -1 as there is no way to represent all descriptors used */ 726 /* -1 as there is no way to represent all descriptors used */
718 q_space = efx->type->txd_ring_mask - 1 - fill_level; 727 q_space = EFX_TXQ_MASK - 1 - fill_level;
719 728
720 while (1) { 729 while (1) {
721 if (unlikely(q_space-- <= 0)) { 730 if (unlikely(q_space-- <= 0)) {
@@ -731,7 +740,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
731 *(volatile unsigned *)&tx_queue->read_count; 740 *(volatile unsigned *)&tx_queue->read_count;
732 fill_level = (tx_queue->insert_count 741 fill_level = (tx_queue->insert_count
733 - tx_queue->old_read_count); 742 - tx_queue->old_read_count);
734 q_space = efx->type->txd_ring_mask - 1 - fill_level; 743 q_space = EFX_TXQ_MASK - 1 - fill_level;
735 if (unlikely(q_space-- <= 0)) { 744 if (unlikely(q_space-- <= 0)) {
736 *final_buffer = NULL; 745 *final_buffer = NULL;
737 return 1; 746 return 1;
@@ -740,13 +749,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
740 --tx_queue->stopped; 749 --tx_queue->stopped;
741 } 750 }
742 751
743 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 752 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
744 buffer = &tx_queue->buffer[insert_ptr]; 753 buffer = &tx_queue->buffer[insert_ptr];
745 ++tx_queue->insert_count; 754 ++tx_queue->insert_count;
746 755
747 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 756 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
748 tx_queue->read_count > 757 tx_queue->read_count >
749 efx->type->txd_ring_mask); 758 EFX_TXQ_MASK);
750 759
751 efx_tsoh_free(tx_queue, buffer); 760 efx_tsoh_free(tx_queue, buffer);
752 EFX_BUG_ON_PARANOID(buffer->len); 761 EFX_BUG_ON_PARANOID(buffer->len);
@@ -757,12 +766,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
757 766
758 buffer->dma_addr = dma_addr; 767 buffer->dma_addr = dma_addr;
759 768
760 /* Ensure we do not cross a boundary unsupported by H/W */ 769 dma_len = efx_max_tx_len(efx, dma_addr);
761 dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
762
763 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
764 if (misalign && dma_len + misalign > 512)
765 dma_len = 512 - misalign;
766 770
767 /* If there is enough space to send then do so */ 771 /* If there is enough space to send then do so */
768 if (dma_len >= len) 772 if (dma_len >= len)
@@ -792,8 +796,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
792{ 796{
793 struct efx_tx_buffer *buffer; 797 struct efx_tx_buffer *buffer;
794 798
795 buffer = &tx_queue->buffer[tx_queue->insert_count & 799 buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
796 tx_queue->efx->type->txd_ring_mask];
797 efx_tsoh_free(tx_queue, buffer); 800 efx_tsoh_free(tx_queue, buffer);
798 EFX_BUG_ON_PARANOID(buffer->len); 801 EFX_BUG_ON_PARANOID(buffer->len);
799 EFX_BUG_ON_PARANOID(buffer->unmap_len); 802 EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -818,7 +821,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
818 while (tx_queue->insert_count != tx_queue->write_count) { 821 while (tx_queue->insert_count != tx_queue->write_count) {
819 --tx_queue->insert_count; 822 --tx_queue->insert_count;
820 buffer = &tx_queue->buffer[tx_queue->insert_count & 823 buffer = &tx_queue->buffer[tx_queue->insert_count &
821 tx_queue->efx->type->txd_ring_mask]; 824 EFX_TXQ_MASK];
822 efx_tsoh_free(tx_queue, buffer); 825 efx_tsoh_free(tx_queue, buffer);
823 EFX_BUG_ON_PARANOID(buffer->skb); 826 EFX_BUG_ON_PARANOID(buffer->skb);
824 buffer->len = 0; 827 buffer->len = 0;
@@ -1135,7 +1138,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1135 unsigned i; 1138 unsigned i;
1136 1139
1137 if (tx_queue->buffer) { 1140 if (tx_queue->buffer) {
1138 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1141 for (i = 0; i <= EFX_TXQ_MASK; ++i)
1139 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1142 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1140 } 1143 }
1141 1144
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index c821c15445a0..325029949488 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -41,6 +41,8 @@
41 41
42/* Spurious parity errors in TSORT buffers */ 42/* Spurious parity errors in TSORT buffers */
43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
44/* Unaligned read request >512 bytes after aligning may break TSORT */
45#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
44/* iSCSI parsing errors */ 46/* iSCSI parsing errors */
45#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A 47#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
46/* RX events go missing */ 48/* RX events go missing */
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 161181a4b3d6..5783f50d18e9 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -31,6 +31,8 @@
31#include <linux/cache.h> 31#include <linux/cache.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <asm/cacheflush.h>
35
34#include "sh_eth.h" 36#include "sh_eth.h"
35 37
36/* There is CPU dependent code */ 38/* There is CPU dependent code */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3a449d012d4b..a3d99913f184 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.25" 53#define DRV_VERSION "1.26"
54#define PFX DRV_NAME " " 54#define PFX DRV_NAME " "
55 55
56/* 56/*
@@ -102,6 +102,7 @@ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
102static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = { 102static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
103 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ 103 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ 104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
105 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ 106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ 107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ 108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
@@ -139,6 +140,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
139 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ 140 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
140 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ 141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ 142 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
143 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
142 { 0 } 144 { 0 }
143}; 145};
144 146
@@ -602,6 +604,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
602 /* apply workaround for integrated resistors calibration */ 604 /* apply workaround for integrated resistors calibration */
603 gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); 605 gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
604 gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); 606 gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
607 } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
608 /* apply fixes in PHY AFE */
609 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
610
611 /* apply RDAC termination workaround */
612 gm_phy_write(hw, port, 24, 0x2800);
613 gm_phy_write(hw, port, 23, 0x2001);
614
615 /* set page register back to 0 */
616 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
605 } else if (hw->chip_id != CHIP_ID_YUKON_EX && 617 } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
606 hw->chip_id < CHIP_ID_YUKON_SUPR) { 618 hw->chip_id < CHIP_ID_YUKON_SUPR) {
607 /* no effect on Yukon-XL */ 619 /* no effect on Yukon-XL */
@@ -786,8 +798,7 @@ static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
786 798
787 if ( (hw->chip_id == CHIP_ID_YUKON_EX && 799 if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
788 hw->chip_rev != CHIP_REV_YU_EX_A0) || 800 hw->chip_rev != CHIP_REV_YU_EX_A0) ||
789 hw->chip_id == CHIP_ID_YUKON_FE_P || 801 hw->chip_id >= CHIP_ID_YUKON_FE_P) {
790 hw->chip_id == CHIP_ID_YUKON_SUPR) {
791 /* Yukon-Extreme B0 and further Extreme devices */ 802 /* Yukon-Extreme B0 and further Extreme devices */
792 /* enable Store & Forward mode for TX */ 803 /* enable Store & Forward mode for TX */
793 804
@@ -925,8 +936,14 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
925 936
926 /* On chips without ram buffer, pause is controled by MAC level */ 937 /* On chips without ram buffer, pause is controled by MAC level */
927 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) { 938 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
928 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 939 /* Pause threshold is scaled by 8 in bytes */
929 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 940 if (hw->chip_id == CHIP_ID_YUKON_FE_P
941 && hw->chip_rev == CHIP_REV_YU_FE2_A0)
942 reg = 1568 / 8;
943 else
944 reg = 1024 / 8;
945 sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
946 sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
930 947
931 sky2_set_tx_stfwd(hw, port); 948 sky2_set_tx_stfwd(hw, port);
932 } 949 }
@@ -1397,6 +1414,31 @@ static int sky2_rx_start(struct sky2_port *sky2)
1397 1414
1398 /* Tell chip about available buffers */ 1415 /* Tell chip about available buffers */
1399 sky2_rx_update(sky2, rxq); 1416 sky2_rx_update(sky2, rxq);
1417
1418 if (hw->chip_id == CHIP_ID_YUKON_EX ||
1419 hw->chip_id == CHIP_ID_YUKON_SUPR) {
1420 /*
1421 * Disable flushing of non ASF packets;
1422 * must be done after initializing the BMUs;
1423 * drivers without ASF support should do this too, otherwise
1424 * it may happen that they cannot run on ASF devices;
1425 * remember that the MAC FIFO isn't reset during initialization.
1426 */
1427 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
1428 }
1429
1430 if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
1431 /* Enable RX Home Address & Routing Header checksum fix */
1432 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
1433 RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
1434
1435 /* Enable TX Home Address & Routing Header checksum fix */
1436 sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
1437 TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
1438 }
1439
1440
1441
1400 return 0; 1442 return 0;
1401nomem: 1443nomem:
1402 sky2_rx_clean(sky2); 1444 sky2_rx_clean(sky2);
@@ -2096,6 +2138,25 @@ out:
2096 spin_unlock(&sky2->phy_lock); 2138 spin_unlock(&sky2->phy_lock);
2097} 2139}
2098 2140
2141/* Special quick link interrupt (Yukon-2 Optima only) */
2142static void sky2_qlink_intr(struct sky2_hw *hw)
2143{
2144 struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
2145 u32 imask;
2146 u16 phy;
2147
2148 /* disable irq */
2149 imask = sky2_read32(hw, B0_IMSK);
2150 imask &= ~Y2_IS_PHY_QLNK;
2151 sky2_write32(hw, B0_IMSK, imask);
2152
2153 /* reset PHY Link Detect */
2154 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
2155 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
2156
2157 sky2_link_up(sky2);
2158}
2159
2099/* Transmit timeout is only called if we are running, carrier is up 2160/* Transmit timeout is only called if we are running, carrier is up
2100 * and tx queue is full (stopped). 2161 * and tx queue is full (stopped).
2101 */ 2162 */
@@ -2765,6 +2826,9 @@ static int sky2_poll(struct napi_struct *napi, int work_limit)
2765 if (status & Y2_IS_IRQ_PHY2) 2826 if (status & Y2_IS_IRQ_PHY2)
2766 sky2_phy_intr(hw, 1); 2827 sky2_phy_intr(hw, 1);
2767 2828
2829 if (status & Y2_IS_PHY_QLNK)
2830 sky2_qlink_intr(hw);
2831
2768 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { 2832 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
2769 work_done += sky2_status_intr(hw, work_limit - work_done, idx); 2833 work_done += sky2_status_intr(hw, work_limit - work_done, idx);
2770 2834
@@ -2814,6 +2878,7 @@ static u32 sky2_mhz(const struct sky2_hw *hw)
2814 case CHIP_ID_YUKON_EX: 2878 case CHIP_ID_YUKON_EX:
2815 case CHIP_ID_YUKON_SUPR: 2879 case CHIP_ID_YUKON_SUPR:
2816 case CHIP_ID_YUKON_UL_2: 2880 case CHIP_ID_YUKON_UL_2:
2881 case CHIP_ID_YUKON_OPT:
2817 return 125; 2882 return 125;
2818 2883
2819 case CHIP_ID_YUKON_FE: 2884 case CHIP_ID_YUKON_FE:
@@ -2903,6 +2968,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2903 break; 2968 break;
2904 2969
2905 case CHIP_ID_YUKON_UL_2: 2970 case CHIP_ID_YUKON_UL_2:
2971 case CHIP_ID_YUKON_OPT:
2906 hw->flags = SKY2_HW_GIGABIT 2972 hw->flags = SKY2_HW_GIGABIT
2907 | SKY2_HW_ADV_POWER_CTL; 2973 | SKY2_HW_ADV_POWER_CTL;
2908 break; 2974 break;
@@ -2985,6 +3051,52 @@ static void sky2_reset(struct sky2_hw *hw)
2985 sky2_write16(hw, SK_REG(i, GMAC_CTRL), 3051 sky2_write16(hw, SK_REG(i, GMAC_CTRL),
2986 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON 3052 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
2987 | GMC_BYP_RETR_ON); 3053 | GMC_BYP_RETR_ON);
3054
3055 }
3056
3057 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
3058 /* enable MACSec clock gating */
3059 sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
3060 }
3061
3062 if (hw->chip_id == CHIP_ID_YUKON_OPT) {
3063 u16 reg;
3064 u32 msk;
3065
3066 if (hw->chip_rev == 0) {
3067 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
3068 sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
3069
3070 /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
3071 reg = 10;
3072 } else {
3073 /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
3074 reg = 3;
3075 }
3076
3077 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
3078
3079 /* reset PHY Link Detect */
3080 sky2_pci_write16(hw, PSM_CONFIG_REG4,
3081 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
3082 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
3083
3084
3085 /* enable PHY Quick Link */
3086 msk = sky2_read32(hw, B0_IMSK);
3087 msk |= Y2_IS_PHY_QLNK;
3088 sky2_write32(hw, B0_IMSK, msk);
3089
3090 /* check if PSMv2 was running before */
3091 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
3092 if (reg & PCI_EXP_LNKCTL_ASPMC) {
3093 int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3094 /* restore the PCIe Link Control register */
3095 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
3096 }
3097
3098 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
3099 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
2988 } 3100 }
2989 3101
2990 /* Clear I2C IRQ noise */ 3102 /* Clear I2C IRQ noise */
@@ -4405,9 +4517,11 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
4405 "FE+", /* 0xb8 */ 4517 "FE+", /* 0xb8 */
4406 "Supreme", /* 0xb9 */ 4518 "Supreme", /* 0xb9 */
4407 "UL 2", /* 0xba */ 4519 "UL 2", /* 0xba */
4520 "Unknown", /* 0xbb */
4521 "Optima", /* 0xbc */
4408 }; 4522 };
4409 4523
4410 if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_UL_2) 4524 if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT)
4411 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz); 4525 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
4412 else 4526 else
4413 snprintf(buf, sz, "(chip %#x)", chipid); 4527 snprintf(buf, sz, "(chip %#x)", chipid);
@@ -4537,6 +4651,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4537 goto err_out_free_netdev; 4651 goto err_out_free_netdev;
4538 } 4652 }
4539 4653
4654 netif_carrier_off(dev);
4655
4540 netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); 4656 netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
4541 4657
4542 err = request_irq(pdev->irq, sky2_intr, 4658 err = request_irq(pdev->irq, sky2_intr,
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index ed54129698b4..365d79c7d834 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -16,6 +16,13 @@ enum {
16 PCI_DEV_REG5 = 0x88, 16 PCI_DEV_REG5 = 0x88,
17 PCI_CFG_REG_0 = 0x90, 17 PCI_CFG_REG_0 = 0x90,
18 PCI_CFG_REG_1 = 0x94, 18 PCI_CFG_REG_1 = 0x94,
19
20 PSM_CONFIG_REG0 = 0x98,
21 PSM_CONFIG_REG1 = 0x9C,
22 PSM_CONFIG_REG2 = 0x160,
23 PSM_CONFIG_REG3 = 0x164,
24 PSM_CONFIG_REG4 = 0x168,
25
19}; 26};
20 27
21/* Yukon-2 */ 28/* Yukon-2 */
@@ -48,6 +55,37 @@ enum pci_dev_reg_2 {
48 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ 55 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
49}; 56};
50 57
58/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */
59enum pci_dev_reg_3 {
60 P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
61 P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */
62 P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
63 P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */
64 P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */
65 P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */
66 P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */
67 P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */
68 P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */
69 P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */
70 P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */
71 P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */
72 P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. Link1 GMAC */
73 P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */
74 P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */
75 P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */
76 P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */
77 P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */
78 P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */
79 P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */
80 PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
81 P_CLK_COR_REGS_D0_DIS |
82 P_CLK_COR_LNK1_D0_DIS |
83 P_CLK_MAC_LNK1_D0_DIS |
84 P_CLK_PCI_MST_ARB_DIS |
85 P_CLK_COR_COMMON_DIS |
86 P_CLK_COR_LNK1_BMU_DIS,
87};
88
51/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ 89/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
52enum pci_dev_reg_4 { 90enum pci_dev_reg_4 {
53 /* (Link Training & Status State Machine) */ 91 /* (Link Training & Status State Machine) */
@@ -114,7 +152,7 @@ enum pci_dev_reg_5 {
114 P_GAT_PCIE_RX_EL_IDLE, 152 P_GAT_PCIE_RX_EL_IDLE,
115}; 153};
116 154
117#/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */ 155/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */
118enum pci_cfg_reg1 { 156enum pci_cfg_reg1 {
119 P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */ 157 P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */
120 /* Bit 23..21: Release Clock on Event */ 158 /* Bit 23..21: Release Clock on Event */
@@ -145,6 +183,72 @@ enum pci_cfg_reg1 {
145 P_CF1_ENA_TXBMU_WR_IDLE, 183 P_CF1_ENA_TXBMU_WR_IDLE,
146}; 184};
147 185
186/* Yukon-Optima */
187enum {
188 PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */
189
190 PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */
191 PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */
192
193 PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */
194
195 PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */
196 PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */
197 PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */
198 PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */
199
200 PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */
201
202 PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */
203 PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */
204};
205
206/* Yukon-Supreme */
207enum {
208 PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */
209
210 PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */
211 PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */
212 PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
213 PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */
214 PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */
215 PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */
216 PSM_CONFIG_REG1_UART_FC_DSR_VAL = 1<<23, /* Default value for UART_DSR_n */
217 PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */
218 PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */
219 PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
220 PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */
221 PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */
222 PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */
223 PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */
224 PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
225 PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
226 PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
227 PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */
228 PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */
229
230 PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
231 PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */
232 PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
233 PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
234 PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */
235 PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
236 PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */
237 PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */
238 PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */
239 PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */
240};
241
242/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */
243enum {
244 /* PHY Link Detect Timer */
245 PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
246 PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
247
248 PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */
249 PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
250};
251
148 252
149#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ 253#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
150 PCI_STATUS_SIG_SYSTEM_ERROR | \ 254 PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -197,6 +301,9 @@ enum csr_regs {
197 B2_I2C_IRQ = 0x0168, 301 B2_I2C_IRQ = 0x0168,
198 B2_I2C_SW = 0x016c, 302 B2_I2C_SW = 0x016c,
199 303
304 Y2_PEX_PHY_DATA = 0x0170,
305 Y2_PEX_PHY_ADDR = 0x0172,
306
200 B3_RAM_ADDR = 0x0180, 307 B3_RAM_ADDR = 0x0180,
201 B3_RAM_DATA_LO = 0x0184, 308 B3_RAM_DATA_LO = 0x0184,
202 B3_RAM_DATA_HI = 0x0188, 309 B3_RAM_DATA_HI = 0x0188,
@@ -317,6 +424,10 @@ enum {
317 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */ 424 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
318 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */ 425 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
319 426
427 Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */
428 Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */
429 Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */
430
320 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */ 431 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
321 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */ 432 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
322 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */ 433 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
@@ -435,6 +546,7 @@ enum {
435 CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */ 546 CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
436 CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */ 547 CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
437 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ 548 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
549 CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
438}; 550};
439enum yukon_ec_rev { 551enum yukon_ec_rev {
440 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ 552 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
@@ -459,6 +571,8 @@ enum yukon_ex_rev {
459}; 571};
460enum yukon_supr_rev { 572enum yukon_supr_rev {
461 CHIP_REV_YU_SU_A0 = 0, 573 CHIP_REV_YU_SU_A0 = 0,
574 CHIP_REV_YU_SU_B0 = 1,
575 CHIP_REV_YU_SU_B1 = 3,
462}; 576};
463 577
464 578
@@ -513,6 +627,12 @@ enum {
513 TIM_T_STEP = 1<<0, /* Test step */ 627 TIM_T_STEP = 1<<0, /* Test step */
514}; 628};
515 629
630/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */
631enum {
632 PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */
633 PEX_DB_ACCESS = 1<<30, /* Access to debug register */
634};
635
516/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ 636/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
517 /* Bit 31..19: reserved */ 637 /* Bit 31..19: reserved */
518#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */ 638#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
@@ -688,10 +808,11 @@ enum {
688 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */ 808 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
689 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ 809 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
690 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ 810 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
691 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */ 811 RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */
812 RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */
692 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */ 813 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
693 RX_GMF_UP_THR = 0x0c58,/* 8 bit Rx Upper Pause Thr (Yukon-EC_U) */ 814 RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */
694 RX_GMF_LP_THR = 0x0c5a,/* 8 bit Rx Lower Pause Thr (Yukon-EC_U) */ 815 RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */
695 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */ 816 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
696 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ 817 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
697 818
@@ -754,6 +875,42 @@ enum {
754 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */ 875 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
755}; 876};
756 877
878/* TBMU_TEST 0x06B8 Transmit BMU Test Register */
879enum {
880 TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
881 TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
882 TBMU_TEST_HOME_ADD_PAD_FIX1_EN = 1<<29, /* Home Address Paddiing FIX1 Enable */
883 TBMU_TEST_HOME_ADD_PAD_FIX1_DIS = 1<<28, /* Home Address Paddiing FIX1 Disable */
884 TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */
885 TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */
886 TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */
887 TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */
888
889 TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */
890 TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */
891 TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */
892
893 TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */
894 TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */
895 TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */
896
897 TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */
898 TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */
899 TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */
900
901 TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */
902 TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */
903 TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */
904
905 TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */
906 TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */
907 TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */
908
909 TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */
910 TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */
911 TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */
912};
913
757/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ 914/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
758/* PREF_UNIT_CTRL 32 bit Prefetch Control register */ 915/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
759enum { 916enum {
@@ -1674,6 +1831,12 @@ enum {
1674 1831
1675/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ 1832/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1676enum { 1833enum {
1834 RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */
1835 RX_GCLKMAC_OFF = 1<<30,
1836
1837 RX_STFW_DIS = 1<<29, /* RX Store and Forward Enable */
1838 RX_STFW_ENA = 1<<28,
1839
1677 RX_TRUNC_ON = 1<<27, /* enable packet truncation */ 1840 RX_TRUNC_ON = 1<<27, /* enable packet truncation */
1678 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */ 1841 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
1679 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */ 1842 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
@@ -1711,6 +1874,20 @@ enum {
1711 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON, 1874 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
1712}; 1875};
1713 1876
1877/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */
1878enum {
1879 RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */
1880 RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */
1881 RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */
1882 RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */
1883 RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */
1884 RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */
1885 RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */
1886 RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */
1887 RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */
1888 RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */
1889};
1890
1714/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ 1891/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
1715enum { 1892enum {
1716 TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */ 1893 TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
new file mode 100644
index 000000000000..35eaa5251d7f
--- /dev/null
+++ b/drivers/net/stmmac/Kconfig
@@ -0,0 +1,53 @@
1config STMMAC_ETH
2 tristate "STMicroelectronics 10/100/1000 Ethernet driver"
3 select MII
4 select PHYLIB
5 depends on NETDEVICES && CPU_SUBTYPE_ST40
6 help
7 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
8 controllers. ST Ethernet IPs are built around a Synopsys IP Core.
9
10if STMMAC_ETH
11
12config STMMAC_DA
13 bool "STMMAC DMA arbitration scheme"
14 default n
15 help
16 Selecting this option, rx has priority over Tx (only for Giga
17 Ethernet device).
18 By default, the DMA arbitration scheme is based on Round-robin
19 (rx:tx priority is 1:1).
20
21config STMMAC_DUAL_MAC
22 bool "STMMAC: dual mac support (EXPERIMENTAL)"
23 default n
24 depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
25 help
26 Some ST SoCs (for example the stx7141 and stx7200c2) have two
27 Ethernet Controllers. This option turns on the second Ethernet
28 device on this kind of platforms.
29
30config STMMAC_TIMER
31 bool "STMMAC Timer optimisation"
32 default n
33 help
34 Use an external timer for mitigating the number of network
35 interrupts.
36
37choice
38 prompt "Select Timer device"
39 depends on STMMAC_TIMER
40
41config STMMAC_TMU_TIMER
42 bool "TMU channel 2"
43 depends on CPU_SH4
	  help
	    Use TMU channel 2 as the external timer device for mitigating
	    the number of network interrupts.
46config STMMAC_RTC_TIMER
47 bool "Real time clock"
48 depends on RTC_CLASS
	  help
	    Use the Real Time Clock (RTC) as the external timer device for
	    mitigating the number of network interrupts.
51endchoice
52
53endif
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
new file mode 100644
index 000000000000..b2d7a5564dfa
--- /dev/null
+++ b/drivers/net/stmmac/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
4 mac100.o gmac.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
new file mode 100644
index 000000000000..e49e5188e887
--- /dev/null
+++ b/drivers/net/stmmac/common.h
@@ -0,0 +1,330 @@
1/*******************************************************************************
2 STMMAC Common Header File
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include "descs.h"
26#include <linux/io.h>
27
28/* *********************************************
29 DMA CRS Control and Status Register Mapping
30 * *********************************************/
31#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
32#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
33#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
34#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
35#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
36#define DMA_STATUS 0x00001014 /* Status Register */
37#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
38#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
39#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
40#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
41#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
42
43/* ********************************
44 DMA Control register defines
45 * ********************************/
46#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
47#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
48
49/* **************************************
50 DMA Interrupt Enable register defines
51 * **************************************/
52/**** NORMAL INTERRUPT ****/
53#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
54#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
55#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
56#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
57#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
58
59#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
60 DMA_INTR_ENA_TIE)
61
62/**** ABNORMAL INTERRUPT ****/
63#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
64#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
65#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
66#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
67#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
68#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
69#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
70#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
71#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
72#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
73
74#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
75 DMA_INTR_ENA_UNE)
76
77/* DMA default interrupt mask */
78#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
79
/* ****************************
 * DMA Status register defines
 * ****************************/
#define DMA_STATUS_GPI		0x10000000	/* PMT interrupt */
#define DMA_STATUS_GMI		0x08000000	/* MMC interrupt */
#define DMA_STATUS_GLI		0x04000000	/* GMAC Line interface int. */
#define DMA_STATUS_EB_MASK	0x00380000	/* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT	0x00080000	/* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT	0x00100000	/* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK	0x00700000	/* Transmit Process State */
#define DMA_STATUS_TS_SHIFT	20
#define DMA_STATUS_RS_MASK	0x000e0000	/* Receive Process State */
#define DMA_STATUS_RS_SHIFT	17
#define DMA_STATUS_NIS	0x00010000	/* Normal Interrupt Summary */
#define DMA_STATUS_AIS	0x00008000	/* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI	0x00004000	/* Early Receive Interrupt */
#define DMA_STATUS_FBI	0x00002000	/* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI	0x00000400	/* Early Transmit Interrupt */
#define DMA_STATUS_RWT	0x00000200	/* Receive Watchdog Timeout */
#define DMA_STATUS_RPS	0x00000100	/* Receive Process Stopped */
#define DMA_STATUS_RU	0x00000080	/* Receive Buffer Unavailable */
#define DMA_STATUS_RI	0x00000040	/* Receive Interrupt */
#define DMA_STATUS_UNF	0x00000020	/* Transmit Underflow */
#define DMA_STATUS_OVF	0x00000010	/* Receive Overflow */
#define DMA_STATUS_TJT	0x00000008	/* Transmit Jabber Timeout */
#define DMA_STATUS_TU	0x00000004	/* Transmit Buffer Unavailable */
#define DMA_STATUS_TPS	0x00000002	/* Transmit Process Stopped */
#define DMA_STATUS_TI	0x00000001	/* Transmit Interrupt */
110
111/* Other defines */
112#define HASH_TABLE_SIZE 64
113#define PAUSE_TIME 0x200
114
115/* Flow Control defines */
116#define FLOW_OFF 0
117#define FLOW_RX 1
118#define FLOW_TX 2
119#define FLOW_AUTO (FLOW_TX | FLOW_RX)
120
121/* DMA STORE-AND-FORWARD Operation Mode */
122#define SF_DMA_MODE 1
123
124#define HW_CSUM 1
125#define NO_HW_CSUM 0
126
127/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
128#define BUF_SIZE_16KiB 16384
129#define BUF_SIZE_8KiB 8192
130#define BUF_SIZE_4KiB 4096
131#define BUF_SIZE_2KiB 2048
132
133/* Power Down and WOL */
134#define PMT_NOT_SUPPORTED 0
135#define PMT_SUPPORTED 1
136
137/* Common MAC defines */
138#define MAC_CTRL_REG 0x00000000 /* MAC Control */
139#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
140#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
141
142/* MAC Management Counters register */
143#define MMC_CONTROL 0x00000100 /* MMC Control */
144#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
145#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
146#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
147#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
148
149#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
150#define MMC_CONTROL_MAX_FRM_SHIFT 3
151#define MMC_CONTROL_MAX_FRAME 0x7FF
152
/* Extended software statistics kept by the driver on top of
 * struct net_device_stats; counters are bumped from the descriptor
 * status decoders and the interrupt handlers. */
struct stmmac_extra_stats {
	/* Transmit errors */
	unsigned long tx_underflow ____cacheline_aligned;
	unsigned long tx_carrier;
	unsigned long tx_losscarrier;
	unsigned long tx_heartbeat;
	unsigned long tx_deferred;
	unsigned long tx_vlan;
	unsigned long tx_jabber;
	unsigned long tx_frame_flushed;
	unsigned long tx_payload_error;
	unsigned long tx_ip_header_error;
	/* Receive errors */
	unsigned long rx_desc;
	unsigned long rx_partial;
	unsigned long rx_runt;
	unsigned long rx_toolong;
	unsigned long rx_collision;
	unsigned long rx_crc;
	unsigned long rx_lenght;	/* length errors (sic: historical spelling) */
	unsigned long rx_mii;
	unsigned long rx_multicast;
	unsigned long rx_gmac_overflow;
	unsigned long rx_watchdog;
	unsigned long da_rx_filter_fail;
	unsigned long sa_rx_filter_fail;
	unsigned long rx_missed_cntr;
	unsigned long rx_overflow_cntr;
	unsigned long rx_vlan;
	/* Tx/Rx IRQ errors */
	unsigned long tx_undeflow_irq;	/* (sic: historical spelling) */
	unsigned long tx_process_stopped_irq;
	unsigned long tx_jabber_irq;
	unsigned long rx_overflow_irq;
	unsigned long rx_buf_unav_irq;
	unsigned long rx_process_stopped_irq;
	unsigned long rx_watchdog_irq;
	unsigned long tx_early_irq;
	unsigned long fatal_bus_error_irq;
	/* Extra info */
	unsigned long threshold;
	unsigned long tx_pkt_n;
	unsigned long rx_pkt_n;
	unsigned long poll_n;
	unsigned long sched_timer_n;
	unsigned long normal_irq_n;
};
200
201/* GMAC core can compute the checksums in HW. */
202enum rx_frame_status {
203 good_frame = 0,
204 discard_frame = 1,
205 csum_none = 2,
206};
207
208static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
209 unsigned int high, unsigned int low)
210{
211 unsigned long data;
212
213 data = (addr[5] << 8) | addr[4];
214 writel(data, ioaddr + high);
215 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
216 writel(data, ioaddr + low);
217
218 return;
219}
220
/* Read back the 6-byte MAC address from a high/low register pair. */
static inline void stmmac_get_mac_addr(unsigned long ioaddr,
				unsigned char *addr, unsigned int high,
				unsigned int low)
{
	unsigned int hi_word, lo_word;
	int i;

	hi_word = readl(ioaddr + high);
	lo_word = readl(ioaddr + low);

	/* Bytes 0-3 live in the low word, bytes 4-5 in the high word. */
	for (i = 0; i < 4; i++)
		addr[i] = (lo_word >> (8 * i)) & 0xff;
	addr[4] = hi_word & 0xff;
	addr[5] = (hi_word >> 8) & 0xff;
}
241
/* Hardware-specific callback table: each supported core (GMAC or MAC100)
 * fills in its own implementation; the main driver only calls through
 * these pointers. All 'ioaddr' parameters are the mapped register base. */
struct stmmac_ops {
	/* MAC core initialization */
	void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
	/* DMA core initialization */
	int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
	/* Dump MAC registers */
	void (*dump_mac_regs) (unsigned long ioaddr);
	/* Dump DMA registers */
	void (*dump_dma_regs) (unsigned long ioaddr);
	/* Set tx/rx threshold in the csr6 register
	 * An invalid value enables the store-and-forward mode */
	void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
	/* To track extra statistic (if supported) */
	void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
				   unsigned long ioaddr);
	/* RX descriptor ring initialization */
	void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
			      int disable_rx_ic);
	/* TX descriptor ring initialization */
	void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);

	/* Invoked by the xmit function to prepare the tx descriptor */
	void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
				 int csum_flag);
	/* Set/get the owner of the descriptor */
	void (*set_tx_owner) (struct dma_desc *p);
	int (*get_tx_owner) (struct dma_desc *p);
	/* Invoked by the xmit function to close the tx descriptor */
	void (*close_tx_desc) (struct dma_desc *p);
	/* Clean the tx descriptor as soon as the tx irq is received */
	void (*release_tx_desc) (struct dma_desc *p);
	/* Clear interrupt on tx frame completion. When this bit is
	 * set an interrupt happens as soon as the frame is transmitted */
	void (*clear_tx_ic) (struct dma_desc *p);
	/* Last tx segment reports the transmit status */
	int (*get_tx_ls) (struct dma_desc *p);
	/* Return the transmit status looking at the TDES1 */
	int (*tx_status) (void *data, struct stmmac_extra_stats *x,
			  struct dma_desc *p, unsigned long ioaddr);
	/* Get the buffer size from the descriptor */
	int (*get_tx_len) (struct dma_desc *p);
	/* Handle extra events on specific interrupts hw dependent */
	void (*host_irq_status) (unsigned long ioaddr);
	int (*get_rx_owner) (struct dma_desc *p);
	void (*set_rx_owner) (struct dma_desc *p);
	/* Get the receive frame size */
	int (*get_rx_frame_len) (struct dma_desc *p);
	/* Return the reception status looking at the RDES1 */
	int (*rx_status) (void *data, struct stmmac_extra_stats *x,
			  struct dma_desc *p);
	/* Multicast filter setting */
	void (*set_filter) (struct net_device *dev);
	/* Flow control setting */
	void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
			   unsigned int fc, unsigned int pause_time);
	/* Set power management mode (e.g. magic frame) */
	void (*pmt) (unsigned long ioaddr, unsigned long mode);
	/* Set/Get Unicast MAC addresses */
	void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n);
	void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n);
};
305
306struct mac_link {
307 int port;
308 int duplex;
309 int speed;
310};
311
312struct mii_regs {
313 unsigned int addr; /* MII Address */
314 unsigned int data; /* MII Data */
315};
316
317struct hw_cap {
318 unsigned int version; /* Core Version register (GMAC) */
319 unsigned int pmt; /* Power-Down mode (GMAC) */
320 struct mac_link link;
321 struct mii_regs mii;
322};
323
324struct mac_device_info {
325 struct hw_cap hw;
326 struct stmmac_ops *ops;
327};
328
329struct mac_device_info *gmac_setup(unsigned long addr);
330struct mac_device_info *mac100_setup(unsigned long addr);
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
new file mode 100644
index 000000000000..6d2a0b2f5e57
--- /dev/null
+++ b/drivers/net/stmmac/descs.h
@@ -0,0 +1,163 @@
1/*******************************************************************************
2 Header File to describe the DMA descriptors
3 Use enhanced descriptors in case of GMAC Cores.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms and conditions of the GNU General Public License,
7 version 2, as published by the Free Software Foundation.
8
9 This program is distributed in the hope it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc.,
16 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
22*******************************************************************************/
/* DMA descriptor shared between host and hardware.
 * The rx/tx views describe the normal (MAC100) layout; erx/etx are the
 * enhanced layouts used by GMAC cores.
 * NOTE(review): the bitfield declarations assume the compiler allocates
 * bits starting from the least significant bit (little-endian ABI) —
 * confirm before enabling on a big-endian target. */
struct dma_desc {
	/* Receive descriptor */
	union {
		struct {
			/* RDES0 */
			u32 reserved1:1;
			u32 crc_error:1;
			u32 dribbling:1;
			u32 mii_error:1;
			u32 receive_watchdog:1;
			u32 frame_type:1;
			u32 collision:1;
			u32 frame_too_long:1;
			u32 last_descriptor:1;
			u32 first_descriptor:1;
			u32 multicast_frame:1;
			u32 run_frame:1;
			u32 length_error:1;
			u32 partial_frame_error:1;
			u32 descriptor_error:1;
			u32 error_summary:1;
			u32 frame_length:14;
			u32 filtering_fail:1;
			u32 own:1;	/* descriptor owned by the DMA engine */
			/* RDES1 */
			u32 buffer1_size:11;
			u32 buffer2_size:11;
			u32 reserved2:2;
			u32 second_address_chained:1;
			u32 end_ring:1;
			u32 reserved3:5;
			u32 disable_ic:1;	/* suppress rx-complete interrupt */
		} rx;
		struct {
			/* RDES0 */
			u32 payload_csum_error:1;
			u32 crc_error:1;
			u32 dribbling:1;
			u32 error_gmii:1;
			u32 receive_watchdog:1;
			u32 frame_type:1;
			u32 late_collision:1;
			u32 ipc_csum_error:1;
			u32 last_descriptor:1;
			u32 first_descriptor:1;
			u32 vlan_tag:1;
			u32 overflow_error:1;
			u32 length_error:1;
			u32 sa_filter_fail:1;
			u32 descriptor_error:1;
			u32 error_summary:1;
			u32 frame_length:14;
			u32 da_filter_fail:1;
			u32 own:1;
			/* RDES1 */
			u32 buffer1_size:13;
			u32 reserved1:1;
			u32 second_address_chained:1;
			u32 end_ring:1;
			u32 buffer2_size:13;
			u32 reserved2:2;
			u32 disable_ic:1;
		} erx;		/* -- enhanced -- */

		/* Transmit descriptor */
		struct {
			/* TDES0 */
			u32 deferred:1;
			u32 underflow_error:1;
			u32 excessive_deferral:1;
			u32 collision_count:4;
			u32 heartbeat_fail:1;
			u32 excessive_collisions:1;
			u32 late_collision:1;
			u32 no_carrier:1;
			u32 loss_carrier:1;
			u32 reserved1:3;
			u32 error_summary:1;
			u32 reserved2:15;
			u32 own:1;
			/* TDES1 */
			u32 buffer1_size:11;
			u32 buffer2_size:11;
			u32 reserved3:1;
			u32 disable_padding:1;
			u32 second_address_chained:1;
			u32 end_ring:1;
			u32 crc_disable:1;
			u32 reserved4:2;
			u32 first_segment:1;
			u32 last_segment:1;
			u32 interrupt:1;	/* raise irq on completion */
		} tx;
		struct {
			/* TDES0 */
			u32 deferred:1;
			u32 underflow_error:1;
			u32 excessive_deferral:1;
			u32 collision_count:4;
			u32 vlan_frame:1;
			u32 excessive_collisions:1;
			u32 late_collision:1;
			u32 no_carrier:1;
			u32 loss_carrier:1;
			u32 payload_error:1;
			u32 frame_flushed:1;
			u32 jabber_timeout:1;
			u32 error_summary:1;
			u32 ip_header_error:1;
			u32 time_stamp_status:1;
			u32 reserved1:2;
			u32 second_address_chained:1;
			u32 end_ring:1;
			u32 checksum_insertion:2;	/* see enum tdes_csum_insertion */
			u32 reserved2:1;
			u32 time_stamp_enable:1;
			u32 disable_padding:1;
			u32 crc_disable:1;
			u32 first_segment:1;
			u32 last_segment:1;
			u32 interrupt:1;
			u32 own:1;
			/* TDES1 */
			u32 buffer1_size:13;
			u32 reserved3:3;
			u32 buffer2_size:13;
			u32 reserved4:3;
		} etx;		/* -- enhanced -- */
	} des01;
	unsigned int des2;	/* buffer 1 address */
	unsigned int des3;	/* buffer 2 / next-descriptor address */
};
155
/* Transmit checksum insertion control: values written into the 2-bit
 * checksum_insertion field of the enhanced TX descriptor (TDES1). */
enum tdes_csum_insertion {
	cic_disabled = 0,	/* Checksum Insertion Control */
	cic_only_ip = 1,	/* Only IP header */
	cic_no_pseudoheader = 2,	/* IP header but pseudoheader
					 * is not calculated */
	cic_full = 3,		/* IP header and pseudoheader */
};
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c
new file mode 100644
index 000000000000..b624bb5bae0a
--- /dev/null
+++ b/drivers/net/stmmac/gmac.c
@@ -0,0 +1,693 @@
1/*******************************************************************************
2 This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code.
5
6 Copyright (C) 2007-2009 STMicroelectronics Ltd
7
8 This program is free software; you can redistribute it and/or modify it
9 under the terms and conditions of the GNU General Public License,
10 version 2, as published by the Free Software Foundation.
11
12 This program is distributed in the hope it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 more details.
16
17 You should have received a copy of the GNU General Public License along with
18 this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20
21 The full GNU General Public License is included in this distribution in
22 the file called "COPYING".
23
24 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/
26
27#include <linux/netdevice.h>
28#include <linux/crc32.h>
29#include <linux/mii.h>
30#include <linux/phy.h>
31
32#include "stmmac.h"
33#include "gmac.h"
34
35#undef GMAC_DEBUG
36/*#define GMAC_DEBUG*/
37#undef FRAME_FILTER_DEBUG
38/*#define FRAME_FILTER_DEBUG*/
39#ifdef GMAC_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args)
41#else
42#define DBG(fmt, args...) do { } while (0)
43#endif
44
/* Dump the first 55 GMAC core registers to the kernel log (debug aid). */
static void gmac_dump_regs(unsigned long ioaddr)
{
	int i;

	/* Print the base address with %lx: the old (unsigned int) cast
	 * truncated it on 64-bit platforms. */
	pr_info("\t----------------------------------------------\n"
		"\t  GMAC registers (base addr = 0x%lx)\n"
		"\t----------------------------------------------\n",
		ioaddr);

	for (i = 0; i < 55; i++) {
		int offset = i * 4;
		pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
			offset, readl(ioaddr + offset));
	}
}
60
/* Reset the DMA engine, program the bus mode (burst lengths), mask the
 * DMA interrupts and install the TX/RX descriptor ring base addresses.
 * Always returns 0.
 * NOTE(review): the reset poll below is unbounded — it spins forever if
 * the hardware never clears SFT_RESET; consider adding a timeout. */
static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);
	/* DMA SW reset */
	value |= DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + DMA_BUS_MODE);
	/* SFT_RESET is self-clearing: wait for the core to finish. */
	do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));

	/* Same programmable burst length for both tx (PBL) and rx (RPBL). */
	value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
	    ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
	     (pbl << DMA_BUS_MODE_RPBL_SHIFT));

#ifdef CONFIG_STMMAC_DA
	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
#endif
	writel(value, ioaddr + DMA_BUS_MODE);

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);

	/* The base address of the RX/TX descriptor lists must be written into
	 * DMA CSR3 and CSR4, respectively. */
	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);

	return 0;
}
88
/* Transmit FIFO flush operation: set the self-clearing FTF bit in CSR6
 * and busy-wait until the hardware clears it.
 * NOTE(review): unbounded wait — spins forever if FTF never clears. */
static void gmac_flush_tx_fifo(unsigned long ioaddr)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);
	writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);

	do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
97
98static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
99 int rxmode)
100{
101 u32 csr6 = readl(ioaddr + DMA_CONTROL);
102
103 if (txmode == SF_DMA_MODE) {
104 DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
105 /* Transmit COE type 2 cannot be done in cut-through mode. */
106 csr6 |= DMA_CONTROL_TSF;
107 /* Operating on second frame increase the performance
108 * especially when transmit store-and-forward is used.*/
109 csr6 |= DMA_CONTROL_OSF;
110 } else {
111 DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
112 " (threshold = %d)\n", txmode);
113 csr6 &= ~DMA_CONTROL_TSF;
114 csr6 &= DMA_CONTROL_TC_TX_MASK;
115 /* Set the transmit threashold */
116 if (txmode <= 32)
117 csr6 |= DMA_CONTROL_TTC_32;
118 else if (txmode <= 64)
119 csr6 |= DMA_CONTROL_TTC_64;
120 else if (txmode <= 128)
121 csr6 |= DMA_CONTROL_TTC_128;
122 else if (txmode <= 192)
123 csr6 |= DMA_CONTROL_TTC_192;
124 else
125 csr6 |= DMA_CONTROL_TTC_256;
126 }
127
128 if (rxmode == SF_DMA_MODE) {
129 DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
130 csr6 |= DMA_CONTROL_RSF;
131 } else {
132 DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
133 " (threshold = %d)\n", rxmode);
134 csr6 &= ~DMA_CONTROL_RSF;
135 csr6 &= DMA_CONTROL_TC_RX_MASK;
136 if (rxmode <= 32)
137 csr6 |= DMA_CONTROL_RTC_32;
138 else if (rxmode <= 64)
139 csr6 |= DMA_CONTROL_RTC_64;
140 else if (rxmode <= 96)
141 csr6 |= DMA_CONTROL_RTC_96;
142 else
143 csr6 |= DMA_CONTROL_RTC_128;
144 }
145
146 writel(csr6, ioaddr + DMA_CONTROL);
147 return;
148}
149
/* Placeholder: RMON/MMC counters are not implemented, so there is
 * nothing to collect here yet. */
static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
				   unsigned long ioaddr)
{
}
156
157static void gmac_dump_dma_regs(unsigned long ioaddr)
158{
159 int i;
160 pr_info(" DMA registers\n");
161 for (i = 0; i < 22; i++) {
162 if ((i < 9) || (i > 17)) {
163 int offset = i * 4;
164 pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
165 (DMA_BUS_MODE + offset),
166 readl(ioaddr + DMA_BUS_MODE + offset));
167 }
168 }
169 return;
170}
171
/* Decode the enhanced TX descriptor status (TDES0), update the error
 * counters in @x and the net_device_stats passed via @data, and return
 * 0 on clean completion or -1 if the descriptor reports error_summary. */
static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_desc *p, unsigned long ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.etx.error_summary)) {
		/* NOTE(review): this passes a struct to a printf-style
		 * format; DBG is compiled out by default — confirm before
		 * enabling GMAC_DEBUG. */
		DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
		if (unlikely(p->des01.etx.jabber_timeout)) {
			DBG(KERN_ERR "\tjabber_timeout error\n");
			x->tx_jabber++;
		}

		if (unlikely(p->des01.etx.frame_flushed)) {
			DBG(KERN_ERR "\tframe_flushed error\n");
			x->tx_frame_flushed++;
			/* A flushed frame leaves stale data: drain the FIFO. */
			gmac_flush_tx_fifo(ioaddr);
		}

		if (unlikely(p->des01.etx.loss_carrier)) {
			DBG(KERN_ERR "\tloss_carrier error\n");
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.no_carrier)) {
			DBG(KERN_ERR "\tno_carrier error\n");
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.late_collision)) {
			DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_collisions)) {
			DBG(KERN_ERR "\texcessive_collisions\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_deferral)) {
			DBG(KERN_INFO "\texcessive tx_deferral\n");
			x->tx_deferred++;
		}

		if (unlikely(p->des01.etx.underflow_error)) {
			DBG(KERN_ERR "\tunderflow error\n");
			gmac_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(p->des01.etx.ip_header_error)) {
			DBG(KERN_ERR "\tTX IP header csum error\n");
			x->tx_ip_header_error++;
		}

		if (unlikely(p->des01.etx.payload_error)) {
			DBG(KERN_ERR "\tAddr/Payload csum error\n");
			x->tx_payload_error++;
			gmac_flush_tx_fifo(ioaddr);
		}

		ret = -1;
	}

	/* Deferral alone is informational, not part of error_summary. */
	if (unlikely(p->des01.etx.deferred)) {
		DBG(KERN_INFO "GMAC TX status: tx deferred\n");
		x->tx_deferred++;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.etx.vlan_frame) {
		DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
		x->tx_vlan++;
	}
#endif

	return ret;
}
247
248static int gmac_get_tx_len(struct dma_desc *p)
249{
250 return p->des01.etx.buffer1_size;
251}
252
253static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
254{
255 int ret = good_frame;
256 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
257
258 /* bits 5 7 0 | Frame status
259 * ----------------------------------------------------------
260 * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects)
261 * 1 0 0 | IPv4/6 No CSUM errorS.
262 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
263 * 1 1 0 | IPv4/6 CSUM IP HR error
264 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
265 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
266 * 0 1 1 | COE bypassed.. no IPv4/6 frame
267 * 0 1 0 | Reserved.
268 */
269 if (status == 0x0) {
270 DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
271 ret = good_frame;
272 } else if (status == 0x4) {
273 DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
274 ret = good_frame;
275 } else if (status == 0x5) {
276 DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
277 ret = csum_none;
278 } else if (status == 0x6) {
279 DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
280 ret = csum_none;
281 } else if (status == 0x7) {
282 DBG(KERN_ERR
283 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
284 ret = csum_none;
285 } else if (status == 0x1) {
286 DBG(KERN_ERR
287 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
288 ret = discard_frame;
289 } else if (status == 0x3) {
290 DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
291 ret = discard_frame;
292 }
293 return ret;
294}
295
/* Decode the enhanced RX descriptor status (RDES0), update the error
 * counters in @x and the net_device_stats passed via @data, and return
 * one of enum rx_frame_status (good_frame/discard_frame/csum_none). */
static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
		if (unlikely(p->des01.erx.descriptor_error)) {
			DBG(KERN_ERR "\tdescriptor error\n");
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error)) {
			DBG(KERN_ERR "\toverflow error\n");
			x->rx_gmac_overflow++;
		}

		if (unlikely(p->des01.erx.ipc_csum_error))
			DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision)) {
			DBG(KERN_ERR "\tlate_collision error\n");
			/* NOTE(review): collisions is incremented twice for a
			 * single late-collision event — looks like accidental
			 * line duplication; confirm before changing. */
			stats->collisions++;
			stats->collisions++;
		}
		if (unlikely(p->des01.erx.receive_watchdog)) {
			DBG(KERN_ERR "\treceive_watchdog error\n");
			x->rx_watchdog++;
		}
		if (unlikely(p->des01.erx.error_gmii)) {
			DBG(KERN_ERR "\tReceive Error\n");
			x->rx_mii++;
		}
		if (unlikely(p->des01.erx.crc_error)) {
			DBG(KERN_ERR "\tCRC error\n");
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * It doesn't match with the information reported into the databook.
	 * At any rate, we need to understand if the CSUM hw computation is ok
	 * and report this info to the upper layers. */
	/* NOTE(review): this unconditionally overwrites the discard_frame
	 * set above on error_summary — verify that is intended. */
	ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
		p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

	if (unlikely(p->des01.erx.dribbling)) {
		DBG(KERN_ERR "GMAC RX: dribbling error\n");
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.sa_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		DBG(KERN_ERR "GMAC RX: length_error error\n");
		x->rx_lenght++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag) {
		DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
		x->rx_vlan++;
	}
#endif
	return ret;
}
372
373static void gmac_irq_status(unsigned long ioaddr)
374{
375 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
376
377 /* Not used events (e.g. MMC interrupts) are not handled. */
378 if ((intr_status & mmc_tx_irq))
379 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
380 readl(ioaddr + GMAC_MMC_TX_INTR));
381 if (unlikely(intr_status & mmc_rx_irq))
382 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
383 readl(ioaddr + GMAC_MMC_RX_INTR));
384 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
385 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
386 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
387 if (unlikely(intr_status & pmt_irq)) {
388 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
389 /* clear the PMT bits 5 and 6 by reading the PMT
390 * status register. */
391 readl(ioaddr + GMAC_PMT);
392 }
393
394 return;
395}
396
/* One-time GMAC core setup: OR the mandatory GMAC_CORE_INIT bits into the
 * configuration register, freeze the MMC counters and mask the core IRQs. */
static void gmac_core_init(unsigned long ioaddr)
{
	u32 value = readl(ioaddr + GMAC_CONTROL);
	value |= GMAC_CORE_INIT;
	writel(value, ioaddr + GMAC_CONTROL);

	/* STBus Bridge Configuration */
	/*writel(0xc5608, ioaddr + 0x00007000);*/

	/* Freeze MMC counters */
	writel(0x8, ioaddr + GMAC_MMC_CTRL);
	/* Mask GMAC interrupts */
	writel(0x207, ioaddr + GMAC_INT_MASK);

#ifdef STMMAC_VLAN_TAG_USED
	/* Tag detection without filtering */
	writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
	return;
}
417
/* Program a MAC address into perfect-filter slot @reg_n (0 = own address). */
static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
			    GMAC_ADDR_LOW(reg_n));
}
424
/* Read back the MAC address stored in perfect-filter slot @reg_n. */
static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
			    GMAC_ADDR_LOW(reg_n));
}
431
/* Configure the GMAC frame filter from the netdev address lists:
 * promiscuous / pass-all-multicast / 64-bit multicast hash, plus perfect
 * filtering for the secondary unicast addresses. */
static void gmac_set_filter(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	unsigned int value = 0;

	DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
	    __func__, dev->mc_count, dev->uc_count);

	if (dev->flags & IFF_PROMISC)
		value = GMAC_FRAME_FILTER_PR;
	else if ((dev->mc_count > HASH_TABLE_SIZE)
		   || (dev->flags & IFF_ALLMULTI)) {
		value = GMAC_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
		writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
	} else if (dev->mc_count > 0) {
		int i;
		u32 mc_filter[2];
		struct dev_mc_list *mclist;

		/* Hash filter for multicast */
		value = GMAC_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list;
		     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
			/* The upper 6 bits of the calculated CRC are used to
			   index the contents of the hash table */
			int bit_nr =
			    bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
		writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering)*/
	if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
		/* Switch to promiscuous mode if more than 16 addrs
		   are required */
		value |= GMAC_FRAME_FILTER_PR;
	else {
		int i;
		struct dev_addr_list *uc_ptr = dev->uc_list;

		/* NOTE(review): slots programmed on a previous call are not
		 * cleared when the unicast list shrinks — verify whether
		 * stale perfect-filter entries can linger. */
		for (i = 0; i < dev->uc_count; i++) {
			gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
					i + 1);

			DBG(KERN_INFO "\t%d "
			"- Unicast addr %02x:%02x:%02x:%02x:%02x:"
			"%02x\n", i + 1,
			uc_ptr->da_addr[0], uc_ptr->da_addr[1],
			uc_ptr->da_addr[2], uc_ptr->da_addr[3],
			uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
			uc_ptr = uc_ptr->next;
		}
	}

#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= GMAC_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + GMAC_FRAME_FILTER);

	DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
	    "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
	    readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));

	return;
}
506
507static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
508 unsigned int fc, unsigned int pause_time)
509{
510 unsigned int flow = 0;
511
512 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
513 if (fc & FLOW_RX) {
514 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
515 flow |= GMAC_FLOW_CTRL_RFE;
516 }
517 if (fc & FLOW_TX) {
518 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
519 flow |= GMAC_FLOW_CTRL_TFE;
520 }
521
522 if (duplex) {
523 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
524 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
525 }
526
527 writel(flow, ioaddr + GMAC_FLOW_CTRL);
528 return;
529}
530
531static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
532{
533 unsigned int pmt = 0;
534
535 if (mode == WAKE_MAGIC) {
536 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
537 pmt |= power_down | magic_pkt_en;
538 } else if (mode == WAKE_UCAST) {
539 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
540 pmt |= global_unicast;
541 }
542
543 writel(pmt, ioaddr + GMAC_PMT);
544 return;
545}
546
547static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
548 int disable_rx_ic)
549{
550 int i;
551 for (i = 0; i < ring_size; i++) {
552 p->des01.erx.own = 1;
553 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
554 /* To support jumbo frames */
555 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
556 if (i == ring_size - 1)
557 p->des01.erx.end_ring = 1;
558 if (disable_rx_ic)
559 p->des01.erx.disable_ic = 1;
560 p++;
561 }
562 return;
563}
564
565static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
566{
567 int i;
568
569 for (i = 0; i < ring_size; i++) {
570 p->des01.etx.own = 0;
571 if (i == ring_size - 1)
572 p->des01.etx.end_ring = 1;
573 p++;
574 }
575
576 return;
577}
578
/* Returns 1 while the DMA still owns this TX descriptor. */
static int gmac_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}
583
/* Returns 1 while the DMA still owns this RX descriptor. */
static int gmac_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}
588
/* Hand the TX descriptor over to the DMA engine. */
static void gmac_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}
593
/* Hand the RX descriptor back to the DMA engine. */
static void gmac_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}
598
/* Returns non-zero if this TX descriptor closes a frame (last segment). */
static int gmac_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}
603
604static void gmac_release_tx_desc(struct dma_desc *p)
605{
606 int ter = p->des01.etx.end_ring;
607
608 memset(p, 0, sizeof(struct dma_desc));
609 p->des01.etx.end_ring = ter;
610
611 return;
612}
613
614static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
615 int csum_flag)
616{
617 p->des01.etx.first_segment = is_fs;
618 if (unlikely(len > BUF_SIZE_4KiB)) {
619 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
620 p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
621 } else {
622 p->des01.etx.buffer1_size = len;
623 }
624 if (likely(csum_flag))
625 p->des01.etx.checksum_insertion = cic_full;
626}
627
/* Suppress the completion interrupt for this TX descriptor (IRQ mitigation). */
static void gmac_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}
632
/* Close a frame: mark the last segment and request a completion interrupt. */
static void gmac_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}
638
/* Length (in bytes) of the received frame as reported by the descriptor. */
static int gmac_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.erx.frame_length;
}
643
/* stmmac HW-abstraction operations for the DWC GMAC (enhanced descriptors). */
struct stmmac_ops gmac_driver = {
	.core_init = gmac_core_init,
	.dump_mac_regs = gmac_dump_regs,
	.dma_init = gmac_dma_init,
	.dump_dma_regs = gmac_dump_dma_regs,
	.dma_mode = gmac_dma_operation_mode,
	.dma_diagnostic_fr = gmac_dma_diagnostic_fr,
	.tx_status = gmac_get_tx_frame_status,
	.rx_status = gmac_get_rx_frame_status,
	.get_tx_len = gmac_get_tx_len,
	.set_filter = gmac_set_filter,
	.flow_ctrl = gmac_flow_ctrl,
	.pmt = gmac_pmt,
	.init_rx_desc = gmac_init_rx_desc,
	.init_tx_desc = gmac_init_tx_desc,
	.get_tx_owner = gmac_get_tx_owner,
	.get_rx_owner = gmac_get_rx_owner,
	.release_tx_desc = gmac_release_tx_desc,
	.prepare_tx_desc = gmac_prepare_tx_desc,
	.clear_tx_ic = gmac_clear_tx_ic,
	.close_tx_desc = gmac_close_tx_desc,
	.get_tx_ls = gmac_get_tx_ls,
	.set_tx_owner = gmac_set_tx_owner,
	.set_rx_owner = gmac_set_rx_owner,
	.get_rx_frame_len = gmac_get_rx_frame_len,
	.host_irq_status = gmac_irq_status,
	.set_umac_addr = gmac_set_umac_addr,
	.get_umac_addr = gmac_get_umac_addr,
};
673
674struct mac_device_info *gmac_setup(unsigned long ioaddr)
675{
676 struct mac_device_info *mac;
677 u32 uid = readl(ioaddr + GMAC_VERSION);
678
679 pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
680 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
681
682 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
683
684 mac->ops = &gmac_driver;
685 mac->hw.pmt = PMT_SUPPORTED;
686 mac->hw.link.port = GMAC_CONTROL_PS;
687 mac->hw.link.duplex = GMAC_CONTROL_DM;
688 mac->hw.link.speed = GMAC_CONTROL_FES;
689 mac->hw.mii.addr = GMAC_MII_ADDR;
690 mac->hw.mii.data = GMAC_MII_DATA;
691
692 return mac;
693}
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h
new file mode 100644
index 000000000000..684a363120a9
--- /dev/null
+++ b/drivers/net/stmmac/gmac.h
@@ -0,0 +1,204 @@
1/*******************************************************************************
2 Copyright (C) 2007-2009 STMicroelectronics Ltd
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms and conditions of the GNU General Public License,
6 version 2, as published by the Free Software Foundation.
7
8 This program is distributed in the hope it will be useful, but WITHOUT
9 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 more details.
12
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16
17 The full GNU General Public License is included in this distribution in
18 the file called "COPYING".
19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/
22
23#define GMAC_CONTROL 0x00000000 /* Configuration */
24#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
25#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
26#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
27#define GMAC_MII_ADDR 0x00000010 /* MII Address */
28#define GMAC_MII_DATA 0x00000014 /* MII Data */
29#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
30#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
31#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
32#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
33
34#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
35enum gmac_irq_status {
36 time_stamp_irq = 0x0200,
37 mmc_rx_csum_offload_irq = 0x0080,
38 mmc_tx_irq = 0x0040,
39 mmc_rx_irq = 0x0020,
40 mmc_irq = 0x0010,
41 pmt_irq = 0x0008,
42 pcs_ane_irq = 0x0004,
43 pcs_link_irq = 0x0002,
44 rgmii_irq = 0x0001,
45};
46#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
47
48/* PMT Control and Status */
49#define GMAC_PMT 0x0000002c
50enum power_event {
51 pointer_reset = 0x80000000,
52 global_unicast = 0x00000200,
53 wake_up_rx_frame = 0x00000040,
54 magic_frame = 0x00000020,
55 wake_up_frame_en = 0x00000004,
56 magic_pkt_en = 0x00000002,
57 power_down = 0x00000001,
58};
59
60/* GMAC HW ADDR regs */
61#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8))
62#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8))
63#define GMAC_MAX_UNICAST_ADDRESSES 16
64
65#define GMAC_AN_CTRL 0x000000c0 /* AN control */
66#define GMAC_AN_STATUS 0x000000c4 /* AN status */
67#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
68#define GMAC_ANE_LINK	0x000000cc	/* Auto-Neg. link partner ability */
69#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
70#define GMAC_TBI 0x000000d4 /* TBI extend status */
71#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */
72
73/* GMAC Configuration defines */
74#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
75#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
76#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
77#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
78#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
79enum inter_frame_gap {
80 GMAC_CONTROL_IFG_88 = 0x00040000,
81 GMAC_CONTROL_IFG_80 = 0x00020000,
82 GMAC_CONTROL_IFG_40 = 0x000e0000,
83};
84#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */
85#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
86#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
87#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
88#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
89#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
90#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
91#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
92#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
93#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */
94#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
95#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
96#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
97
98#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
99 GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE)
100
101/* GMAC Frame Filter defines */
102#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
103#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
104#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
105#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
106#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
107#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
108#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
109#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
110#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
111#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
112/* GMII ADDR defines */
113#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
114#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
115/* GMAC FLOW CTRL defines */
116#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
117#define GMAC_FLOW_CTRL_PT_SHIFT 16
118#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
119#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
120#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
121
122/*--- DMA BLOCK defines ---*/
123/* DMA Bus Mode register defines */
124#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
125#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
126#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
127#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
128/* Programmable burst length (passed through platform)*/
129#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
130#define DMA_BUS_MODE_PBL_SHIFT 8
131
132enum rx_tx_priority_ratio {
133 double_ratio = 0x00004000, /*2:1 */
134 triple_ratio = 0x00008000, /*3:1 */
135 quadruple_ratio = 0x0000c000, /*4:1 */
136};
137
138#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
139#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
140#define DMA_BUS_MODE_RPBL_SHIFT 17
141#define DMA_BUS_MODE_USP 0x00800000
142#define DMA_BUS_MODE_4PBL 0x01000000
143#define DMA_BUS_MODE_AAL 0x02000000
144
145/* DMA CRS Control and Status Register Mapping */
146#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
147#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
148/* DMA Bus Mode register defines */
149#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
150#define DMA_BUS_PR_RATIO_SHIFT 14
151#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
152
153/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
154#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */
155#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
156#define DMA_CONTROL_DFF	0x01000000	/* Disable flushing */
157/* Threshold for Activating the FC */
158enum rfa {
159 act_full_minus_1 = 0x00800000,
160 act_full_minus_2 = 0x00800200,
161 act_full_minus_3 = 0x00800400,
162 act_full_minus_4 = 0x00800600,
163};
164/* Threshold for Deactivating the FC */
165enum rfd {
166 deac_full_minus_1 = 0x00400000,
167 deac_full_minus_2 = 0x00400800,
168 deac_full_minus_3 = 0x00401000,
169 deac_full_minus_4 = 0x00401800,
170};
171#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
172#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
173
174enum ttc_control {
175 DMA_CONTROL_TTC_64 = 0x00000000,
176 DMA_CONTROL_TTC_128 = 0x00004000,
177 DMA_CONTROL_TTC_192 = 0x00008000,
178 DMA_CONTROL_TTC_256 = 0x0000c000,
179 DMA_CONTROL_TTC_40 = 0x00010000,
180 DMA_CONTROL_TTC_32 = 0x00014000,
181 DMA_CONTROL_TTC_24 = 0x00018000,
182 DMA_CONTROL_TTC_16 = 0x0001c000,
183};
184#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
185
186#define DMA_CONTROL_EFC 0x00000100
187#define DMA_CONTROL_FEF 0x00000080
188#define DMA_CONTROL_FUF 0x00000040
189
190enum rtc_control {
191 DMA_CONTROL_RTC_64 = 0x00000000,
192 DMA_CONTROL_RTC_32 = 0x00000008,
193 DMA_CONTROL_RTC_96 = 0x00000010,
194 DMA_CONTROL_RTC_128 = 0x00000018,
195};
196#define DMA_CONTROL_TC_RX_MASK 0xffffffe7
197
198#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
199
200/* MMC registers offset */
201#define GMAC_MMC_CTRL 0x100
202#define GMAC_MMC_RX_INTR 0x104
203#define GMAC_MMC_TX_INTR 0x108
204#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/mac100.c
new file mode 100644
index 000000000000..625171b6062b
--- /dev/null
+++ b/drivers/net/stmmac/mac100.c
@@ -0,0 +1,517 @@
1/*******************************************************************************
2 This is the driver for the MAC 10/100 on-chip Ethernet controller
3 currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
4
5 DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
6 this code.
7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9
10 This program is free software; you can redistribute it and/or modify it
11 under the terms and conditions of the GNU General Public License,
12 version 2, as published by the Free Software Foundation.
13
14 This program is distributed in the hope it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22
23 The full GNU General Public License is included in this distribution in
24 the file called "COPYING".
25
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/
28
29#include <linux/netdevice.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/phy.h>
33
34#include "common.h"
35#include "mac100.h"
36
37#undef MAC100_DEBUG
38/*#define MAC100_DEBUG*/
39#ifdef MAC100_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args)
41#else
42#define DBG(fmt, args...) do { } while (0)
43#endif
44
/* One-time MAC 10/100 core setup: OR the MAC_CORE_INIT bits into the
 * control register and (optionally) enable 802.1Q VLAN tag detection. */
static void mac100_core_init(unsigned long ioaddr)
{
	u32 value = readl(ioaddr + MAC_CONTROL);

	writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);

#ifdef STMMAC_VLAN_TAG_USED
	writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
#endif
	return;
}
56
/* Debug helper: dump the MAC CSRs and the MMC counter registers. */
static void mac100_dump_mac_regs(unsigned long ioaddr)
{
	pr_info("\t----------------------------------------------\n"
		"\t  MAC100 CSR (base addr = 0x%8x)\n"
		"\t----------------------------------------------\n",
		(unsigned int)ioaddr);
	pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
		readl(ioaddr + MAC_CONTROL));
	pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
		readl(ioaddr + MAC_ADDR_HIGH));
	pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
		readl(ioaddr + MAC_ADDR_LOW));
	pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
		MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
	pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
		MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
	pr_info("\tflow control (offset 0x%x): 0x%08x\n",
		MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
	pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
		readl(ioaddr + MAC_VLAN1));
	pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
		readl(ioaddr + MAC_VLAN2));
	pr_info("\n\tMAC management counter registers\n");
	pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
		MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
	pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
		MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
	pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
		MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
	pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
		MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
	pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
		MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
	return;
}
92
/* Reset and configure the MAC 10/100 DMA engine: software reset, bus mode
 * (with the platform-provided PBL), IRQ mask and descriptor list bases.
 * Always returns 0. */
static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
			   u32 dma_rx)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);
	/* DMA SW reset */
	value |= DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + DMA_BUS_MODE);
	/* NOTE(review): unbounded busy-wait — if the HW never clears the
	 * reset bit this spins forever; consider adding a timeout. */
	do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));

	/* Enable Application Access by writing to DMA CSR0 */
	writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
	       ioaddr + DMA_BUS_MODE);

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);

	/* The base address of the RX/TX descriptor lists must be written into
	 * DMA CSR3 and CSR4, respectively. */
	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);

	return 0;
}
116
/* Store and Forward capability is not used at all..
 * The transmit threshold can be programmed by
 * setting the TTC bits in the DMA control register.
 * NOTE(review): rxmode is accepted but unused here, and the TTC bits are
 * OR-ed in without first clearing the field — fine for a single call at
 * init, but repeated calls with a lower txmode would not take effect. */
static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
				      int rxmode)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	if (txmode <= 32)
		csr6 |= DMA_CONTROL_TTC_32;
	else if (txmode <= 64)
		csr6 |= DMA_CONTROL_TTC_64;
	else
		csr6 |= DMA_CONTROL_TTC_128;

	writel(csr6, ioaddr + DMA_CONTROL);

	return;
}
136
/* Debug helper: dump DMA CSR0..CSR8 plus the current buffer pointers. */
static void mac100_dump_dma_regs(unsigned long ioaddr)
{
	int i;

	DBG(KERN_DEBUG "MAC100 DMA CSR \n");
	for (i = 0; i < 9; i++)
		pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
			 (DMA_BUS_MODE + i * 4),
			 readl(ioaddr + DMA_BUS_MODE + i * 4));
	DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
	    DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
	DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
	    DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
	return;
}
152
/* DMA controller has two counters to track the number of
   the receive missed frames.
   Reading CSR8 clears it in hardware; when an overflow flag is set the
   counter saturated, so the maximum value is accumulated instead. */
static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
				     unsigned long ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);

	if (unlikely(csr8)) {
		if (csr8 & DMA_MISSED_FRAME_OVE) {
			/* overflow: FIFO overflow counter saturated */
			stats->rx_over_errors += 0x800;
			x->rx_overflow_cntr += 0x800;
		} else {
			unsigned int ove_cntr;
			ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
			stats->rx_over_errors += ove_cntr;
			x->rx_overflow_cntr += ove_cntr;
		}

		if (csr8 & DMA_MISSED_FRAME_OVE_M) {
			/* overflow: missed-frame counter saturated */
			stats->rx_missed_errors += 0xffff;
			x->rx_missed_cntr += 0xffff;
		} else {
			unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
			stats->rx_missed_errors += miss_f;
			x->rx_missed_cntr += miss_f;
		}
	}
	return;
}
183
/* Inspect a completed TX descriptor, update the error counters and
 * return 0 on clean transmission or -1 on any reported TX error. */
static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
				      struct dma_desc *p, unsigned long ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.tx.error_summary)) {
		if (unlikely(p->des01.tx.underflow_error)) {
			x->tx_underflow++;
			stats->tx_fifo_errors++;
		}
		if (unlikely(p->des01.tx.no_carrier)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.tx.loss_carrier)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((p->des01.tx.excessive_deferral) ||
			     (p->des01.tx.excessive_collisions) ||
			     (p->des01.tx.late_collision)))
			stats->collisions += p->des01.tx.collision_count;
		ret = -1;
	}
	/* heartbeat failure is reported outside the error summary */
	if (unlikely(p->des01.tx.heartbeat_fail)) {
		x->tx_heartbeat++;
		stats->tx_heartbeat_errors++;
		ret = -1;
	}
	if (unlikely(p->des01.tx.deferred))
		x->tx_deferred++;

	return ret;
}
219
/* Number of bytes mapped in buffer 1 of this TX descriptor. */
static int mac100_get_tx_len(struct dma_desc *p)
{
	return p->des01.tx.buffer1_size;
}
224
/* This function verifies if each incoming frame has some errors
 * and, if required, updates the multicast statistics.
 * In case of success, it returns csum_none because the device
 * is not able to compute the csum in HW. */
static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
				      struct dma_desc *p)
{
	int ret = csum_none;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	/* frame split across descriptors cannot be handled by this MAC */
	if (unlikely(p->des01.rx.last_descriptor == 0)) {
		pr_warning("mac100 Error: Oversized Ethernet "
			   "frame spanned multiple buffers\n");
		stats->rx_length_errors++;
		return discard_frame;
	}

	if (unlikely(p->des01.rx.error_summary)) {
		if (unlikely(p->des01.rx.descriptor_error))
			x->rx_desc++;
		if (unlikely(p->des01.rx.partial_frame_error))
			x->rx_partial++;
		if (unlikely(p->des01.rx.run_frame))
			x->rx_runt++;
		if (unlikely(p->des01.rx.frame_too_long))
			x->rx_toolong++;
		if (unlikely(p->des01.rx.collision)) {
			x->rx_collision++;
			stats->collisions++;
		}
		if (unlikely(p->des01.rx.crc_error)) {
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}
	if (unlikely(p->des01.rx.dribbling))
		ret = discard_frame;

	if (unlikely(p->des01.rx.length_error)) {
		x->rx_lenght++;	/* field-name typo lives in stmmac_extra_stats */
		ret = discard_frame;
	}
	if (unlikely(p->des01.rx.mii_error)) {
		x->rx_mii++;
		ret = discard_frame;
	}
	if (p->des01.rx.multicast_frame) {
		x->rx_multicast++;
		stats->multicast++;
	}
	return ret;
}
278
/* The MAC 10/100 core has no host interrupt status register: nothing to do. */
static void mac100_irq_status(unsigned long ioaddr)
{
	return;
}
283
/* Program the (single) MAC address register pair.
 * NOTE(review): @reg_n is ignored — this core has one perfect-filter slot;
 * callers passing reg_n > 0 all hit the same registers. */
static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
				 unsigned int reg_n)
{
	stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
289
/* Read back the MAC address (@reg_n ignored — single address slot). */
static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
				 unsigned int reg_n)
{
	stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
295
/* Configure RX filtering in MAC_CONTROL from the netdev flags and the
 * multicast list: promiscuous / pass-all-multicast / 64-bit hash filter. */
static void mac100_set_filter(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	u32 value = readl(ioaddr + MAC_CONTROL);

	if (dev->flags & IFF_PROMISC) {
		value |= MAC_CONTROL_PR;
		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
			   MAC_CONTROL_HP);
	} else if ((dev->mc_count > HASH_TABLE_SIZE)
		   || (dev->flags & IFF_ALLMULTI)) {
		value |= MAC_CONTROL_PM;
		value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
		writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
		writel(0xffffffff, ioaddr + MAC_HASH_LOW);
	} else if (dev->mc_count == 0) {	/* no multicast */
		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
			   MAC_CONTROL_HO | MAC_CONTROL_HP);
	} else {
		int i;
		u32 mc_filter[2];
		struct dev_mc_list *mclist;

		/* Perfect filter mode for physical address and Hash
		   filter for multicast */
		value |= MAC_CONTROL_HP;
		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
			   | MAC_CONTROL_HO);

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list;
		     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table */
			int bit_nr =
			    ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
		writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
	}

	writel(value, ioaddr + MAC_CONTROL);

	DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
	    "HI 0x%08x, LO 0x%08x\n",
	    __func__, readl(ioaddr + MAC_CONTROL),
	    readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
	return;
}
349
350static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
351 unsigned int fc, unsigned int pause_time)
352{
353 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
354
355 if (duplex)
356 flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
357 writel(flow, ioaddr + MAC_FLOW_CTRL);
358
359 return;
360}
361
/* No PMT module supported in our SoC for the Ethernet Controller,
 * so Wake-on-LAN configuration is a no-op here. */
static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
{
	return;
}
367
368static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
369 int disable_rx_ic)
370{
371 int i;
372 for (i = 0; i < ring_size; i++) {
373 p->des01.rx.own = 1;
374 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
375 if (i == ring_size - 1)
376 p->des01.rx.end_ring = 1;
377 if (disable_rx_ic)
378 p->des01.rx.disable_ic = 1;
379 p++;
380 }
381 return;
382}
383
384static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
385{
386 int i;
387 for (i = 0; i < ring_size; i++) {
388 p->des01.tx.own = 0;
389 if (i == ring_size - 1)
390 p->des01.tx.end_ring = 1;
391 p++;
392 }
393 return;
394}
395
/* Returns 1 while the DMA still owns this TX descriptor. */
static int mac100_get_tx_owner(struct dma_desc *p)
{
	return p->des01.tx.own;
}
400
/* Returns 1 while the DMA still owns this RX descriptor. */
static int mac100_get_rx_owner(struct dma_desc *p)
{
	return p->des01.rx.own;
}
405
/* Hand the TX descriptor over to the DMA engine. */
static void mac100_set_tx_owner(struct dma_desc *p)
{
	p->des01.tx.own = 1;
}
410
/* Hand the RX descriptor back to the DMA engine. */
static void mac100_set_rx_owner(struct dma_desc *p)
{
	p->des01.rx.own = 1;
}
415
/* Returns non-zero if this TX descriptor closes a frame (last segment). */
static int mac100_get_tx_ls(struct dma_desc *p)
{
	return p->des01.tx.last_segment;
}
420
/* Recycle a completed TX descriptor: clear the xmit control fields and the
 * reported status bits individually (no memset — the normal-descriptor
 * layout interleaves other fields), keeping the end-of-ring flag. */
static void mac100_release_tx_desc(struct dma_desc *p)
{
	int ter = p->des01.tx.end_ring;

	/* clean field used within the xmit */
	p->des01.tx.first_segment = 0;
	p->des01.tx.last_segment = 0;
	p->des01.tx.buffer1_size = 0;

	/* clean status reported */
	p->des01.tx.error_summary = 0;
	p->des01.tx.underflow_error = 0;
	p->des01.tx.no_carrier = 0;
	p->des01.tx.loss_carrier = 0;
	p->des01.tx.excessive_deferral = 0;
	p->des01.tx.excessive_collisions = 0;
	p->des01.tx.late_collision = 0;
	p->des01.tx.heartbeat_fail = 0;
	p->des01.tx.deferred = 0;

	/* set termination field */
	p->des01.tx.end_ring = ter;

	return;
}
446
/* Fill a TX descriptor for transmission.  @csum_flag is ignored: this
 * core cannot insert checksums in hardware. */
static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				   int csum_flag)
{
	p->des01.tx.first_segment = is_fs;
	p->des01.tx.buffer1_size = len;
}
453
/* Suppress the completion interrupt for this TX descriptor (IRQ mitigation). */
static void mac100_clear_tx_ic(struct dma_desc *p)
{
	p->des01.tx.interrupt = 0;
}
458
/* Close a frame: mark the last segment and request a completion interrupt. */
static void mac100_close_tx_desc(struct dma_desc *p)
{
	p->des01.tx.last_segment = 1;
	p->des01.tx.interrupt = 1;
}
464
/* Length (in bytes) of the received frame as reported by the descriptor. */
static int mac100_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.rx.frame_length;
}
469
/* stmmac HW-abstraction operations for the MAC 10/100 (normal descriptors). */
struct stmmac_ops mac100_driver = {
	.core_init = mac100_core_init,
	.dump_mac_regs = mac100_dump_mac_regs,
	.dma_init = mac100_dma_init,
	.dump_dma_regs = mac100_dump_dma_regs,
	.dma_mode = mac100_dma_operation_mode,
	.dma_diagnostic_fr = mac100_dma_diagnostic_fr,
	.tx_status = mac100_get_tx_frame_status,
	.rx_status = mac100_get_rx_frame_status,
	.get_tx_len = mac100_get_tx_len,
	.set_filter = mac100_set_filter,
	.flow_ctrl = mac100_flow_ctrl,
	.pmt = mac100_pmt,
	.init_rx_desc = mac100_init_rx_desc,
	.init_tx_desc = mac100_init_tx_desc,
	.get_tx_owner = mac100_get_tx_owner,
	.get_rx_owner = mac100_get_rx_owner,
	.release_tx_desc = mac100_release_tx_desc,
	.prepare_tx_desc = mac100_prepare_tx_desc,
	.clear_tx_ic = mac100_clear_tx_ic,
	.close_tx_desc = mac100_close_tx_desc,
	.get_tx_ls = mac100_get_tx_ls,
	.set_tx_owner = mac100_set_tx_owner,
	.set_rx_owner = mac100_set_rx_owner,
	.get_rx_frame_len = mac100_get_rx_frame_len,
	.host_irq_status = mac100_irq_status,
	.set_umac_addr = mac100_set_umac_addr,
	.get_umac_addr = mac100_get_umac_addr,
};
499
500struct mac_device_info *mac100_setup(unsigned long ioaddr)
501{
502 struct mac_device_info *mac;
503
504 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
505
506 pr_info("\tMAC 10/100\n");
507
508 mac->ops = &mac100_driver;
509 mac->hw.pmt = PMT_NOT_SUPPORTED;
510 mac->hw.link.port = MAC_CONTROL_PS;
511 mac->hw.link.duplex = MAC_CONTROL_F;
512 mac->hw.link.speed = 0;
513 mac->hw.mii.addr = MAC_MII_ADDR;
514 mac->hw.mii.data = MAC_MII_DATA;
515
516 return mac;
517}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/mac100.h
new file mode 100644
index 000000000000..0f8f110d004a
--- /dev/null
+++ b/drivers/net/stmmac/mac100.h
@@ -0,0 +1,116 @@
1/*******************************************************************************
2 MAC 10/100 Header File
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25/*----------------------------------------------------------------------------
26 * MAC BLOCK defines
27 *---------------------------------------------------------------------------*/
28/* MAC CSR offset */
29#define MAC_CONTROL 0x00000000 /* MAC Control */
30#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */
31#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */
32#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */
33#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */
34#define MAC_MII_ADDR 0x00000014 /* MII Address */
35#define MAC_MII_DATA 0x00000018 /* MII Data */
36#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */
37#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */
38#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
39
40/* MAC CTRL defines */
41#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
42#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
43#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
44#define MAC_CONTROL_PS 0x08000000 /* Port Select */
45#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
46#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
47#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
48#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
49#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
50#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
51#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
52#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
53#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
54#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
55#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
56#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
57#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
58#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
59#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
60#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
61#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
62#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
63#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
64#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
65#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
66
67#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
68
69/* MAC FLOW CTRL defines */
70#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
71#define MAC_FLOW_CTRL_PT_SHIFT 16
72#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
73#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
74#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
75
76/* MII ADDR defines */
77#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
78#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
79
80/*----------------------------------------------------------------------------
81 * DMA BLOCK defines
82 *---------------------------------------------------------------------------*/
83
84/* DMA Bus Mode register defines */
85#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
86#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
87#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
88#define DMA_BUS_MODE_PBL_SHIFT 8
89#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
90#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
91#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
92#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
93#define DMA_BUS_MODE_DEFAULT 0x00000000
94
95/* DMA Control register defines */
96#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
97
98/* Transmit Threshold Control */
/* Transmit Threshold Control values for the DMA control register.
 * NOTE(review): DMA_CONTROL_SE/DMA_CONTROL_OSF are independent control
 * bits that merely share this enum, not TTC thresholds. */
enum ttc_control {
	DMA_CONTROL_TTC_DEFAULT = 0x00000000,	/* Threshold is 32 DWORDS */
	DMA_CONTROL_TTC_64 = 0x00004000,	/* Threshold is 64 DWORDS */
	DMA_CONTROL_TTC_128 = 0x00008000,	/* Threshold is 128 DWORDS */
	DMA_CONTROL_TTC_256 = 0x0000c000,	/* Threshold is 256 DWORDS */
	DMA_CONTROL_TTC_18 = 0x00400000,	/* Threshold is 18 DWORDS */
	DMA_CONTROL_TTC_24 = 0x00404000,	/* Threshold is 24 DWORDS */
	DMA_CONTROL_TTC_32 = 0x00408000,	/* Threshold is 32 DWORDS */
	DMA_CONTROL_TTC_40 = 0x0040c000,	/* Threshold is 40 DWORDS */
	DMA_CONTROL_SE = 0x00000008,	/* Stop On Empty */
	DMA_CONTROL_OSF = 0x00000004,	/* Operate On 2nd Frame */
};
111
/* STMAC110 DMA Missed Frame Counter register defines */
#define DMA_MISSED_FRAME_OVE	0x10000000	/* FIFO Overflow */
#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000	/* Overflow Frame Counter */
#define DMA_MISSED_FRAME_OVE_M	0x00010000	/* Missed Frame Overflow */
#define DMA_MISSED_FRAME_M_CNTR	0x0000ffff	/* Missed Frame Counter */
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
new file mode 100644
index 000000000000..6d2eae3040e5
--- /dev/null
+++ b/drivers/net/stmmac/stmmac.h
@@ -0,0 +1,98 @@
1/*******************************************************************************
2 Copyright (C) 2007-2009 STMicroelectronics Ltd
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms and conditions of the GNU General Public License,
6 version 2, as published by the Free Software Foundation.
7
8 This program is distributed in the hope it will be useful, but WITHOUT
9 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 more details.
12
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16
17 The full GNU General Public License is included in this distribution in
18 the file called "COPYING".
19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/
22
23#define DRV_MODULE_VERSION "Oct_09"
24
25#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
26#define STMMAC_VLAN_TAG_USED
27#include <linux/if_vlan.h>
28#endif
29
30#include "common.h"
31#ifdef CONFIG_STMMAC_TIMER
32#include "stmmac_timer.h"
33#endif
34
/* Per-interface driver state, stored in the net_device private area. */
struct stmmac_priv {
	/* Frequently used values are kept adjacent for cache effect */
	struct dma_desc *dma_tx ____cacheline_aligned;	/* TX descriptor ring */
	dma_addr_t dma_tx_phy;		/* bus address of the TX ring */
	struct sk_buff **tx_skbuff;	/* skb backing each TX descriptor */
	unsigned int cur_tx;		/* next TX descriptor to use */
	unsigned int dirty_tx;		/* next TX descriptor to reclaim */
	unsigned int dma_tx_size;	/* number of TX descriptors */
	int tx_coe;			/* TX checksum offload (COE) enabled */
	int tx_coalesce;		/* TX interrupt-coalescing setting */

	struct dma_desc *dma_rx ;	/* RX descriptor ring */
	unsigned int cur_rx;		/* next RX descriptor to process */
	unsigned int dirty_rx;		/* next RX descriptor to refill */
	struct sk_buff **rx_skbuff;	/* skb backing each RX descriptor */
	dma_addr_t *rx_skbuff_dma;	/* DMA mapping of each RX skb */
	struct sk_buff_head rx_recycle;	/* pool of recycled RX skbs */

	struct net_device *dev;
	int is_gmac;			/* nonzero on GMAC (10/100/1000) cores */
	dma_addr_t dma_rx_phy;		/* bus address of the RX ring */
	unsigned int dma_rx_size;	/* number of RX descriptors */
	int rx_csum;			/* RX checksum offload enabled */
	unsigned int dma_buf_sz;	/* size of each DMA data buffer */
	struct device *device;
	struct mac_device_info *mac_type;	/* core-specific ops/regs */

	struct stmmac_extra_stats xstats;	/* extended HW/SW counters */
	struct napi_struct napi;

	/* PHY / MDIO management */
	phy_interface_t phy_interface;
	int pbl;			/* DMA programmable burst length */
	int bus_id;
	int phy_addr;
	int phy_mask;
	int (*phy_reset) (void *priv);
	void (*fix_mac_speed) (void *priv, unsigned int speed);
	void *bsp_priv;			/* board-specific private data */

	int phy_irq;
	struct phy_device *phydev;
	int oldlink;			/* cached link state for adjust_link */
	int speed;
	int oldduplex;
	unsigned int flow_ctrl;		/* FLOW_RX/FLOW_TX/FLOW_AUTO bits */
	unsigned int pause;		/* pause time for flow control */
	struct mii_bus *mii;

	u32 msg_enable;			/* netif message-level bitmask */
	spinlock_t lock;		/* protects MAC regs and link state */
	int wolopts;			/* active Wake-on-LAN options */
	int wolenabled;			/* PMT_SUPPORTED/PMT_NOT_SUPPORTED */
	int shutdown;
#ifdef CONFIG_STMMAC_TIMER
	struct stmmac_timer *tm;	/* external mitigation timer */
#endif
#ifdef STMMAC_VLAN_TAG_USED
	struct vlan_group *vlgrp;
#endif
};
95
/* Implemented in stmmac_mdio.c and stmmac_ethtool.c respectively. */
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
new file mode 100644
index 000000000000..694ebe6a0758
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -0,0 +1,395 @@
1/*******************************************************************************
2 STMMAC Ethtool support
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/mii.h>
28#include <linux/phy.h>
29
30#include "stmmac.h"
31
32#define REG_SPACE_SIZE 0x1054
33#define MAC100_ETHTOOL_NAME "st_mac100"
34#define GMAC_ETHTOOL_NAME "st_gmac"
35
/* Maps one exported statistic name onto its storage inside stmmac_priv. */
struct stmmac_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by "ethtool -S" */
	int sizeof_stat;	/* size of the counter field (u32 or u64) */
	int stat_offset;	/* offsetof() the field within stmmac_priv */
};
41
/* Build one table entry from a field of struct stmmac_extra_stats. */
#define STMMAC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),	\
	offsetof(struct stmmac_priv, xstats.m)}

/* Extended statistics exported through ethtool.
 * NOTE(review): "rx_lenght" is spelled to match the (misspelled) field
 * in struct stmmac_extra_stats and must stay in sync with it. */
static const struct stmmac_stats stmmac_gstrings_stats[] = {
	STMMAC_STAT(tx_underflow),
	STMMAC_STAT(tx_carrier),
	STMMAC_STAT(tx_losscarrier),
	STMMAC_STAT(tx_heartbeat),
	STMMAC_STAT(tx_deferred),
	STMMAC_STAT(tx_vlan),
	STMMAC_STAT(rx_vlan),
	STMMAC_STAT(tx_jabber),
	STMMAC_STAT(tx_frame_flushed),
	STMMAC_STAT(tx_payload_error),
	STMMAC_STAT(tx_ip_header_error),
	STMMAC_STAT(rx_desc),
	STMMAC_STAT(rx_partial),
	STMMAC_STAT(rx_runt),
	STMMAC_STAT(rx_toolong),
	STMMAC_STAT(rx_collision),
	STMMAC_STAT(rx_crc),
	STMMAC_STAT(rx_lenght),
	STMMAC_STAT(rx_mii),
	STMMAC_STAT(rx_multicast),
	STMMAC_STAT(rx_gmac_overflow),
	STMMAC_STAT(rx_watchdog),
	STMMAC_STAT(da_rx_filter_fail),
	STMMAC_STAT(sa_rx_filter_fail),
	STMMAC_STAT(rx_missed_cntr),
	STMMAC_STAT(rx_overflow_cntr),
	STMMAC_STAT(tx_undeflow_irq),
	STMMAC_STAT(tx_process_stopped_irq),
	STMMAC_STAT(tx_jabber_irq),
	STMMAC_STAT(rx_overflow_irq),
	STMMAC_STAT(rx_buf_unav_irq),
	STMMAC_STAT(rx_process_stopped_irq),
	STMMAC_STAT(rx_watchdog_irq),
	STMMAC_STAT(tx_early_irq),
	STMMAC_STAT(fatal_bus_error_irq),
	STMMAC_STAT(threshold),
	STMMAC_STAT(tx_pkt_n),
	STMMAC_STAT(rx_pkt_n),
	STMMAC_STAT(poll_n),
	STMMAC_STAT(sched_timer_n),
	STMMAC_STAT(normal_irq_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
90
91void stmmac_ethtool_getdrvinfo(struct net_device *dev,
92 struct ethtool_drvinfo *info)
93{
94 struct stmmac_priv *priv = netdev_priv(dev);
95
96 if (!priv->is_gmac)
97 strcpy(info->driver, MAC100_ETHTOOL_NAME);
98 else
99 strcpy(info->driver, GMAC_ETHTOOL_NAME);
100
101 strcpy(info->version, DRV_MODULE_VERSION);
102 info->fw_version[0] = '\0';
103 info->n_stats = STMMAC_STATS_LEN;
104 return;
105}
106
/*
 * Report the current link settings via the attached PHY.
 * Returns -ENODEV when no PHY is registered and -EBUSY while the
 * interface is down (speed/duplex cannot be tracked then).
 */
int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phy = priv->phydev;
	int rc;
	if (phy == NULL) {
		pr_err("%s: %s: PHY is not registered\n",
		       __func__, dev->name);
		return -ENODEV;
	}
	if (!netif_running(dev)) {
		pr_err("%s: interface is disabled: we cannot track "
		"link speed / duplex setting\n", dev->name);
		return -EBUSY;
	}
	cmd->transceiver = XCVR_INTERNAL;
	/* NOTE(review): this getter takes the lock with IRQs disabled while
	 * the setter uses a plain spin_lock -- confirm which is intended. */
	spin_lock_irq(&priv->lock);
	rc = phy_ethtool_gset(phy, cmd);
	spin_unlock_irq(&priv->lock);
	return rc;
}
128
129int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
130{
131 struct stmmac_priv *priv = netdev_priv(dev);
132 struct phy_device *phy = priv->phydev;
133 int rc;
134
135 spin_lock(&priv->lock);
136 rc = phy_ethtool_sset(phy, cmd);
137 spin_unlock(&priv->lock);
138
139 return rc;
140}
141
142u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
143{
144 struct stmmac_priv *priv = netdev_priv(dev);
145 return priv->msg_enable;
146}
147
148void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
149{
150 struct stmmac_priv *priv = netdev_priv(dev);
151 priv->msg_enable = level;
152
153}
154
155int stmmac_check_if_running(struct net_device *dev)
156{
157 if (!netif_running(dev))
158 return -EBUSY;
159 return 0;
160}
161
/* Size of the register dump returned by stmmac_ethtool_gregs(). */
int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
	return REG_SPACE_SIZE;
}
166
/*
 * Dump MAC and DMA CSRs into the ethtool register-space buffer.
 * The register counts differ per core:
 *  - MAC 10/100: 12 MAC regs, 9 DMA regs, plus the two current
 *    TX/RX buffer-address registers;
 *  - GMAC: 55 MAC regs and 22 DMA regs.
 * NOTE(review): counts are hard-coded to the respective register maps --
 * confirm against the core databooks if the IP version changes.
 */
void stmmac_ethtool_gregs(struct net_device *dev,
			  struct ethtool_regs *regs, void *space)
{
	int i;
	u32 *reg_space = (u32 *) space;

	struct stmmac_priv *priv = netdev_priv(dev);

	/* Unread slots stay zero so the dump size is always REG_SPACE_SIZE */
	memset(reg_space, 0x0, REG_SPACE_SIZE);

	if (!priv->is_gmac) {
		/* MAC registers */
		for (i = 0; i < 12; i++)
			reg_space[i] = readl(dev->base_addr + (i * 4));
		/* DMA registers */
		for (i = 0; i < 9; i++)
			reg_space[i + 12] =
			    readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
		reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
		reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
	} else {
		/* MAC registers */
		for (i = 0; i < 55; i++)
			reg_space[i] = readl(dev->base_addr + (i * 4));
		/* DMA registers */
		for (i = 0; i < 22; i++)
			reg_space[i + 55] =
			    readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
	}

	return;
}
199
200int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
201{
202 if (data)
203 netdev->features |= NETIF_F_HW_CSUM;
204 else
205 netdev->features &= ~NETIF_F_HW_CSUM;
206
207 return 0;
208}
209
210u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
211{
212 struct stmmac_priv *priv = netdev_priv(dev);
213
214 return priv->rx_csum;
215}
216
217static void
218stmmac_get_pauseparam(struct net_device *netdev,
219 struct ethtool_pauseparam *pause)
220{
221 struct stmmac_priv *priv = netdev_priv(netdev);
222
223 spin_lock(&priv->lock);
224
225 pause->rx_pause = 0;
226 pause->tx_pause = 0;
227 pause->autoneg = priv->phydev->autoneg;
228
229 if (priv->flow_ctrl & FLOW_RX)
230 pause->rx_pause = 1;
231 if (priv->flow_ctrl & FLOW_TX)
232 pause->tx_pause = 1;
233
234 spin_unlock(&priv->lock);
235 return;
236}
237
/*
 * Update the flow-control configuration.  With autoneg active the new
 * pause settings are propagated by restarting auto-negotiation through
 * the PHY; otherwise the MAC flow-control registers are programmed
 * directly.
 */
static int
stmmac_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	struct phy_device *phy = priv->phydev;
	int new_pause = FLOW_OFF;
	int ret = 0;

	spin_lock(&priv->lock);

	if (pause->rx_pause)
		new_pause |= FLOW_RX;
	if (pause->tx_pause)
		new_pause |= FLOW_TX;

	priv->flow_ctrl = new_pause;

	if (phy->autoneg) {
		if (netif_running(netdev)) {
			struct ethtool_cmd cmd;
			/* auto-negotiation automatically restarted */
			/* NOTE(review): cmd is only partially initialised;
			 * phy_ethtool_sset() presumably reads just these
			 * fields -- confirm. */
			cmd.cmd = ETHTOOL_NWAY_RST;
			cmd.supported = phy->supported;
			cmd.advertising = phy->advertising;
			cmd.autoneg = phy->autoneg;
			cmd.speed = phy->speed;
			cmd.duplex = phy->duplex;
			cmd.phy_address = phy->addr;
			ret = phy_ethtool_sset(phy, &cmd);
		}
	} else {
		unsigned long ioaddr = netdev->base_addr;
		/* Program the MAC flow-control registers immediately. */
		priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
					       priv->flow_ctrl, priv->pause);
	}
	spin_unlock(&priv->lock);
	return ret;
}
277
/*
 * Fill the ethtool statistics array from priv->xstats, refreshing any
 * hardware-maintained counters first via the core's diagnostic hook.
 * Each entry is widened to u64 according to the table's recorded size.
 */
static void stmmac_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *dummy, u64 *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	int i;

	/* Update HW stats if supported */
	priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
					       ioaddr);

	for (i = 0; i < STMMAC_STATS_LEN; i++) {
		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
		data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
		sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
	}

	return;
}
297
298static int stmmac_get_sset_count(struct net_device *netdev, int sset)
299{
300 switch (sset) {
301 case ETH_SS_STATS:
302 return STMMAC_STATS_LEN;
303 default:
304 return -EOPNOTSUPP;
305 }
306}
307
308static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
309{
310 int i;
311 u8 *p = data;
312
313 switch (stringset) {
314 case ETH_SS_STATS:
315 for (i = 0; i < STMMAC_STATS_LEN; i++) {
316 memcpy(p, stmmac_gstrings_stats[i].stat_string,
317 ETH_GSTRING_LEN);
318 p += ETH_GSTRING_LEN;
319 }
320 break;
321 default:
322 WARN_ON(1);
323 break;
324 }
325 return;
326}
327
/* Currently only support WOL through Magic packet. */
static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* wol is zeroed by the ethtool core, so when PMT is not supported
	 * we simply report nothing. */
	spin_lock_irq(&priv->lock);
	if (priv->wolenabled == PMT_SUPPORTED) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = priv->wolopts;
	}
	spin_unlock_irq(&priv->lock);
}
340
341static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
342{
343 struct stmmac_priv *priv = netdev_priv(dev);
344 u32 support = WAKE_MAGIC;
345
346 if (priv->wolenabled == PMT_NOT_SUPPORTED)
347 return -EINVAL;
348
349 if (wol->wolopts & ~support)
350 return -EINVAL;
351
352 if (wol->wolopts == 0)
353 device_set_wakeup_enable(priv->device, 0);
354 else
355 device_set_wakeup_enable(priv->device, 1);
356
357 spin_lock_irq(&priv->lock);
358 priv->wolopts = wol->wolopts;
359 spin_unlock_irq(&priv->lock);
360
361 return 0;
362}
363
/* ethtool entry points; .begin gates every handler on the interface
 * being up (see stmmac_check_if_running). */
static struct ethtool_ops stmmac_ethtool_ops = {
	.begin = stmmac_check_if_running,
	.get_drvinfo = stmmac_ethtool_getdrvinfo,
	.get_settings = stmmac_ethtool_getsettings,
	.set_settings = stmmac_ethtool_setsettings,
	.get_msglevel = stmmac_ethtool_getmsglevel,
	.set_msglevel = stmmac_ethtool_setmsglevel,
	.get_regs = stmmac_ethtool_gregs,
	.get_regs_len = stmmac_ethtool_get_regs_len,
	.get_link = ethtool_op_get_link,
	.get_rx_csum = stmmac_ethtool_get_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = stmmac_ethtool_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_pauseparam = stmmac_get_pauseparam,
	.set_pauseparam = stmmac_set_pauseparam,
	.get_ethtool_stats = stmmac_get_ethtool_stats,
	.get_strings = stmmac_get_strings,
	.get_wol = stmmac_get_wol,
	.set_wol = stmmac_set_wol,
	.get_sset_count = stmmac_get_sset_count,
#ifdef NETIF_F_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
#endif
};
391
/* Attach the stmmac ethtool operations to a freshly-created netdev. */
void stmmac_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
new file mode 100644
index 000000000000..c2f14dc9ba28
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -0,0 +1,2204 @@
1/*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
4
5 Copyright (C) 2007-2009 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24
25 Documentation available at:
26 http://www.stlinux.com
27 Support available at:
28 https://bugzilla.stlinux.com/
29*******************************************************************************/
30
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/kernel.h>
34#include <linux/interrupt.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/platform_device.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/skbuff.h>
41#include <linux/ethtool.h>
42#include <linux/if_ether.h>
43#include <linux/crc32.h>
44#include <linux/mii.h>
45#include <linux/phy.h>
46#include <linux/if_vlan.h>
47#include <linux/dma-mapping.h>
48#include <linux/stm/soc.h>
49#include "stmmac.h"
50
51#define STMMAC_RESOURCE_NAME "stmmaceth"
52#define PHY_RESOURCE_NAME "stmmacphy"
53
54#undef STMMAC_DEBUG
55/*#define STMMAC_DEBUG*/
56#ifdef STMMAC_DEBUG
57#define DBG(nlevel, klevel, fmt, args...) \
58 ((void)(netif_msg_##nlevel(priv) && \
59 printk(KERN_##klevel fmt, ## args)))
60#else
61#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
62#endif
63
64#undef STMMAC_RX_DEBUG
65/*#define STMMAC_RX_DEBUG*/
66#ifdef STMMAC_RX_DEBUG
67#define RX_DBG(fmt, args...) printk(fmt, ## args)
68#else
69#define RX_DBG(fmt, args...) do { } while (0)
70#endif
71
#undef STMMAC_XMIT_DEBUG
/*#define STMMAC_XMIT_DEBUG*/
#ifdef STMMAC_XMIT_DEBUG	/* was STMMAC_TX_DEBUG: the guard tested a
				 * macro that is never defined, so TX_DBG
				 * could never be enabled */
#define TX_DBG(fmt, args...)  printk(fmt, ## args)
#else
#define TX_DBG(fmt, args...)  do { } while (0)
#endif
79
80#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
81#define JUMBO_LEN 9000
82
83/* Module parameters */
84#define TX_TIMEO 5000 /* default 5 seconds */
85static int watchdog = TX_TIMEO;
86module_param(watchdog, int, S_IRUGO | S_IWUSR);
87MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
88
89static int debug = -1; /* -1: default, 0: no output, 16: all */
90module_param(debug, int, S_IRUGO | S_IWUSR);
91MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
92
93static int phyaddr = -1;
94module_param(phyaddr, int, S_IRUGO);
95MODULE_PARM_DESC(phyaddr, "Physical device address");
96
97#define DMA_TX_SIZE 256
98static int dma_txsize = DMA_TX_SIZE;
99module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
101
102#define DMA_RX_SIZE 256
103static int dma_rxsize = DMA_RX_SIZE;
104module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
106
107static int flow_ctrl = FLOW_OFF;
108module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
109MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
110
111static int pause = PAUSE_TIME;
112module_param(pause, int, S_IRUGO | S_IWUSR);
113MODULE_PARM_DESC(pause, "Flow Control Pause Time");
114
115#define TC_DEFAULT 64
116static int tc = TC_DEFAULT;
117module_param(tc, int, S_IRUGO | S_IWUSR);
118MODULE_PARM_DESC(tc, "DMA threshold control value");
119
120#define RX_NO_COALESCE 1 /* Always interrupt on completion */
121#define TX_NO_COALESCE -1 /* No moderation by default */
122
/* Pay attention to tune this parameter; take care of both
 * hardware capability and network stability/performance impact.
 * Many tests showed that ~4ms latency seems to be good enough. */
126#ifdef CONFIG_STMMAC_TIMER
127#define DEFAULT_PERIODIC_RATE 256
128static int tmrate = DEFAULT_PERIODIC_RATE;
129module_param(tmrate, int, S_IRUGO | S_IWUSR);
130MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
131#endif
132
133#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
134static int buf_sz = DMA_BUFFER_SIZE;
135module_param(buf_sz, int, S_IRUGO | S_IWUSR);
136MODULE_PARM_DESC(buf_sz, "DMA buffer size");
137
138/* In case of Giga ETH, we can enable/disable the COE for the
139 * transmit HW checksum computation.
140 * Note that, if tx csum is off in HW, SG will be still supported. */
141static int tx_coe = HW_CSUM;
142module_param(tx_coe, int, S_IRUGO | S_IWUSR);
143MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
144
145static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
146 NETIF_MSG_LINK | NETIF_MSG_IFUP |
147 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
148
149static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
150static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
151
152/**
153 * stmmac_verify_args - verify the driver parameters.
154 * Description: it verifies if some wrong parameter is passed to the driver.
155 * Note that wrong parameters are replaced with the default values.
156 */
157static void stmmac_verify_args(void)
158{
159 if (unlikely(watchdog < 0))
160 watchdog = TX_TIMEO;
161 if (unlikely(dma_rxsize < 0))
162 dma_rxsize = DMA_RX_SIZE;
163 if (unlikely(dma_txsize < 0))
164 dma_txsize = DMA_TX_SIZE;
165 if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
166 buf_sz = DMA_BUFFER_SIZE;
167 if (unlikely(flow_ctrl > 1))
168 flow_ctrl = FLOW_AUTO;
169 else if (likely(flow_ctrl < 0))
170 flow_ctrl = FLOW_OFF;
171 if (unlikely((pause < 0) || (pause > 0xffff)))
172 pause = PAUSE_TIME;
173
174 return;
175}
176
#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
/* Hex-dump a packet buffer, 16 bytes per row, for debug builds only. */
static void print_pkt(unsigned char *buf, int len)
{
	int idx;

	pr_info("len = %d byte, buf addr: 0x%p", len, buf);
	for (idx = 0; idx < len; idx++) {
		if (!(idx % 16))
			pr_info("\n %03x:", idx);
		pr_info(" %02x", buf[idx]);
	}
	pr_info("\n");
}
#endif
191
/* minimum number of free TX descriptors required to wake up TX process */
#define STMMAC_TX_THRESH(x)	(x->dma_tx_size/4)

/* Number of free descriptors in the TX ring.  NOTE(review): appears to
 * rely on unsigned wrap-around of cur_tx/dirty_tx, which are treated as
 * ever-growing counters -- confirm against the ring bookkeeping. */
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
}
199
/**
 * stmmac_adjust_link
 * @dev: net device structure
 * Description: phylib callback invoked on every link change; it mirrors
 * the PHY's duplex/speed/link state into the MAC control register and
 * (re)programs flow control.  Runs under priv->lock with IRQs disabled.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;
	int new_state = 0;	/* set when anything worth logging changed */
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (phydev == NULL)
		return;

	DBG(probe, DEBUG, "stmmac_adjust_link: called.  address %d link %d\n",
	    phydev->addr, phydev->link);

	spin_lock_irqsave(&priv->lock, flags);
	if (phydev->link) {
		u32 ctrl = readl(ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->mac_type->hw.link.duplex;
			else
				ctrl |= priv->mac_type->hw.link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
						       fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				/* GMAC only: clear PS to select GMII */
				if (likely(priv->is_gmac))
					ctrl &= ~priv->mac_type->hw.link.port;
				break;
			case 100:
			case 10:
				if (priv->is_gmac) {
					ctrl |= priv->mac_type->hw.link.port;
					if (phydev->speed == SPEED_100) {
						ctrl |=
						    priv->mac_type->hw.link.
						    speed;
					} else {
						ctrl &=
						    ~(priv->mac_type->hw.
						      link.speed);
					}
				} else {
					ctrl &= ~priv->mac_type->hw.link.port;
				}
				/* NOTE(review): fix_mac_speed is called
				 * unconditionally -- presumably every board
				 * setup provides it; confirm. */
				priv->fix_mac_speed(priv->bsp_priv,
						    phydev->speed);
				break;
			default:
				if (netif_msg_link(priv))
					pr_warning("%s: Speed (%d) is not 10"
				       " or 100!\n", dev->name, phydev->speed);
				break;
			}

			priv->speed = phydev->speed;
		}

		writel(ctrl, ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		/* Link went down: reset the cached state so the next link-up
		 * reprograms everything. */
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
}
295
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 *	0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];	/* PHY to connect */
	char bus_id[BUS_ID_SIZE];

	/* Reset the cached link state; stmmac_adjust_link reads these. */
	priv->oldlink = 0;
	priv->speed = 0;
	priv->oldduplex = -1;

	if (priv->phy_addr == -1) {
		/* We don't have a PHY, so do nothing */
		return 0;
	}

	/* Build the "bus:addr" identifier phylib expects. */
	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id);

	phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
			priv->phy_interface);

	if (IS_ERR(phydev)) {
		pr_err("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}
	pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
	       " Link = %d\n", dev->name, phydev->phy_id, phydev->link);

	priv->phydev = phydev;

	return 0;
}
350
351static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
352{
353 u32 value = readl(ioaddr + MAC_CTRL_REG);
354 value |= MAC_RNABLE_RX;
355 /* Set the RE (receive enable bit into the MAC CTRL register). */
356 writel(value, ioaddr + MAC_CTRL_REG);
357}
358
359static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
360{
361 u32 value = readl(ioaddr + MAC_CTRL_REG);
362 value |= MAC_ENABLE_TX;
363 /* Set the TE (transmit enable bit into the MAC CTRL register). */
364 writel(value, ioaddr + MAC_CTRL_REG);
365}
366
367static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
368{
369 u32 value = readl(ioaddr + MAC_CTRL_REG);
370 value &= ~MAC_RNABLE_RX;
371 writel(value, ioaddr + MAC_CTRL_REG);
372}
373
374static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
375{
376 u32 value = readl(ioaddr + MAC_CTRL_REG);
377 value &= ~MAC_ENABLE_TX;
378 writel(value, ioaddr + MAC_CTRL_REG);
379}
380
/**
 * display_ring
 * @p: pointer to the ring.
 * @size: size of the ring.
 * Description: display all the descriptors within the ring.
 */
static void display_ring(struct dma_desc *p, int size)
{
	/* Overlay used to dump each descriptor's first 16 bytes as raw
	 * words: DES0/DES1 packed in 'a', the two buffer pointers in
	 * 'b' and 'c'.
	 * NOTE(review): the cast below type-puns struct dma_desc;
	 * assumes the kernel's -fno-strict-aliasing build flags —
	 * confirm if this code is ever reused elsewhere. */
	struct tmp_s {
		u64 a;
		unsigned int b;
		unsigned int c;
	};
	int i;
	for (i = 0; i < size; i++) {
		struct tmp_s *x = (struct tmp_s *)(p + i);
		/* Print the physical address so it can be matched against
		 * the base programmed into the DMA engine. */
		pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
			i, (unsigned int)virt_to_phys(&p[i]),
			(unsigned int)(x->a), (unsigned int)((x->a) >> 32),
			x->b, x->c);
		pr_info("\n");
	}
}
404
405/**
406 * init_dma_desc_rings - init the RX/TX descriptor rings
407 * @dev: net device structure
408 * Description: this function initializes the DMA RX/TX descriptors
409 * and allocates the socket buffers.
410 */
411static void init_dma_desc_rings(struct net_device *dev)
412{
413 int i;
414 struct stmmac_priv *priv = netdev_priv(dev);
415 struct sk_buff *skb;
416 unsigned int txsize = priv->dma_tx_size;
417 unsigned int rxsize = priv->dma_rx_size;
418 unsigned int bfsize = priv->dma_buf_sz;
419 int buff2_needed = 0;
420 int dis_ic = 0;
421
422#ifdef CONFIG_STMMAC_TIMER
423 /* Using Timers disable interrupts on completion for the reception */
424 dis_ic = 1;
425#endif
426 /* Set the Buffer size according to the MTU;
427 * indeed, in case of jumbo we need to bump-up the buffer sizes.
428 */
429 if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
430 bfsize = BUF_SIZE_16KiB;
431 else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
432 bfsize = BUF_SIZE_8KiB;
433 else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
434 bfsize = BUF_SIZE_4KiB;
435 else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
436 bfsize = BUF_SIZE_2KiB;
437 else
438 bfsize = DMA_BUFFER_SIZE;
439
440 /* If the MTU exceeds 8k so use the second buffer in the chain */
441 if (bfsize >= BUF_SIZE_8KiB)
442 buff2_needed = 1;
443
444 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
445 txsize, rxsize, bfsize);
446
447 priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
448 priv->rx_skbuff =
449 kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
450 priv->dma_rx =
451 (struct dma_desc *)dma_alloc_coherent(priv->device,
452 rxsize *
453 sizeof(struct dma_desc),
454 &priv->dma_rx_phy,
455 GFP_KERNEL);
456 priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
457 GFP_KERNEL);
458 priv->dma_tx =
459 (struct dma_desc *)dma_alloc_coherent(priv->device,
460 txsize *
461 sizeof(struct dma_desc),
462 &priv->dma_tx_phy,
463 GFP_KERNEL);
464
465 if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
466 pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
467 return;
468 }
469
470 DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
471 "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
472 dev->name, priv->dma_rx, priv->dma_tx,
473 (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
474
475 /* RX INITIALIZATION */
476 DBG(probe, INFO, "stmmac: SKB addresses:\n"
477 "skb\t\tskb data\tdma data\n");
478
479 for (i = 0; i < rxsize; i++) {
480 struct dma_desc *p = priv->dma_rx + i;
481
482 skb = netdev_alloc_skb_ip_align(dev, bfsize);
483 if (unlikely(skb == NULL)) {
484 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
485 break;
486 }
487 priv->rx_skbuff[i] = skb;
488 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
489 bfsize, DMA_FROM_DEVICE);
490
491 p->des2 = priv->rx_skbuff_dma[i];
492 if (unlikely(buff2_needed))
493 p->des3 = p->des2 + BUF_SIZE_8KiB;
494 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
495 priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
496 }
497 priv->cur_rx = 0;
498 priv->dirty_rx = (unsigned int)(i - rxsize);
499 priv->dma_buf_sz = bfsize;
500 buf_sz = bfsize;
501
502 /* TX INITIALIZATION */
503 for (i = 0; i < txsize; i++) {
504 priv->tx_skbuff[i] = NULL;
505 priv->dma_tx[i].des2 = 0;
506 }
507 priv->dirty_tx = 0;
508 priv->cur_tx = 0;
509
510 /* Clear the Rx/Tx descriptors */
511 priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
512 priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
513
514 if (netif_msg_hw(priv)) {
515 pr_info("RX descriptor ring:\n");
516 display_ring(priv->dma_rx, rxsize);
517 pr_info("TX descriptor ring:\n");
518 display_ring(priv->dma_tx, txsize);
519 }
520 return;
521}
522
523static void dma_free_rx_skbufs(struct stmmac_priv *priv)
524{
525 int i;
526
527 for (i = 0; i < priv->dma_rx_size; i++) {
528 if (priv->rx_skbuff[i]) {
529 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
530 priv->dma_buf_sz, DMA_FROM_DEVICE);
531 dev_kfree_skb_any(priv->rx_skbuff[i]);
532 }
533 priv->rx_skbuff[i] = NULL;
534 }
535 return;
536}
537
538static void dma_free_tx_skbufs(struct stmmac_priv *priv)
539{
540 int i;
541
542 for (i = 0; i < priv->dma_tx_size; i++) {
543 if (priv->tx_skbuff[i] != NULL) {
544 struct dma_desc *p = priv->dma_tx + i;
545 if (p->des2)
546 dma_unmap_single(priv->device, p->des2,
547 priv->mac_type->ops->get_tx_len(p),
548 DMA_TO_DEVICE);
549 dev_kfree_skb_any(priv->tx_skbuff[i]);
550 priv->tx_skbuff[i] = NULL;
551 }
552 }
553 return;
554}
555
556static void free_dma_desc_resources(struct stmmac_priv *priv)
557{
558 /* Release the DMA TX/RX socket buffers */
559 dma_free_rx_skbufs(priv);
560 dma_free_tx_skbufs(priv);
561
562 /* Free the region of consistent memory previously allocated for
563 * the DMA */
564 dma_free_coherent(priv->device,
565 priv->dma_tx_size * sizeof(struct dma_desc),
566 priv->dma_tx, priv->dma_tx_phy);
567 dma_free_coherent(priv->device,
568 priv->dma_rx_size * sizeof(struct dma_desc),
569 priv->dma_rx, priv->dma_rx_phy);
570 kfree(priv->rx_skbuff_dma);
571 kfree(priv->rx_skbuff);
572 kfree(priv->tx_skbuff);
573
574 return;
575}
576
577/**
578 * stmmac_dma_start_tx
579 * @ioaddr: device I/O address
580 * Description: this function starts the DMA tx process.
581 */
582static void stmmac_dma_start_tx(unsigned long ioaddr)
583{
584 u32 value = readl(ioaddr + DMA_CONTROL);
585 value |= DMA_CONTROL_ST;
586 writel(value, ioaddr + DMA_CONTROL);
587 return;
588}
589
590static void stmmac_dma_stop_tx(unsigned long ioaddr)
591{
592 u32 value = readl(ioaddr + DMA_CONTROL);
593 value &= ~DMA_CONTROL_ST;
594 writel(value, ioaddr + DMA_CONTROL);
595 return;
596}
597
598/**
599 * stmmac_dma_start_rx
600 * @ioaddr: device I/O address
601 * Description: this function starts the DMA rx process.
602 */
603static void stmmac_dma_start_rx(unsigned long ioaddr)
604{
605 u32 value = readl(ioaddr + DMA_CONTROL);
606 value |= DMA_CONTROL_SR;
607 writel(value, ioaddr + DMA_CONTROL);
608
609 return;
610}
611
612static void stmmac_dma_stop_rx(unsigned long ioaddr)
613{
614 u32 value = readl(ioaddr + DMA_CONTROL);
615 value &= ~DMA_CONTROL_SR;
616 writel(value, ioaddr + DMA_CONTROL);
617
618 return;
619}
620
/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv : pointer to the private device structure.
 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
 * or Store-And-Forward capability. It also verifies the COE for the
 * transmission in case of Giga ETH.
 * Note: reads and updates the module-wide 'tc' (threshold) and
 * 'tx_coe' globals as a side effect.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	if (!priv->is_gmac) {
		/* MAC 10/100: threshold mode, no HW checksum offload */
		priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
		priv->tx_coe = NO_HW_CSUM;
	} else {
		if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
			/* Store-and-forward TX allows HW checksum insertion */
			priv->mac_type->ops->dma_mode(priv->dev->base_addr,
						      SF_DMA_MODE, SF_DMA_MODE);
			tc = SF_DMA_MODE;
			priv->tx_coe = HW_CSUM;
		} else {
			/* Checksum computation is performed in software. */
			priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
						      SF_DMA_MODE);
			priv->tx_coe = NO_HW_CSUM;
		}
	}
	/* Remember the resolved COE mode for subsequent opens. */
	tx_coe = priv->tx_coe;

	return;
}
651
652#ifdef STMMAC_DEBUG
653/**
654 * show_tx_process_state
655 * @status: tx descriptor status field
656 * Description: it shows the Transmit Process State for CSR5[22:20]
657 */
658static void show_tx_process_state(unsigned int status)
659{
660 unsigned int state;
661 state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
662
663 switch (state) {
664 case 0:
665 pr_info("- TX (Stopped): Reset or Stop command\n");
666 break;
667 case 1:
668 pr_info("- TX (Running):Fetching the Tx desc\n");
669 break;
670 case 2:
671 pr_info("- TX (Running): Waiting for end of tx\n");
672 break;
673 case 3:
674 pr_info("- TX (Running): Reading the data "
675 "and queuing the data into the Tx buf\n");
676 break;
677 case 6:
678 pr_info("- TX (Suspended): Tx Buff Underflow "
679 "or an unavailable Transmit descriptor\n");
680 break;
681 case 7:
682 pr_info("- TX (Running): Closing Tx descriptor\n");
683 break;
684 default:
685 break;
686 }
687 return;
688}
689
690/**
691 * show_rx_process_state
692 * @status: rx descriptor status field
693 * Description: it shows the Receive Process State for CSR5[19:17]
694 */
695static void show_rx_process_state(unsigned int status)
696{
697 unsigned int state;
698 state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
699
700 switch (state) {
701 case 0:
702 pr_info("- RX (Stopped): Reset or Stop command\n");
703 break;
704 case 1:
705 pr_info("- RX (Running): Fetching the Rx desc\n");
706 break;
707 case 2:
708 pr_info("- RX (Running):Checking for end of pkt\n");
709 break;
710 case 3:
711 pr_info("- RX (Running): Waiting for Rx pkt\n");
712 break;
713 case 4:
714 pr_info("- RX (Suspended): Unavailable Rx buf\n");
715 break;
716 case 5:
717 pr_info("- RX (Running): Closing Rx descriptor\n");
718 break;
719 case 6:
720 pr_info("- RX(Running): Flushing the current frame"
721 " from the Rx buf\n");
722 break;
723 case 7:
724 pr_info("- RX (Running): Queuing the Rx frame"
725 " from the Rx buf into memory\n");
726 break;
727 default:
728 break;
729 }
730 return;
731}
732#endif
733
/**
 * stmmac_tx:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 * Walks the TX ring from dirty_tx towards cur_tx, stopping at the
 * first descriptor still owned by the DMA, and wakes the queue when
 * enough descriptors have been freed.
 */
static void stmmac_tx(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;
	unsigned long ioaddr = priv->dev->base_addr;

	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->mac_type->ops->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment */
		last = priv->mac_type->ops->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
			    priv->mac_type->ops->tx_status(&priv->dev->stats,
							   &priv->xstats,
							   p, ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;
		}
		TX_DBG("%s: curr %d, dirty %d\n", __func__,
			priv->cur_tx, priv->dirty_tx);

		/* Unmap the buffer(s) attached to this descriptor;
		 * des3 is only used by jumbo frames. */
		if (likely(p->des2))
			dma_unmap_single(priv->device, p->des2,
					 priv->mac_type->ops->get_tx_len(p),
					 DMA_TO_DEVICE);
		if (unlikely(p->des3))
			p->des3 = 0;

		if (likely(skb != NULL)) {
			/*
			 * If there's room in the queue (limit it to size)
			 * we add this skb back into the pool,
			 * if it's the right size.
			 */
			if ((skb_queue_len(&priv->rx_recycle) <
				priv->dma_rx_size) &&
				skb_recycle_check(skb, priv->dma_buf_sz))
				__skb_queue_head(&priv->rx_recycle, skb);
			else
				dev_kfree_skb(skb);

			priv->tx_skbuff[entry] = NULL;
		}

		priv->mac_type->ops->release_tx_desc(p);

		entry = (++priv->dirty_tx) % txsize;
	}
	/* Restart a stopped queue when enough descriptors are free;
	 * the condition is re-checked under the tx lock to avoid
	 * racing against stmmac_xmit(). */
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			TX_DBG("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}
	return;
}
809
/* Re-enable the DMA interrupts, or restart the external timer when
 * the driver is built with CONFIG_STMMAC_TIMER (interrupt mitigation
 * via timer instead of per-packet IRQs). */
static inline void stmmac_enable_irq(struct stmmac_priv *priv)
{
#ifndef CONFIG_STMMAC_TIMER
	writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
#else
	priv->tm->timer_start(tmrate);
#endif
}
818
/* Mask the DMA interrupts, or stop the external timer when the
 * driver is built with CONFIG_STMMAC_TIMER.  Called before NAPI
 * scheduling so the poll loop runs without further IRQs. */
static inline void stmmac_disable_irq(struct stmmac_priv *priv)
{
#ifndef CONFIG_STMMAC_TIMER
	writel(0, priv->dev->base_addr + DMA_INTR_ENA);
#else
	priv->tm->timer_stop();
#endif
}
827
828static int stmmac_has_work(struct stmmac_priv *priv)
829{
830 unsigned int has_work = 0;
831 int rxret, tx_work = 0;
832
833 rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
834 (priv->cur_rx % priv->dma_rx_size));
835
836 if (priv->dirty_tx != priv->cur_tx)
837 tx_work = 1;
838
839 if (likely(!rxret || tx_work))
840 has_work = 1;
841
842 return has_work;
843}
844
845static inline void _stmmac_schedule(struct stmmac_priv *priv)
846{
847 if (likely(stmmac_has_work(priv))) {
848 stmmac_disable_irq(priv);
849 napi_schedule(&priv->napi);
850 }
851}
852
853#ifdef CONFIG_STMMAC_TIMER
854void stmmac_schedule(struct net_device *dev)
855{
856 struct stmmac_priv *priv = netdev_priv(dev);
857
858 priv->xstats.sched_timer_n++;
859
860 _stmmac_schedule(priv);
861
862 return;
863}
864
/* No-op timer start, used when the HW timer cannot be attached. */
static void stmmac_no_timer_started(unsigned int x)
{
}
868
/* No-op timer stop, used when the HW timer cannot be attached. */
static void stmmac_no_timer_stopped(void)
{
}
872#endif
873
874/**
875 * stmmac_tx_err:
876 * @priv: pointer to the private device structure
877 * Description: it cleans the descriptors and restarts the transmission
878 * in case of errors.
879 */
880static void stmmac_tx_err(struct stmmac_priv *priv)
881{
882 netif_stop_queue(priv->dev);
883
884 stmmac_dma_stop_tx(priv->dev->base_addr);
885 dma_free_tx_skbufs(priv);
886 priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
887 priv->dirty_tx = 0;
888 priv->cur_tx = 0;
889 stmmac_dma_start_tx(priv->dev->base_addr);
890
891 priv->dev->stats.tx_errors++;
892 netif_wake_queue(priv->dev);
893
894 return;
895}
896
/**
 * stmmac_dma_interrupt - Interrupt handler for the driver
 * @dev: net device structure
 * Description: Interrupt handler for the driver (DMA).  Decodes CSR5,
 * accounts abnormal events in the extra statistics, restarts the TX
 * path after fatal TX errors and schedules NAPI for normal RX/TX
 * completion work.
 */
static void stmmac_dma_interrupt(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct stmmac_priv *priv = netdev_priv(dev);
	/* read the status register (CSR5) */
	u32 intr_status = readl(ioaddr + DMA_STATUS);

	DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);

#ifdef STMMAC_DEBUG
	/* It displays the DMA transmit process state (CSR5 register) */
	if (netif_msg_tx_done(priv))
		show_tx_process_state(intr_status);
	if (netif_msg_rx_status(priv))
		show_rx_process_state(intr_status);
#endif
	/* ABNORMAL interrupts */
	if (unlikely(intr_status & DMA_STATUS_AIS)) {
		DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
		if (unlikely(intr_status & DMA_STATUS_UNF)) {
			DBG(intr, INFO, "transmit underflow\n");
			/* Raise the TX threshold (capped at 256) before
			 * restarting, unless already store-and-forward. */
			if (unlikely(tc != SF_DMA_MODE)
			    && (tc <= 256)) {
				/* Try to bump up the threshold */
				tc += 64;
				priv->mac_type->ops->dma_mode(ioaddr, tc,
					      SF_DMA_MODE);
				priv->xstats.threshold = tc;
			}
			stmmac_tx_err(priv);
			priv->xstats.tx_undeflow_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_TJT)) {
			DBG(intr, INFO, "transmit jabber\n");
			priv->xstats.tx_jabber_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_OVF)) {
			DBG(intr, INFO, "recv overflow\n");
			priv->xstats.rx_overflow_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_RU)) {
			DBG(intr, INFO, "receive buffer unavailable\n");
			priv->xstats.rx_buf_unav_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_RPS)) {
			DBG(intr, INFO, "receive process stopped\n");
			priv->xstats.rx_process_stopped_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_RWT)) {
			DBG(intr, INFO, "receive watchdog\n");
			priv->xstats.rx_watchdog_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_ETI)) {
			DBG(intr, INFO, "transmit early interrupt\n");
			priv->xstats.tx_early_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_TPS)) {
			DBG(intr, INFO, "transmit process stopped\n");
			priv->xstats.tx_process_stopped_irq++;
			stmmac_tx_err(priv);
		}
		if (unlikely(intr_status & DMA_STATUS_FBI)) {
			DBG(intr, INFO, "fatal bus error\n");
			priv->xstats.fatal_bus_error_irq++;
			stmmac_tx_err(priv);
		}
	}

	/* TX/RX NORMAL interrupts */
	if (intr_status & DMA_STATUS_NIS) {
		priv->xstats.normal_irq_n++;
		if (likely((intr_status & DMA_STATUS_RI) ||
			 (intr_status & (DMA_STATUS_TI))))
			_stmmac_schedule(priv);
	}

	/* Optional hardware blocks, interrupts should be disabled */
	if (unlikely(intr_status &
		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
		pr_info("%s: unexpected status %08x\n", __func__, intr_status);

	/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
	writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);

	DBG(intr, INFO, "\n\n");

	return;
}
990
991/**
992 * stmmac_open - open entry point of the driver
993 * @dev : pointer to the device structure.
994 * Description:
995 * This function is the open entry point of the driver.
996 * Return value:
997 * 0 on success and an appropriate (-)ve integer as defined in errno.h
998 * file on failure.
999 */
1000static int stmmac_open(struct net_device *dev)
1001{
1002 struct stmmac_priv *priv = netdev_priv(dev);
1003 unsigned long ioaddr = dev->base_addr;
1004 int ret;
1005
1006 /* Check that the MAC address is valid. If its not, refuse
1007 * to bring the device up. The user must specify an
1008 * address using the following linux command:
1009 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
1010 if (!is_valid_ether_addr(dev->dev_addr)) {
1011 random_ether_addr(dev->dev_addr);
1012 pr_warning("%s: generated random MAC address %pM\n", dev->name,
1013 dev->dev_addr);
1014 }
1015
1016 stmmac_verify_args();
1017
1018 ret = stmmac_init_phy(dev);
1019 if (unlikely(ret)) {
1020 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
1021 return ret;
1022 }
1023
1024 /* Request the IRQ lines */
1025 ret = request_irq(dev->irq, &stmmac_interrupt,
1026 IRQF_SHARED, dev->name, dev);
1027 if (unlikely(ret < 0)) {
1028 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1029 __func__, dev->irq, ret);
1030 return ret;
1031 }
1032
1033#ifdef CONFIG_STMMAC_TIMER
1034 priv->tm = kmalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
1035 if (unlikely(priv->tm == NULL)) {
1036 pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
1037 return -ENOMEM;
1038 }
1039 priv->tm->freq = tmrate;
1040
1041 /* Test if the HW timer can be actually used.
1042 * In case of failure continue with no timer. */
1043 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
1044 pr_warning("stmmaceth: cannot attach the HW timer\n");
1045 tmrate = 0;
1046 priv->tm->freq = 0;
1047 priv->tm->timer_start = stmmac_no_timer_started;
1048 priv->tm->timer_stop = stmmac_no_timer_stopped;
1049 }
1050#endif
1051
1052 /* Create and initialize the TX/RX descriptors chains. */
1053 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1054 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
1055 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1056 init_dma_desc_rings(dev);
1057
1058 /* DMA initialization and SW reset */
1059 if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
1060 priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
1061
1062 pr_err("%s: DMA initialization failed\n", __func__);
1063 return -1;
1064 }
1065
1066 /* Copy the MAC addr into the HW */
1067 priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
1068 /* Initialize the MAC Core */
1069 priv->mac_type->ops->core_init(ioaddr);
1070
1071 priv->shutdown = 0;
1072
1073 /* Initialise the MMC (if present) to disable all interrupts. */
1074 writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
1075 writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
1076
1077 /* Enable the MAC Rx/Tx */
1078 stmmac_mac_enable_rx(ioaddr);
1079 stmmac_mac_enable_tx(ioaddr);
1080
1081 /* Set the HW DMA mode and the COE */
1082 stmmac_dma_operation_mode(priv);
1083
1084 /* Extra statistics */
1085 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1086 priv->xstats.threshold = tc;
1087
1088 /* Start the ball rolling... */
1089 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
1090 stmmac_dma_start_tx(ioaddr);
1091 stmmac_dma_start_rx(ioaddr);
1092
1093#ifdef CONFIG_STMMAC_TIMER
1094 priv->tm->timer_start(tmrate);
1095#endif
1096 /* Dump DMA/MAC registers */
1097 if (netif_msg_hw(priv)) {
1098 priv->mac_type->ops->dump_mac_regs(ioaddr);
1099 priv->mac_type->ops->dump_dma_regs(ioaddr);
1100 }
1101
1102 if (priv->phydev)
1103 phy_start(priv->phydev);
1104
1105 napi_enable(&priv->napi);
1106 skb_queue_head_init(&priv->rx_recycle);
1107 netif_start_queue(dev);
1108 return 0;
1109}
1110
1111/**
1112 * stmmac_release - close entry point of the driver
1113 * @dev : device pointer.
1114 * Description:
1115 * This is the stop entry point of the driver.
1116 */
1117static int stmmac_release(struct net_device *dev)
1118{
1119 struct stmmac_priv *priv = netdev_priv(dev);
1120
1121 /* Stop and disconnect the PHY */
1122 if (priv->phydev) {
1123 phy_stop(priv->phydev);
1124 phy_disconnect(priv->phydev);
1125 priv->phydev = NULL;
1126 }
1127
1128 netif_stop_queue(dev);
1129
1130#ifdef CONFIG_STMMAC_TIMER
1131 /* Stop and release the timer */
1132 stmmac_close_ext_timer();
1133 if (priv->tm != NULL)
1134 kfree(priv->tm);
1135#endif
1136 napi_disable(&priv->napi);
1137 skb_queue_purge(&priv->rx_recycle);
1138
1139 /* Free the IRQ lines */
1140 free_irq(dev->irq, dev);
1141
1142 /* Stop TX/RX DMA and clear the descriptors */
1143 stmmac_dma_stop_tx(dev->base_addr);
1144 stmmac_dma_stop_rx(dev->base_addr);
1145
1146 /* Release and free the Rx/Tx resources */
1147 free_dma_desc_resources(priv);
1148
1149 /* Disable the MAC core */
1150 stmmac_mac_disable_tx(dev->base_addr);
1151 stmmac_mac_disable_rx(dev->base_addr);
1152
1153 netif_carrier_off(dev);
1154
1155 return 0;
1156}
1157
/*
 * To perform emulated hardware segmentation on skb (software TSO):
 * the skb is split into MTU-sized segments via skb_gso_segment() and
 * each segment is transmitted individually through stmmac_xmit().
 * The original skb is always consumed.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY when the ring cannot hold the estimated segments.
 */
static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
{
	struct sk_buff *segs, *curr_skb;
	int gso_segs = skb_shinfo(skb)->gso_segs;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
		netif_stop_queue(priv->dev);
		TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
		       __func__);
		/* Re-check: the poll handler may have freed descriptors
		 * between the first test and the queue stop. */
		if (stmmac_tx_avail(priv) < gso_segs)
			return NETDEV_TX_BUSY;

		netif_wake_queue(priv->dev);
	}
	TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
	       skb, skb->len);

	segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto sw_tso_end;

	do {
		curr_skb = segs;
		segs = segs->next;
		TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
		       "*next %p\n", curr_skb->len, curr_skb, segs);
		/* Detach the segment from the chain before xmit. */
		curr_skb->next = NULL;
		stmmac_xmit(curr_skb, priv->dev);
	} while (segs);

sw_tso_end:
	/* The original (unsegmented) skb is no longer needed. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
1197
/*
 * stmmac_handle_jumbo_frames - map a large linear frame for TX
 * @skb: socket buffer to transmit
 * @dev: net device
 * @csum_insertion: non-zero to request HW checksum insertion
 * Description: spreads a linear payload larger than BUF_SIZE_4KiB
 * across the two buffer pointers (des2/des3) of one or, above 8KiB,
 * two descriptors.  Returns the index of the last descriptor used;
 * the caller closes that descriptor and sets the owner bit on the
 * first one.
 * NOTE(review): des3 is set to des2 + BUF_SIZE_4KiB, i.e. halfway
 * into the mapped region — presumably prepare_tx_desc() splits the
 * length between the two buffers; confirm against the descriptor
 * layout in the MAC-specific code.
 */
static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
					struct net_device *dev,
					int csum_insertion)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry = priv->cur_tx % txsize;
	struct dma_desc *desc = priv->dma_tx + entry;

	if (nopaged_len > BUF_SIZE_8KiB) {

		int buf2_size = nopaged_len - BUF_SIZE_8KiB;

		/* First descriptor: first 8KiB of the payload. */
		desc->des2 = dma_map_single(priv->device, skb->data,
					BUF_SIZE_8KiB, DMA_TO_DEVICE);
		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
		priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
						     csum_insertion);

		entry = (++priv->cur_tx) % txsize;
		desc = priv->dma_tx + entry;

		/* Second descriptor: the remainder; DMA-owned at once
		 * (the first is released by the caller last). */
		desc->des2 = dma_map_single(priv->device,
					skb->data + BUF_SIZE_8KiB,
					buf2_size, DMA_TO_DEVICE);
		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
		priv->mac_type->ops->prepare_tx_desc(desc, 0,
						     buf2_size, csum_insertion);
		priv->mac_type->ops->set_tx_owner(desc);
		priv->tx_skbuff[entry] = NULL;
	} else {
		/* Fits in a single descriptor using both buffers. */
		desc->des2 = dma_map_single(priv->device, skb->data,
					nopaged_len, DMA_TO_DEVICE);
		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
		priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
						     csum_insertion);
	}
	return entry;
}
1238
/**
 * stmmac_xmit:
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : Tx entry point of the driver.  Maps the linear data
 * and every paged fragment onto TX descriptors, then hands the chain
 * to the DMA.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry;
	int i, csum_insertion = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;

	/* One descriptor for the linear part plus one per fragment. */
	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			pr_err("%s: BUG! Tx Ring full when queue awake\n",
				__func__);
		}
		return NETDEV_TX_BUSY;
	}

	entry = priv->cur_tx % txsize;

#ifdef STMMAC_XMIT_DEBUG
	if ((skb->len > ETH_FRAME_LEN) || nfrags)
		pr_info("stmmac xmit:\n"
		       "\tskb addr %p - len: %d - nopaged_len: %d\n"
		       "\tn_frags: %d - ip_summed: %d - %s gso\n",
		       skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
		       !skb_is_gso(skb) ? "isn't" : "is");
#endif

	/* GSO frames are segmented in software. */
	if (unlikely(skb_is_gso(skb)))
		return stmmac_sw_tso(priv, skb);

	if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
		/* Without HW checksum offload, finish the csum now. */
		if (likely(priv->tx_coe == NO_HW_CSUM))
			skb_checksum_help(skb);
		else
			csum_insertion = 1;
	}

	desc = priv->dma_tx + entry;
	first = desc;

#ifdef STMMAC_XMIT_DEBUG
	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
		       "\t\tn_frags: %d, ip_summed: %d\n",
		       skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
#endif
	priv->tx_skbuff[entry] = skb;
	if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
		/* Oversized linear data goes through the jumbo path. */
		entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
		desc = priv->dma_tx + entry;
	} else {
		unsigned int nopaged_len = skb_headlen(skb);
		desc->des2 = dma_map_single(priv->device, skb->data,
					nopaged_len, DMA_TO_DEVICE);
		priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
						     csum_insertion);
	}

	/* Map each paged fragment on its own descriptor; all but the
	 * first descriptor are handed to the DMA immediately. */
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = frag->size;

		entry = (++priv->cur_tx) % txsize;
		desc = priv->dma_tx + entry;

		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
		desc->des2 = dma_map_page(priv->device, frag->page,
					  frag->page_offset,
					  len, DMA_TO_DEVICE);
		priv->tx_skbuff[entry] = NULL;
		priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
						     csum_insertion);
		priv->mac_type->ops->set_tx_owner(desc);
	}

	/* Interrupt on completion only for the latest segment */
	priv->mac_type->ops->close_tx_desc(desc);
#ifdef CONFIG_STMMAC_TIMER
	/* Clean IC while using timers */
	priv->mac_type->ops->clear_tx_ic(desc);
#endif
	/* Set the owner bit on the first descriptor last, so the DMA
	 * never sees a partially built chain (race avoidance). */
	priv->mac_type->ops->set_tx_owner(first);

	priv->cur_tx++;

#ifdef STMMAC_XMIT_DEBUG
	if (netif_msg_pktdata(priv)) {
		pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
		       "first=%p, nfrags=%d\n",
		       (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
		       entry, first, nfrags);
		display_ring(priv->dma_tx, txsize);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
#endif
	/* Stop the queue when a max-fragment frame might not fit. */
	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		TX_DBG("%s: stop transmitted packets\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	/* CSR1 enables the transmit DMA to check for new descriptor */
	writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);

	return NETDEV_TX_OK;
}
1357
/*
 * stmmac_rx_refill - replenish the RX descriptor ring
 * @priv: private driver structure
 * Description: for every entry consumed by stmmac_rx(), attach a new
 * skb (recycled from rx_recycle when possible), map it for DMA and
 * hand the descriptor back to the hardware via its owner bit.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	struct dma_desc *p = priv->dma_rx;

	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		unsigned int entry = priv->dirty_rx % rxsize;
		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			/* Prefer a recycled skb over a fresh allocation. */
			skb = __skb_dequeue(&priv->rx_recycle);
			if (skb == NULL)
				skb = netdev_alloc_skb_ip_align(priv->dev,
								bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);

			(p + entry)->des2 = priv->rx_skbuff_dma[entry];
			if (unlikely(priv->is_gmac)) {
				/* Jumbo buffers also use the second
				 * buffer pointer (des3). */
				if (bfsize >= BUF_SIZE_8KiB)
					(p + entry)->des3 =
					    (p + entry)->des2 + BUF_SIZE_8KiB;
			}
			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
		}
		priv->mac_type->ops->set_rx_owner(p + entry);
	}
	return;
}
1394
/*
 * stmmac_rx - receive up to @limit frames from the RX ring
 * @priv: private driver structure
 * @limit: NAPI budget
 * Description: walks the RX ring while descriptors are CPU-owned,
 * passes good frames up the stack (GRO when checksummed by HW),
 * refills the ring and returns the number of descriptors consumed.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;
	struct dma_desc *p = priv->dma_rx + entry;
	struct dma_desc *p_next;

#ifdef STMMAC_RX_DEBUG
	if (netif_msg_hw(priv)) {
		pr_debug(">>> stmmac_rx: descriptor ring:\n");
		display_ring(priv->dma_rx, rxsize);
	}
#endif
	count = 0;
	while (!priv->mac_type->ops->get_rx_owner(p)) {
		int status;

		if (count >= limit)
			break;

		count++;

		/* Prefetch the next descriptor while handling this one. */
		next_entry = (++priv->cur_rx) % rxsize;
		p_next = priv->dma_rx + next_entry;
		prefetch(p_next);

		/* read the status of the incoming frame */
		status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
							 &priv->xstats, p));
		if (unlikely(status == discard_frame))
			priv->dev->stats.rx_errors++;
		else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int frame_len =
			    priv->mac_type->ops->get_rx_frame_len(p) - 4;

#ifdef STMMAC_RX_DEBUG
			if (frame_len > ETH_FRAME_LEN)
				pr_debug("\tRX frame size %d, COE status: %d\n",
					frame_len, status);

			if (netif_msg_hw(priv))
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					p, entry, p->des2);
#endif
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
					priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			/* The skb now belongs to the stack; the refill
			 * path will attach a fresh one to this entry. */
			priv->rx_skbuff[entry] = NULL;

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
#ifdef STMMAC_RX_DEBUG
			if (netif_msg_pktdata(priv)) {
				pr_info(" frame received (%dbytes)", frame_len);
				print_pkt(skb->data, frame_len);
			}
#endif
			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(status == csum_none)) {
				/* always for the old mac 10/100 */
				skb->ip_summed = CHECKSUM_NONE;
				netif_receive_skb(skb);
			} else {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				napi_gro_receive(&priv->napi, skb);
			}

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
			priv->dev->last_rx = jiffies;
		}
		entry = next_entry;
		p = p_next;	/* use prefetched values */
	}

	/* Hand fresh buffers back to the hardware. */
	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}
1488
1489/**
1490 * stmmac_poll - stmmac poll method (NAPI)
1491 * @napi : pointer to the napi structure.
1492 * @budget : maximum number of packets that the current CPU can receive from
1493 * all interfaces.
1494 * Description :
1495 * This function implements the the reception process.
1496 * Also it runs the TX completion thread
1497 */
1498static int stmmac_poll(struct napi_struct *napi, int budget)
1499{
1500 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
1501 int work_done = 0;
1502
1503 priv->xstats.poll_n++;
1504 stmmac_tx(priv);
1505 work_done = stmmac_rx(priv, budget);
1506
1507 if (work_done < budget) {
1508 napi_complete(napi);
1509 stmmac_enable_irq(priv);
1510 }
1511 return work_done;
1512}
1513
/**
 * stmmac_tx_timeout
 * @dev : Pointer to net device structure
 * Description: called by the networking core when a transmission has not
 * completed within the watchdog interval; clears the TX resources so that
 * transmission can be restarted.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(priv);
}
1530
1531/* Configuration changes (passed on by ifconfig) */
1532static int stmmac_config(struct net_device *dev, struct ifmap *map)
1533{
1534 if (dev->flags & IFF_UP) /* can't act on a running interface */
1535 return -EBUSY;
1536
1537 /* Don't allow changing the I/O address */
1538 if (map->base_addr != dev->base_addr) {
1539 pr_warning("%s: can't change I/O address\n", dev->name);
1540 return -EOPNOTSUPP;
1541 }
1542
1543 /* Don't allow changing the IRQ */
1544 if (map->irq != dev->irq) {
1545 pr_warning("%s: can't change IRQ number %d\n",
1546 dev->name, dev->irq);
1547 return -EOPNOTSUPP;
1548 }
1549
1550 /* ignore other fields */
1551 return 0;
1552}
1553
1554/**
1555 * stmmac_multicast_list - entry point for multicast addressing
1556 * @dev : pointer to the device structure
1557 * Description:
1558 * This function is a driver entry point which gets called by the kernel
1559 * whenever multicast addresses must be enabled/disabled.
1560 * Return value:
1561 * void.
1562 */
1563static void stmmac_multicast_list(struct net_device *dev)
1564{
1565 struct stmmac_priv *priv = netdev_priv(dev);
1566
1567 spin_lock(&priv->lock);
1568 priv->mac_type->ops->set_filter(dev);
1569 spin_unlock(&priv->lock);
1570 return;
1571}
1572
1573/**
1574 * stmmac_change_mtu - entry point to change MTU size for the device.
1575 * @dev : device pointer.
1576 * @new_mtu : the new MTU size for the device.
1577 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
1578 * to drive packet transmission. Ethernet has an MTU of 1500 octets
1579 * (ETH_DATA_LEN). This value can be changed with ifconfig.
1580 * Return value:
1581 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1582 * file on failure.
1583 */
1584static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1585{
1586 struct stmmac_priv *priv = netdev_priv(dev);
1587 int max_mtu;
1588
1589 if (netif_running(dev)) {
1590 pr_err("%s: must be stopped to change its MTU\n", dev->name);
1591 return -EBUSY;
1592 }
1593
1594 if (priv->is_gmac)
1595 max_mtu = JUMBO_LEN;
1596 else
1597 max_mtu = ETH_DATA_LEN;
1598
1599 if ((new_mtu < 46) || (new_mtu > max_mtu)) {
1600 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
1601 return -EINVAL;
1602 }
1603
1604 dev->mtu = new_mtu;
1605
1606 return 0;
1607}
1608
1609static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1610{
1611 struct net_device *dev = (struct net_device *)dev_id;
1612 struct stmmac_priv *priv = netdev_priv(dev);
1613
1614 if (unlikely(!dev)) {
1615 pr_err("%s: invalid dev pointer\n", __func__);
1616 return IRQ_NONE;
1617 }
1618
1619 if (priv->is_gmac) {
1620 unsigned long ioaddr = dev->base_addr;
1621 /* To handle GMAC own interrupts */
1622 priv->mac_type->ops->host_irq_status(ioaddr);
1623 }
1624 stmmac_dma_interrupt(dev);
1625
1626 return IRQ_HANDLED;
1627}
1628
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled. */
static void stmmac_poll_controller(struct net_device *dev)
{
	/* Run the ISR by hand with the IRQ line masked */
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
1639
1640/**
1641 * stmmac_ioctl - Entry point for the Ioctl
1642 * @dev: Device pointer.
1643 * @rq: An IOCTL specefic structure, that can contain a pointer to
1644 * a proprietary structure used to pass information to the driver.
1645 * @cmd: IOCTL command
1646 * Description:
1647 * Currently there are no special functionality supported in IOCTL, just the
1648 * phy_mii_ioctl(...) can be invoked.
1649 */
1650static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1651{
1652 struct stmmac_priv *priv = netdev_priv(dev);
1653 int ret = -EOPNOTSUPP;
1654
1655 if (!netif_running(dev))
1656 return -EINVAL;
1657
1658 switch (cmd) {
1659 case SIOCGMIIPHY:
1660 case SIOCGMIIREG:
1661 case SIOCSMIIREG:
1662 if (!priv->phydev)
1663 return -EINVAL;
1664
1665 spin_lock(&priv->lock);
1666 ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
1667 spin_unlock(&priv->lock);
1668 default:
1669 break;
1670 }
1671 return ret;
1672}
1673
#ifdef STMMAC_VLAN_TAG_USED
/* Record the VLAN group used for RX tag lookup, under the private lock. */
static void stmmac_vlan_rx_register(struct net_device *dev,
				    struct vlan_group *grp)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp);

	spin_lock(&priv->lock);
	priv->vlgrp = grp;
	spin_unlock(&priv->lock);
}
#endif
1689
/* Hooks wired into the networking core for this driver. */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_set_multicast_list = stmmac_multicast_list,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_set_config = stmmac_config,
#ifdef STMMAC_VLAN_TAG_USED
	.ndo_vlan_rx_register = stmmac_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	/* generic helper: validates and copies the new MAC address */
	.ndo_set_mac_address = eth_mac_addr,
};
1707
1708/**
1709 * stmmac_probe - Initialization of the adapter .
1710 * @dev : device pointer
1711 * Description: The function initializes the network device structure for
1712 * the STMMAC driver. It also calls the low level routines
1713 * in order to init the HW (i.e. the DMA engine)
1714 */
1715static int stmmac_probe(struct net_device *dev)
1716{
1717 int ret = 0;
1718 struct stmmac_priv *priv = netdev_priv(dev);
1719
1720 ether_setup(dev);
1721
1722 dev->netdev_ops = &stmmac_netdev_ops;
1723 stmmac_set_ethtool_ops(dev);
1724
1725 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
1726 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1727#ifdef STMMAC_VLAN_TAG_USED
1728 /* Both mac100 and gmac support receive VLAN tag detection */
1729 dev->features |= NETIF_F_HW_VLAN_RX;
1730#endif
1731 priv->msg_enable = netif_msg_init(debug, default_msg_level);
1732
1733 if (priv->is_gmac)
1734 priv->rx_csum = 1;
1735
1736 if (flow_ctrl)
1737 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
1738
1739 priv->pause = pause;
1740 netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
1741
1742 /* Get the MAC address */
1743 priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
1744
1745 if (!is_valid_ether_addr(dev->dev_addr))
1746 pr_warning("\tno valid MAC address;"
1747 "please, use ifconfig or nwhwconfig!\n");
1748
1749 ret = register_netdev(dev);
1750 if (ret) {
1751 pr_err("%s: ERROR %i registering the device\n",
1752 __func__, ret);
1753 return -ENODEV;
1754 }
1755
1756 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
1757 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
1758 (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
1759
1760 spin_lock_init(&priv->lock);
1761
1762 return ret;
1763}
1764
1765/**
1766 * stmmac_mac_device_setup
1767 * @dev : device pointer
1768 * Description: select and initialise the mac device (mac100 or Gmac).
1769 */
1770static int stmmac_mac_device_setup(struct net_device *dev)
1771{
1772 struct stmmac_priv *priv = netdev_priv(dev);
1773 unsigned long ioaddr = dev->base_addr;
1774
1775 struct mac_device_info *device;
1776
1777 if (priv->is_gmac)
1778 device = gmac_setup(ioaddr);
1779 else
1780 device = mac100_setup(ioaddr);
1781
1782 if (!device)
1783 return -ENOMEM;
1784
1785 priv->mac_type = device;
1786
1787 priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
1788 if (priv->wolenabled == PMT_SUPPORTED)
1789 priv->wolopts = WAKE_MAGIC; /* Magic Frame */
1790
1791 return 0;
1792}
1793
1794static int stmmacphy_dvr_probe(struct platform_device *pdev)
1795{
1796 struct plat_stmmacphy_data *plat_dat;
1797 plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
1798
1799 pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
1800 plat_dat->bus_id);
1801
1802 return 0;
1803}
1804
/* Nothing to tear down: the PHY platform device owns no resources here. */
static int stmmacphy_dvr_remove(struct platform_device *pdev)
{
	return 0;
}
1809
/* Companion platform driver matching the PHY resource; its devices are
 * scanned by stmmac_associate_phy() via driver_for_each_device(). */
static struct platform_driver stmmacphy_driver = {
	.driver = {
		   .name = PHY_RESOURCE_NAME,
		   },
	.probe = stmmacphy_dvr_probe,
	.remove = stmmacphy_dvr_remove,
};
1817
1818/**
1819 * stmmac_associate_phy
1820 * @dev: pointer to device structure
1821 * @data: points to the private structure.
1822 * Description: Scans through all the PHYs we have registered and checks if
1823 * any are associated with our MAC. If so, then just fill in
1824 * the blanks in our local context structure
1825 */
1826static int stmmac_associate_phy(struct device *dev, void *data)
1827{
1828 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1829 struct plat_stmmacphy_data *plat_dat;
1830
1831 plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
1832
1833 DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
1834 plat_dat->bus_id);
1835
1836 /* Check that this phy is for the MAC being initialised */
1837 if (priv->bus_id != plat_dat->bus_id)
1838 return 0;
1839
1840 /* OK, this PHY is connected to the MAC.
1841 Go ahead and get the parameters */
1842 DBG(probe, DEBUG, "%s: OK. Found PHY config\n", __func__);
1843 priv->phy_irq =
1844 platform_get_irq_byname(to_platform_device(dev), "phyirq");
1845 DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__,
1846 plat_dat->bus_id, priv->phy_irq);
1847
1848 /* Override with kernel parameters if supplied XXX CRS XXX
1849 * this needs to have multiple instances */
1850 if ((phyaddr >= 0) && (phyaddr <= 31))
1851 plat_dat->phy_addr = phyaddr;
1852
1853 priv->phy_addr = plat_dat->phy_addr;
1854 priv->phy_mask = plat_dat->phy_mask;
1855 priv->phy_interface = plat_dat->interface;
1856 priv->phy_reset = plat_dat->phy_reset;
1857
1858 DBG(probe, DEBUG, "%s: exiting\n", __func__);
1859 return 1; /* forces exit of driver_for_each_device() */
1860}
1861
1862/**
1863 * stmmac_dvr_probe
1864 * @pdev: platform device pointer
1865 * Description: the driver is initialized through platform_device.
1866 */
1867static int stmmac_dvr_probe(struct platform_device *pdev)
1868{
1869 int ret = 0;
1870 struct resource *res;
1871 unsigned int *addr = NULL;
1872 struct net_device *ndev = NULL;
1873 struct stmmac_priv *priv;
1874 struct plat_stmmacenet_data *plat_dat;
1875
1876 pr_info("STMMAC driver:\n\tplatform registration... ");
1877 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1878 if (!res) {
1879 ret = -ENODEV;
1880 goto out;
1881 }
1882 pr_info("done!\n");
1883
1884 if (!request_mem_region(res->start, (res->end - res->start),
1885 pdev->name)) {
1886 pr_err("%s: ERROR: memory allocation failed"
1887 "cannot get the I/O addr 0x%x\n",
1888 __func__, (unsigned int)res->start);
1889 ret = -EBUSY;
1890 goto out;
1891 }
1892
1893 addr = ioremap(res->start, (res->end - res->start));
1894 if (!addr) {
1895 pr_err("%s: ERROR: memory mapping failed \n", __func__);
1896 ret = -ENOMEM;
1897 goto out;
1898 }
1899
1900 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
1901 if (!ndev) {
1902 pr_err("%s: ERROR: allocating the device\n", __func__);
1903 ret = -ENOMEM;
1904 goto out;
1905 }
1906
1907 SET_NETDEV_DEV(ndev, &pdev->dev);
1908
1909 /* Get the MAC information */
1910 ndev->irq = platform_get_irq_byname(pdev, "macirq");
1911 if (ndev->irq == -ENXIO) {
1912 pr_err("%s: ERROR: MAC IRQ configuration "
1913 "information not found\n", __func__);
1914 ret = -ENODEV;
1915 goto out;
1916 }
1917
1918 priv = netdev_priv(ndev);
1919 priv->device = &(pdev->dev);
1920 priv->dev = ndev;
1921 plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
1922 priv->bus_id = plat_dat->bus_id;
1923 priv->pbl = plat_dat->pbl; /* TLI */
1924 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1925
1926 platform_set_drvdata(pdev, ndev);
1927
1928 /* Set the I/O base addr */
1929 ndev->base_addr = (unsigned long)addr;
1930
1931 /* MAC HW revice detection */
1932 ret = stmmac_mac_device_setup(ndev);
1933 if (ret < 0)
1934 goto out;
1935
1936 /* Network Device Registration */
1937 ret = stmmac_probe(ndev);
1938 if (ret < 0)
1939 goto out;
1940
1941 /* associate a PHY - it is provided by another platform bus */
1942 if (!driver_for_each_device
1943 (&(stmmacphy_driver.driver), NULL, (void *)priv,
1944 stmmac_associate_phy)) {
1945 pr_err("No PHY device is associated with this MAC!\n");
1946 ret = -ENODEV;
1947 goto out;
1948 }
1949
1950 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1951 priv->bsp_priv = plat_dat->bsp_priv;
1952
1953 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
1954 "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
1955 pdev->id, ndev->irq, (unsigned int)addr);
1956
1957 /* MDIO bus Registration */
1958 pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
1959 ret = stmmac_mdio_register(ndev);
1960 if (ret < 0)
1961 goto out;
1962 pr_debug("registered!\n");
1963
1964out:
1965 if (ret < 0) {
1966 platform_set_drvdata(pdev, NULL);
1967 release_mem_region(res->start, (res->end - res->start));
1968 if (addr != NULL)
1969 iounmap(addr);
1970 }
1971
1972 return ret;
1973}
1974
1975/**
1976 * stmmac_dvr_remove
1977 * @pdev: platform device pointer
1978 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
1979 * changes the link status, releases the DMA descriptor rings,
1980 * unregisters the MDIO bus and unmaps the allocated memory.
1981 */
1982static int stmmac_dvr_remove(struct platform_device *pdev)
1983{
1984 struct net_device *ndev = platform_get_drvdata(pdev);
1985 struct resource *res;
1986
1987 pr_info("%s:\n\tremoving driver", __func__);
1988
1989 stmmac_dma_stop_rx(ndev->base_addr);
1990 stmmac_dma_stop_tx(ndev->base_addr);
1991
1992 stmmac_mac_disable_rx(ndev->base_addr);
1993 stmmac_mac_disable_tx(ndev->base_addr);
1994
1995 netif_carrier_off(ndev);
1996
1997 stmmac_mdio_unregister(ndev);
1998
1999 platform_set_drvdata(pdev, NULL);
2000 unregister_netdev(ndev);
2001
2002 iounmap((void *)ndev->base_addr);
2003 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2004 release_mem_region(res->start, (res->end - res->start));
2005
2006 free_netdev(ndev);
2007
2008 return 0;
2009}
2010
2011#ifdef CONFIG_PM
/* Platform suspend hook: quiesce the device for PM_EVENT_SUSPEND, or do
 * a full release for hibernation/other events.  NOTE(review): the whole
 * sequence is order-sensitive (stop queue -> stop PHY -> stop timer ->
 * disable NAPI -> stop DMA -> reset rings -> disable MAC). */
static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(dev);
	int dis_ic = 0;

	/* Nothing to do if the interface was never brought up */
	if (!dev || !netif_running(dev))
		return 0;

	spin_lock(&priv->lock);

	if (state.event == PM_EVENT_SUSPEND) {
		netif_device_detach(dev);
		netif_stop_queue(dev);
		if (priv->phydev)
			phy_stop(priv->phydev);

#ifdef CONFIG_STMMAC_TIMER
		/* External timer off; rings re-initialized below with RX
		 * interrupt coalescing disabled (dis_ic) */
		priv->tm->timer_stop();
		dis_ic = 1;
#endif
		napi_disable(&priv->napi);

		/* Stop TX/RX DMA */
		stmmac_dma_stop_tx(dev->base_addr);
		stmmac_dma_stop_rx(dev->base_addr);
		/* Clear the Rx/Tx descriptors */
		priv->mac_type->ops->init_rx_desc(priv->dma_rx,
						  priv->dma_rx_size, dis_ic);
		priv->mac_type->ops->init_tx_desc(priv->dma_tx,
						  priv->dma_tx_size);

		stmmac_mac_disable_tx(dev->base_addr);

		if (device_may_wakeup(&(pdev->dev))) {
			/* Enable Power down mode by programming the PMT regs;
			 * RX stays enabled so WoL frames can be detected */
			if (priv->wolenabled == PMT_SUPPORTED)
				priv->mac_type->ops->pmt(dev->base_addr,
							 priv->wolopts);
		} else {
			stmmac_mac_disable_rx(dev->base_addr);
		}
	} else {
		/* Remember we went down via full release so resume can
		 * re-open instead of just re-enabling (see stmmac_resume) */
		priv->shutdown = 1;
		/* Although this can appear slightly redundant it actually
		 * makes fast the standby operation and guarantees the driver
		 * working if hibernation is on media. */
		stmmac_release(dev);
	}

	spin_unlock(&priv->lock);
	return 0;
}
2065
/* Platform resume hook: mirror image of stmmac_suspend().  If the device
 * went down via the shutdown path (hibernation), re-open it completely;
 * otherwise clear WoL power-down, re-enable MAC/DMA, timer, NAPI, PHY
 * and the TX queue, in that order. */
static int stmmac_resume(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return 0;

	spin_lock(&priv->lock);

	if (priv->shutdown) {
		/* Re-open the interface and re-init the MAC/DMA
		   and the rings. */
		stmmac_open(dev);
		goto out_resume;
	}

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console). */
	if (device_may_wakeup(&(pdev->dev)))
		if (priv->wolenabled == PMT_SUPPORTED)
			priv->mac_type->ops->pmt(dev->base_addr, 0);

	netif_device_attach(dev);

	/* Enable the MAC and DMA */
	stmmac_mac_enable_rx(ioaddr);
	stmmac_mac_enable_tx(ioaddr);
	stmmac_dma_start_tx(ioaddr);
	stmmac_dma_start_rx(ioaddr);

#ifdef CONFIG_STMMAC_TIMER
	priv->tm->timer_start(tmrate);
#endif
	napi_enable(&priv->napi);

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_start_queue(dev);

out_resume:
	spin_unlock(&priv->lock);
	return 0;
}
2115#endif
2116
/* Main platform driver for the MAC device; PM hooks only when
 * CONFIG_PM is enabled. */
static struct platform_driver stmmac_driver = {
	.driver = {
		   .name = STMMAC_RESOURCE_NAME,
		   },
	.probe = stmmac_dvr_probe,
	.remove = stmmac_dvr_remove,
#ifdef CONFIG_PM
	.suspend = stmmac_suspend,
	.resume = stmmac_resume,
#endif

};
2129
2130/**
2131 * stmmac_init_module - Entry point for the driver
2132 * Description: This function is the entry point for the driver.
2133 */
2134static int __init stmmac_init_module(void)
2135{
2136 int ret;
2137
2138 if (platform_driver_register(&stmmacphy_driver)) {
2139 pr_err("No PHY devices registered!\n");
2140 return -ENODEV;
2141 }
2142
2143 ret = platform_driver_register(&stmmac_driver);
2144 return ret;
2145}
2146
2147/**
2148 * stmmac_cleanup_module - Cleanup routine for the driver
2149 * Description: This function is the cleanup routine for the driver.
2150 */
2151static void __exit stmmac_cleanup_module(void)
2152{
2153 platform_driver_unregister(&stmmacphy_driver);
2154 platform_driver_unregister(&stmmac_driver);
2155}
2156
2157#ifndef MODULE
2158static int __init stmmac_cmdline_opt(char *str)
2159{
2160 char *opt;
2161
2162 if (!str || !*str)
2163 return -EINVAL;
2164 while ((opt = strsep(&str, ",")) != NULL) {
2165 if (!strncmp(opt, "debug:", 6))
2166 strict_strtoul(opt + 6, 0, (unsigned long *)&debug);
2167 else if (!strncmp(opt, "phyaddr:", 8))
2168 strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr);
2169 else if (!strncmp(opt, "dma_txsize:", 11))
2170 strict_strtoul(opt + 11, 0,
2171 (unsigned long *)&dma_txsize);
2172 else if (!strncmp(opt, "dma_rxsize:", 11))
2173 strict_strtoul(opt + 11, 0,
2174 (unsigned long *)&dma_rxsize);
2175 else if (!strncmp(opt, "buf_sz:", 7))
2176 strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
2177 else if (!strncmp(opt, "tc:", 3))
2178 strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
2179 else if (!strncmp(opt, "tx_coe:", 7))
2180 strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
2181 else if (!strncmp(opt, "watchdog:", 9))
2182 strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
2183 else if (!strncmp(opt, "flow_ctrl:", 10))
2184 strict_strtoul(opt + 10, 0,
2185 (unsigned long *)&flow_ctrl);
2186 else if (!strncmp(opt, "pause:", 6))
2187 strict_strtoul(opt + 6, 0, (unsigned long *)&pause);
2188#ifdef CONFIG_STMMAC_TIMER
2189 else if (!strncmp(opt, "tmrate:", 7))
2190 strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate);
2191#endif
2192 }
2193 return 0;
2194}
2195
2196__setup("stmmaceth=", stmmac_cmdline_opt);
2197#endif
2198
2199module_init(stmmac_init_module);
2200module_exit(stmmac_cleanup_module);
2201
2202MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
2203MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
2204MODULE_LICENSE("GPL");
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
new file mode 100644
index 000000000000..8498552a22fc
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -0,0 +1,217 @@
1/*******************************************************************************
2 STMMAC Ethernet Driver -- MDIO bus implementation
3 Provides Bus interface for MII registers
4
5 Copyright (C) 2007-2009 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Author: Carl Shaw <carl.shaw@st.com>
24 Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/
26
27#include <linux/netdevice.h>
28#include <linux/mii.h>
29#include <linux/phy.h>
30
31#include "stmmac.h"
32
33#define MII_BUSY 0x00000001
34#define MII_WRITE 0x00000002
35
36/**
37 * stmmac_mdio_read
38 * @bus: points to the mii_bus structure
39 * @phyaddr: MII addr reg bits 15-11
40 * @phyreg: MII addr reg bits 10-6
41 * Description: it reads data from the MII register from within the phy device.
42 * For the 7111 GMAC, we must set the bit 0 in the MII address register while
43 * accessing the PHY registers.
44 * Fortunately, it seems this has no drawback for the 7109 MAC.
45 */
46static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
47{
48 struct net_device *ndev = bus->priv;
49 struct stmmac_priv *priv = netdev_priv(ndev);
50 unsigned long ioaddr = ndev->base_addr;
51 unsigned int mii_address = priv->mac_type->hw.mii.addr;
52 unsigned int mii_data = priv->mac_type->hw.mii.data;
53
54 int data;
55 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
56 ((phyreg << 6) & (0x000007C0)));
57 regValue |= MII_BUSY; /* in case of GMAC */
58
59 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
60 writel(regValue, ioaddr + mii_address);
61 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
62
63 /* Read the data from the MII data register */
64 data = (int)readl(ioaddr + mii_data);
65
66 return data;
67}
68
69/**
70 * stmmac_mdio_write
71 * @bus: points to the mii_bus structure
72 * @phyaddr: MII addr reg bits 15-11
73 * @phyreg: MII addr reg bits 10-6
74 * @phydata: phy data
75 * Description: it writes the data into the MII register from within the device.
76 */
77static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
78 u16 phydata)
79{
80 struct net_device *ndev = bus->priv;
81 struct stmmac_priv *priv = netdev_priv(ndev);
82 unsigned long ioaddr = ndev->base_addr;
83 unsigned int mii_address = priv->mac_type->hw.mii.addr;
84 unsigned int mii_data = priv->mac_type->hw.mii.data;
85
86 u16 value =
87 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
88 | MII_WRITE;
89
90 value |= MII_BUSY;
91
92 /* Wait until any existing MII operation is complete */
93 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
94
95 /* Set the MII address register to write */
96 writel(phydata, ioaddr + mii_data);
97 writel(value, ioaddr + mii_address);
98
99 /* Wait until any existing MII operation is complete */
100 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
101
102 return 0;
103}
104
105/**
106 * stmmac_mdio_reset
107 * @bus: points to the mii_bus structure
108 * Description: reset the MII bus
109 */
110static int stmmac_mdio_reset(struct mii_bus *bus)
111{
112 struct net_device *ndev = bus->priv;
113 struct stmmac_priv *priv = netdev_priv(ndev);
114 unsigned long ioaddr = ndev->base_addr;
115 unsigned int mii_address = priv->mac_type->hw.mii.addr;
116
117 if (priv->phy_reset) {
118 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
119 priv->phy_reset(priv->bsp_priv);
120 }
121
122 /* This is a workaround for problems with the STE101P PHY.
123 * It doesn't complete its reset until at least one clock cycle
124 * on MDC, so perform a dummy mdio read.
125 */
126 writel(0, ioaddr + mii_address);
127
128 return 0;
129}
130
131/**
132 * stmmac_mdio_register
133 * @ndev: net device structure
134 * Description: it registers the MII bus
135 */
136int stmmac_mdio_register(struct net_device *ndev)
137{
138 int err = 0;
139 struct mii_bus *new_bus;
140 int *irqlist;
141 struct stmmac_priv *priv = netdev_priv(ndev);
142 int addr, found;
143
144 new_bus = mdiobus_alloc();
145 if (new_bus == NULL)
146 return -ENOMEM;
147
148 irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
149 if (irqlist == NULL) {
150 err = -ENOMEM;
151 goto irqlist_alloc_fail;
152 }
153
154 /* Assign IRQ to phy at address phy_addr */
155 if (priv->phy_addr != -1)
156 irqlist[priv->phy_addr] = priv->phy_irq;
157
158 new_bus->name = "STMMAC MII Bus";
159 new_bus->read = &stmmac_mdio_read;
160 new_bus->write = &stmmac_mdio_write;
161 new_bus->reset = &stmmac_mdio_reset;
162 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
163 new_bus->priv = ndev;
164 new_bus->irq = irqlist;
165 new_bus->phy_mask = priv->phy_mask;
166 new_bus->parent = priv->device;
167 err = mdiobus_register(new_bus);
168 if (err != 0) {
169 pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
170 goto bus_register_fail;
171 }
172
173 priv->mii = new_bus;
174
175 found = 0;
176 for (addr = 0; addr < 32; addr++) {
177 struct phy_device *phydev = new_bus->phy_map[addr];
178 if (phydev) {
179 if (priv->phy_addr == -1) {
180 priv->phy_addr = addr;
181 phydev->irq = priv->phy_irq;
182 irqlist[addr] = priv->phy_irq;
183 }
184 pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
185 ndev->name, phydev->phy_id, addr,
186 phydev->irq, dev_name(&phydev->dev),
187 (addr == priv->phy_addr) ? " active" : "");
188 found = 1;
189 }
190 }
191
192 if (!found)
193 pr_warning("%s: No PHY found\n", ndev->name);
194
195 return 0;
196bus_register_fail:
197 kfree(irqlist);
198irqlist_alloc_fail:
199 kfree(new_bus);
200 return err;
201}
202
203/**
204 * stmmac_mdio_unregister
205 * @ndev: net device structure
206 * Description: it unregisters the MII bus
207 */
208int stmmac_mdio_unregister(struct net_device *ndev)
209{
210 struct stmmac_priv *priv = netdev_priv(ndev);
211
212 mdiobus_unregister(priv->mii);
213 priv->mii->priv = NULL;
214 kfree(priv->mii);
215
216 return 0;
217}
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
new file mode 100644
index 000000000000..b838c6582077
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -0,0 +1,140 @@
1/*******************************************************************************
2 STMMAC external timer support.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include <linux/kernel.h>
26#include <linux/etherdevice.h>
27#include "stmmac_timer.h"
28
/* Timer callback: kick the stmmac processing for the device passed as
 * the opaque cookie when the timer was registered. */
static void stmmac_timer_handler(void *data)
{
	struct net_device *dev = data;

	stmmac_schedule(dev);
}
37
/* Announce which external timer backend is in use.  Fix: no trailing
 * semicolon in the expansion, so call sites terminate the statement
 * themselves and the macro is safe inside if/else bodies. */
#define STMMAC_TIMER_MSG(timer, freq) \
	printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq)
40
41#if defined(CONFIG_STMMAC_RTC_TIMER)
42#include <linux/rtc.h>
43static struct rtc_device *stmmac_rtc;
44static rtc_task_t stmmac_task;
45
46static void stmmac_rtc_start(unsigned int new_freq)
47{
48 rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
49 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
50 return;
51}
52
53static void stmmac_rtc_stop(void)
54{
55 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
56 return;
57}
58
59int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
60{
61 stmmac_task.private_data = dev;
62 stmmac_task.func = stmmac_timer_handler;
63
64 stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
65 if (stmmac_rtc == NULL) {
66 pr_error("open rtc device failed\n");
67 return -ENODEV;
68 }
69
70 rtc_irq_register(stmmac_rtc, &stmmac_task);
71
72 /* Periodic mode is not supported */
73 if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
74 pr_error("set periodic failed\n");
75 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
76 rtc_class_close(stmmac_rtc);
77 return -1;
78 }
79
80 STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
81
82 tm->timer_start = stmmac_rtc_start;
83 tm->timer_stop = stmmac_rtc_stop;
84
85 return 0;
86}
87
88int stmmac_close_ext_timer(void)
89{
90 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
91 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
92 rtc_class_close(stmmac_rtc);
93 return 0;
94}
95
96#elif defined(CONFIG_STMMAC_TMU_TIMER)
97#include <linux/clk.h>
98#define TMU_CHANNEL "tmu2_clk"
99static struct clk *timer_clock;
100
101static void stmmac_tmu_start(unsigned int new_freq)
102{
103 clk_set_rate(timer_clock, new_freq);
104 clk_enable(timer_clock);
105 return;
106}
107
108static void stmmac_tmu_stop(void)
109{
110 clk_disable(timer_clock);
111 return;
112}
113
114int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
115{
116 timer_clock = clk_get(NULL, TMU_CHANNEL);
117
118 if (timer_clock == NULL)
119 return -1;
120
121 if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
122 timer_clock = NULL;
123 return -1;
124 }
125
126 STMMAC_TIMER_MSG("TMU2", tm->freq);
127 tm->timer_start = stmmac_tmu_start;
128 tm->timer_stop = stmmac_tmu_stop;
129
130 return 0;
131}
132
133int stmmac_close_ext_timer(void)
134{
135 clk_disable(timer_clock);
136 tmu2_unregister_user();
137 clk_put(timer_clock);
138 return 0;
139}
140#endif
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h
new file mode 100644
index 000000000000..f795cae33725
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.h
@@ -0,0 +1,41 @@
1/*******************************************************************************
2 STMMAC external timer Header File.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25struct stmmac_timer {
26 void (*timer_start) (unsigned int new_freq);
27 void (*timer_stop) (void);
28 unsigned int freq;
29};
30
31/* Open the HW timer device and return 0 in case of success */
32int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
33/* Stop the timer and release it */
34int stmmac_close_ext_timer(void);
35/* Function used for scheduling task within the stmmac */
36void stmmac_schedule(struct net_device *dev);
37
38#if defined(CONFIG_STMMAC_TMU_TIMER)
39extern int tmu2_register_user(void *fnt, void *data);
40extern void tmu2_unregister_user(void);
41#endif
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 305ec3d783db..d6f4faf5bbcb 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1033,10 +1033,8 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
1033 (csum_stuff_off << 21)); 1033 (csum_stuff_off << 21));
1034 } 1034 }
1035 1035
1036 local_irq_save(flags); 1036 if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
1037 if (!spin_trylock(&gp->tx_lock)) {
1038 /* Tell upper layer to requeue */ 1037 /* Tell upper layer to requeue */
1039 local_irq_restore(flags);
1040 return NETDEV_TX_LOCKED; 1038 return NETDEV_TX_LOCKED;
1041 } 1039 }
1042 /* We raced with gem_do_stop() */ 1040 /* We raced with gem_do_stop() */
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index d1298e5b72c5..6572e8a54520 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -22,11 +22,7 @@
22 * All Rights Reserved. 22 * All Rights Reserved.
23 */ 23 */
24 24
25#ifdef TC35815_NAPI 25#define DRV_VERSION "1.39"
26#define DRV_VERSION "1.38-NAPI"
27#else
28#define DRV_VERSION "1.38"
29#endif
30static const char *version = "tc35815.c:v" DRV_VERSION "\n"; 26static const char *version = "tc35815.c:v" DRV_VERSION "\n";
31#define MODNAME "tc35815" 27#define MODNAME "tc35815"
32 28
@@ -54,13 +50,6 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
54#include <asm/io.h> 50#include <asm/io.h>
55#include <asm/byteorder.h> 51#include <asm/byteorder.h>
56 52
57/* First, a few definitions that the brave might change. */
58
59#define GATHER_TXINT /* On-Demand Tx Interrupt */
60#define WORKAROUND_LOSTCAR
61#define WORKAROUND_100HALF_PROMISC
62/* #define TC35815_USE_PACKEDBUFFER */
63
64enum tc35815_chiptype { 53enum tc35815_chiptype {
65 TC35815CF = 0, 54 TC35815CF = 0,
66 TC35815_NWU, 55 TC35815_NWU,
@@ -330,17 +319,10 @@ struct BDesc {
330 319
331 320
332/* Some useful constants. */ 321/* Some useful constants. */
333#undef NO_CHECK_CARRIER /* Does not check No-Carrier with TP */
334 322
335#ifdef NO_CHECK_CARRIER 323#define TX_CTL_CMD (Tx_EnTxPar | Tx_EnLateColl | \
336#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
337 Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \
338 Tx_En) /* maybe 0x7b01 */
339#else
340#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
341 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \ 324 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
342 Tx_En) /* maybe 0x7b01 */ 325 Tx_En) /* maybe 0x7b01 */
343#endif
344/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */ 326/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
345#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \ 327#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
346 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */ 328 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
@@ -361,13 +343,6 @@ struct BDesc {
361#define TX_THRESHOLD_KEEP_LIMIT 10 343#define TX_THRESHOLD_KEEP_LIMIT 10
362 344
363/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ 345/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
364#ifdef TC35815_USE_PACKEDBUFFER
365#define FD_PAGE_NUM 2
366#define RX_BUF_NUM 8 /* >= 2 */
367#define RX_FD_NUM 250 /* >= 32 */
368#define TX_FD_NUM 128
369#define RX_BUF_SIZE PAGE_SIZE
370#else /* TC35815_USE_PACKEDBUFFER */
371#define FD_PAGE_NUM 4 346#define FD_PAGE_NUM 4
372#define RX_BUF_NUM 128 /* < 256 */ 347#define RX_BUF_NUM 128 /* < 256 */
373#define RX_FD_NUM 256 /* >= 32 */ 348#define RX_FD_NUM 256 /* >= 32 */
@@ -381,7 +356,6 @@ struct BDesc {
381#define RX_BUF_SIZE \ 356#define RX_BUF_SIZE \
382 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN) 357 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
383#endif 358#endif
384#endif /* TC35815_USE_PACKEDBUFFER */
385#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */ 359#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */
386#define NAPI_WEIGHT 16 360#define NAPI_WEIGHT 16
387 361
@@ -439,11 +413,7 @@ struct tc35815_local {
439 /* 413 /*
440 * Transmitting: Batch Mode. 414 * Transmitting: Batch Mode.
441 * 1 BD in 1 TxFD. 415 * 1 BD in 1 TxFD.
442 * Receiving: Packing Mode. (TC35815_USE_PACKEDBUFFER) 416 * Receiving: Non-Packing Mode.
443 * 1 circular FD for Free Buffer List.
444 * RX_BUF_NUM BD in Free Buffer FD.
445 * One Free Buffer BD has PAGE_SIZE data buffer.
446 * Or Non-Packing Mode.
447 * 1 circular FD for Free Buffer List. 417 * 1 circular FD for Free Buffer List.
448 * RX_BUF_NUM BD in Free Buffer FD. 418 * RX_BUF_NUM BD in Free Buffer FD.
449 * One Free Buffer BD has ETH_FRAME_LEN data buffer. 419 * One Free Buffer BD has ETH_FRAME_LEN data buffer.
@@ -457,21 +427,11 @@ struct tc35815_local {
457 struct RxFD *rfd_limit; 427 struct RxFD *rfd_limit;
458 struct RxFD *rfd_cur; 428 struct RxFD *rfd_cur;
459 struct FrFD *fbl_ptr; 429 struct FrFD *fbl_ptr;
460#ifdef TC35815_USE_PACKEDBUFFER
461 unsigned char fbl_curid;
462 void *data_buf[RX_BUF_NUM]; /* packing */
463 dma_addr_t data_buf_dma[RX_BUF_NUM];
464 struct {
465 struct sk_buff *skb;
466 dma_addr_t skb_dma;
467 } tx_skbs[TX_FD_NUM];
468#else
469 unsigned int fbl_count; 430 unsigned int fbl_count;
470 struct { 431 struct {
471 struct sk_buff *skb; 432 struct sk_buff *skb;
472 dma_addr_t skb_dma; 433 dma_addr_t skb_dma;
473 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; 434 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
474#endif
475 u32 msg_enable; 435 u32 msg_enable;
476 enum tc35815_chiptype chiptype; 436 enum tc35815_chiptype chiptype;
477}; 437};
@@ -486,51 +446,6 @@ static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
486 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); 446 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
487} 447}
488#endif 448#endif
489#ifdef TC35815_USE_PACKEDBUFFER
490static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
491{
492 int i;
493 for (i = 0; i < RX_BUF_NUM; i++) {
494 if (bus >= lp->data_buf_dma[i] &&
495 bus < lp->data_buf_dma[i] + PAGE_SIZE)
496 return (void *)((u8 *)lp->data_buf[i] +
497 (bus - lp->data_buf_dma[i]));
498 }
499 return NULL;
500}
501
502#define TC35815_DMA_SYNC_ONDEMAND
503static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
504{
505#ifdef TC35815_DMA_SYNC_ONDEMAND
506 void *buf;
507 /* pci_map + pci_dma_sync will be more effective than
508 * pci_alloc_consistent on some archs. */
509 buf = (void *)__get_free_page(GFP_ATOMIC);
510 if (!buf)
511 return NULL;
512 *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
513 PCI_DMA_FROMDEVICE);
514 if (pci_dma_mapping_error(hwdev, *dma_handle)) {
515 free_page((unsigned long)buf);
516 return NULL;
517 }
518 return buf;
519#else
520 return pci_alloc_consistent(hwdev, PAGE_SIZE, dma_handle);
521#endif
522}
523
524static void free_rxbuf_page(struct pci_dev *hwdev, void *buf, dma_addr_t dma_handle)
525{
526#ifdef TC35815_DMA_SYNC_ONDEMAND
527 pci_unmap_single(hwdev, dma_handle, PAGE_SIZE, PCI_DMA_FROMDEVICE);
528 free_page((unsigned long)buf);
529#else
530 pci_free_consistent(hwdev, PAGE_SIZE, buf, dma_handle);
531#endif
532}
533#else /* TC35815_USE_PACKEDBUFFER */
534static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, 449static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
535 struct pci_dev *hwdev, 450 struct pci_dev *hwdev,
536 dma_addr_t *dma_handle) 451 dma_addr_t *dma_handle)
@@ -555,19 +470,14 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_
555 PCI_DMA_FROMDEVICE); 470 PCI_DMA_FROMDEVICE);
556 dev_kfree_skb_any(skb); 471 dev_kfree_skb_any(skb);
557} 472}
558#endif /* TC35815_USE_PACKEDBUFFER */
559 473
560/* Index to functions, as function prototypes. */ 474/* Index to functions, as function prototypes. */
561 475
562static int tc35815_open(struct net_device *dev); 476static int tc35815_open(struct net_device *dev);
563static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); 477static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
564static irqreturn_t tc35815_interrupt(int irq, void *dev_id); 478static irqreturn_t tc35815_interrupt(int irq, void *dev_id);
565#ifdef TC35815_NAPI
566static int tc35815_rx(struct net_device *dev, int limit); 479static int tc35815_rx(struct net_device *dev, int limit);
567static int tc35815_poll(struct napi_struct *napi, int budget); 480static int tc35815_poll(struct napi_struct *napi, int budget);
568#else
569static void tc35815_rx(struct net_device *dev);
570#endif
571static void tc35815_txdone(struct net_device *dev); 481static void tc35815_txdone(struct net_device *dev);
572static int tc35815_close(struct net_device *dev); 482static int tc35815_close(struct net_device *dev);
573static struct net_device_stats *tc35815_get_stats(struct net_device *dev); 483static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
@@ -654,8 +564,6 @@ static void tc_handle_link_change(struct net_device *dev)
654 * TX4939 PCFG.SPEEDn bit will be changed on 564 * TX4939 PCFG.SPEEDn bit will be changed on
655 * NETDEV_CHANGE event. 565 * NETDEV_CHANGE event.
656 */ 566 */
657
658#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR)
659 /* 567 /*
660 * WORKAROUND: enable LostCrS only if half duplex 568 * WORKAROUND: enable LostCrS only if half duplex
661 * operation. 569 * operation.
@@ -665,7 +573,6 @@ static void tc_handle_link_change(struct net_device *dev)
665 lp->chiptype != TC35815_TX4939) 573 lp->chiptype != TC35815_TX4939)
666 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, 574 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
667 &tr->Tx_Ctl); 575 &tr->Tx_Ctl);
668#endif
669 576
670 lp->speed = phydev->speed; 577 lp->speed = phydev->speed;
671 lp->duplex = phydev->duplex; 578 lp->duplex = phydev->duplex;
@@ -674,11 +581,9 @@ static void tc_handle_link_change(struct net_device *dev)
674 581
675 if (phydev->link != lp->link) { 582 if (phydev->link != lp->link) {
676 if (phydev->link) { 583 if (phydev->link) {
677#ifdef WORKAROUND_100HALF_PROMISC
678 /* delayed promiscuous enabling */ 584 /* delayed promiscuous enabling */
679 if (dev->flags & IFF_PROMISC) 585 if (dev->flags & IFF_PROMISC)
680 tc35815_set_multicast_list(dev); 586 tc35815_set_multicast_list(dev);
681#endif
682 } else { 587 } else {
683 lp->speed = 0; 588 lp->speed = 0;
684 lp->duplex = -1; 589 lp->duplex = -1;
@@ -923,9 +828,7 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
923 dev->netdev_ops = &tc35815_netdev_ops; 828 dev->netdev_ops = &tc35815_netdev_ops;
924 dev->ethtool_ops = &tc35815_ethtool_ops; 829 dev->ethtool_ops = &tc35815_ethtool_ops;
925 dev->watchdog_timeo = TC35815_TX_TIMEOUT; 830 dev->watchdog_timeo = TC35815_TX_TIMEOUT;
926#ifdef TC35815_NAPI
927 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); 831 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
928#endif
929 832
930 dev->irq = pdev->irq; 833 dev->irq = pdev->irq;
931 dev->base_addr = (unsigned long)ioaddr; 834 dev->base_addr = (unsigned long)ioaddr;
@@ -1007,25 +910,6 @@ tc35815_init_queues(struct net_device *dev)
1007 if (!lp->fd_buf) 910 if (!lp->fd_buf)
1008 return -ENOMEM; 911 return -ENOMEM;
1009 for (i = 0; i < RX_BUF_NUM; i++) { 912 for (i = 0; i < RX_BUF_NUM; i++) {
1010#ifdef TC35815_USE_PACKEDBUFFER
1011 lp->data_buf[i] =
1012 alloc_rxbuf_page(lp->pci_dev,
1013 &lp->data_buf_dma[i]);
1014 if (!lp->data_buf[i]) {
1015 while (--i >= 0) {
1016 free_rxbuf_page(lp->pci_dev,
1017 lp->data_buf[i],
1018 lp->data_buf_dma[i]);
1019 lp->data_buf[i] = NULL;
1020 }
1021 pci_free_consistent(lp->pci_dev,
1022 PAGE_SIZE * FD_PAGE_NUM,
1023 lp->fd_buf,
1024 lp->fd_buf_dma);
1025 lp->fd_buf = NULL;
1026 return -ENOMEM;
1027 }
1028#else
1029 lp->rx_skbs[i].skb = 913 lp->rx_skbs[i].skb =
1030 alloc_rxbuf_skb(dev, lp->pci_dev, 914 alloc_rxbuf_skb(dev, lp->pci_dev,
1031 &lp->rx_skbs[i].skb_dma); 915 &lp->rx_skbs[i].skb_dma);
@@ -1043,15 +927,9 @@ tc35815_init_queues(struct net_device *dev)
1043 lp->fd_buf = NULL; 927 lp->fd_buf = NULL;
1044 return -ENOMEM; 928 return -ENOMEM;
1045 } 929 }
1046#endif
1047 } 930 }
1048 printk(KERN_DEBUG "%s: FD buf %p DataBuf", 931 printk(KERN_DEBUG "%s: FD buf %p DataBuf",
1049 dev->name, lp->fd_buf); 932 dev->name, lp->fd_buf);
1050#ifdef TC35815_USE_PACKEDBUFFER
1051 printk(" DataBuf");
1052 for (i = 0; i < RX_BUF_NUM; i++)
1053 printk(" %p", lp->data_buf[i]);
1054#endif
1055 printk("\n"); 933 printk("\n");
1056 } else { 934 } else {
1057 for (i = 0; i < FD_PAGE_NUM; i++) 935 for (i = 0; i < FD_PAGE_NUM; i++)
@@ -1084,7 +962,6 @@ tc35815_init_queues(struct net_device *dev)
1084 lp->fbl_ptr = (struct FrFD *)fd_addr; 962 lp->fbl_ptr = (struct FrFD *)fd_addr;
1085 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr)); 963 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
1086 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD); 964 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
1087#ifndef TC35815_USE_PACKEDBUFFER
1088 /* 965 /*
1089 * move all allocated skbs to head of rx_skbs[] array. 966 * move all allocated skbs to head of rx_skbs[] array.
1090 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in 967 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in
@@ -1102,11 +979,7 @@ tc35815_init_queues(struct net_device *dev)
1102 lp->fbl_count++; 979 lp->fbl_count++;
1103 } 980 }
1104 } 981 }
1105#endif
1106 for (i = 0; i < RX_BUF_NUM; i++) { 982 for (i = 0; i < RX_BUF_NUM; i++) {
1107#ifdef TC35815_USE_PACKEDBUFFER
1108 lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]);
1109#else
1110 if (i >= lp->fbl_count) { 983 if (i >= lp->fbl_count) {
1111 lp->fbl_ptr->bd[i].BuffData = 0; 984 lp->fbl_ptr->bd[i].BuffData = 0;
1112 lp->fbl_ptr->bd[i].BDCtl = 0; 985 lp->fbl_ptr->bd[i].BDCtl = 0;
@@ -1114,15 +987,11 @@ tc35815_init_queues(struct net_device *dev)
1114 } 987 }
1115 lp->fbl_ptr->bd[i].BuffData = 988 lp->fbl_ptr->bd[i].BuffData =
1116 cpu_to_le32(lp->rx_skbs[i].skb_dma); 989 cpu_to_le32(lp->rx_skbs[i].skb_dma);
1117#endif
1118 /* BDID is index of FrFD.bd[] */ 990 /* BDID is index of FrFD.bd[] */
1119 lp->fbl_ptr->bd[i].BDCtl = 991 lp->fbl_ptr->bd[i].BDCtl =
1120 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) | 992 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
1121 RX_BUF_SIZE); 993 RX_BUF_SIZE);
1122 } 994 }
1123#ifdef TC35815_USE_PACKEDBUFFER
1124 lp->fbl_curid = 0;
1125#endif
1126 995
1127 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n", 996 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
1128 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr); 997 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
@@ -1196,19 +1065,11 @@ tc35815_free_queues(struct net_device *dev)
1196 lp->fbl_ptr = NULL; 1065 lp->fbl_ptr = NULL;
1197 1066
1198 for (i = 0; i < RX_BUF_NUM; i++) { 1067 for (i = 0; i < RX_BUF_NUM; i++) {
1199#ifdef TC35815_USE_PACKEDBUFFER
1200 if (lp->data_buf[i]) {
1201 free_rxbuf_page(lp->pci_dev,
1202 lp->data_buf[i], lp->data_buf_dma[i]);
1203 lp->data_buf[i] = NULL;
1204 }
1205#else
1206 if (lp->rx_skbs[i].skb) { 1068 if (lp->rx_skbs[i].skb) {
1207 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb, 1069 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
1208 lp->rx_skbs[i].skb_dma); 1070 lp->rx_skbs[i].skb_dma);
1209 lp->rx_skbs[i].skb = NULL; 1071 lp->rx_skbs[i].skb = NULL;
1210 } 1072 }
1211#endif
1212 } 1073 }
1213 if (lp->fd_buf) { 1074 if (lp->fd_buf) {
1214 pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, 1075 pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
@@ -1254,7 +1115,7 @@ dump_rxfd(struct RxFD *fd)
1254 return bd_count; 1115 return bd_count;
1255} 1116}
1256 1117
1257#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER) 1118#ifdef DEBUG
1258static void 1119static void
1259dump_frfd(struct FrFD *fd) 1120dump_frfd(struct FrFD *fd)
1260{ 1121{
@@ -1271,9 +1132,7 @@ dump_frfd(struct FrFD *fd)
1271 le32_to_cpu(fd->bd[i].BDCtl)); 1132 le32_to_cpu(fd->bd[i].BDCtl));
1272 printk("\n"); 1133 printk("\n");
1273} 1134}
1274#endif
1275 1135
1276#ifdef DEBUG
1277static void 1136static void
1278panic_queues(struct net_device *dev) 1137panic_queues(struct net_device *dev)
1279{ 1138{
@@ -1400,9 +1259,7 @@ tc35815_open(struct net_device *dev)
1400 return -EAGAIN; 1259 return -EAGAIN;
1401 } 1260 }
1402 1261
1403#ifdef TC35815_NAPI
1404 napi_enable(&lp->napi); 1262 napi_enable(&lp->napi);
1405#endif
1406 1263
1407 /* Reset the hardware here. Don't forget to set the station address. */ 1264 /* Reset the hardware here. Don't forget to set the station address. */
1408 spin_lock_irq(&lp->lock); 1265 spin_lock_irq(&lp->lock);
@@ -1478,9 +1335,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1478 (struct tc35815_regs __iomem *)dev->base_addr; 1335 (struct tc35815_regs __iomem *)dev->base_addr;
1479 /* Start DMA Transmitter. */ 1336 /* Start DMA Transmitter. */
1480 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 1337 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
1481#ifdef GATHER_TXINT
1482 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 1338 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
1483#endif
1484 if (netif_msg_tx_queued(lp)) { 1339 if (netif_msg_tx_queued(lp)) {
1485 printk("%s: starting TxFD.\n", dev->name); 1340 printk("%s: starting TxFD.\n", dev->name);
1486 dump_txfd(txfd); 1341 dump_txfd(txfd);
@@ -1536,11 +1391,7 @@ static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1536 tc35815_schedule_restart(dev); 1391 tc35815_schedule_restart(dev);
1537} 1392}
1538 1393
1539#ifdef TC35815_NAPI
1540static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) 1394static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1541#else
1542static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1543#endif
1544{ 1395{
1545 struct tc35815_local *lp = netdev_priv(dev); 1396 struct tc35815_local *lp = netdev_priv(dev);
1546 int ret = -1; 1397 int ret = -1;
@@ -1579,12 +1430,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1579 /* normal notification */ 1430 /* normal notification */
1580 if (status & Int_IntMacRx) { 1431 if (status & Int_IntMacRx) {
1581 /* Got a packet(s). */ 1432 /* Got a packet(s). */
1582#ifdef TC35815_NAPI
1583 ret = tc35815_rx(dev, limit); 1433 ret = tc35815_rx(dev, limit);
1584#else
1585 tc35815_rx(dev);
1586 ret = 0;
1587#endif
1588 lp->lstats.rx_ints++; 1434 lp->lstats.rx_ints++;
1589 } 1435 }
1590 if (status & Int_IntMacTx) { 1436 if (status & Int_IntMacTx) {
@@ -1592,7 +1438,8 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1592 lp->lstats.tx_ints++; 1438 lp->lstats.tx_ints++;
1593 tc35815_txdone(dev); 1439 tc35815_txdone(dev);
1594 netif_wake_queue(dev); 1440 netif_wake_queue(dev);
1595 ret = 0; 1441 if (ret < 0)
1442 ret = 0;
1596 } 1443 }
1597 return ret; 1444 return ret;
1598} 1445}
@@ -1607,7 +1454,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1607 struct tc35815_local *lp = netdev_priv(dev); 1454 struct tc35815_local *lp = netdev_priv(dev);
1608 struct tc35815_regs __iomem *tr = 1455 struct tc35815_regs __iomem *tr =
1609 (struct tc35815_regs __iomem *)dev->base_addr; 1456 (struct tc35815_regs __iomem *)dev->base_addr;
1610#ifdef TC35815_NAPI
1611 u32 dmactl = tc_readl(&tr->DMA_Ctl); 1457 u32 dmactl = tc_readl(&tr->DMA_Ctl);
1612 1458
1613 if (!(dmactl & DMA_IntMask)) { 1459 if (!(dmactl & DMA_IntMask)) {
@@ -1624,22 +1470,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1624 return IRQ_HANDLED; 1470 return IRQ_HANDLED;
1625 } 1471 }
1626 return IRQ_NONE; 1472 return IRQ_NONE;
1627#else
1628 int handled;
1629 u32 status;
1630
1631 spin_lock(&lp->lock);
1632 status = tc_readl(&tr->Int_Src);
1633 /* BLEx, FDAEx will be cleared later */
1634 tc_writel(status & ~(Int_BLEx | Int_FDAEx),
1635 &tr->Int_Src); /* write to clear */
1636 handled = tc35815_do_interrupt(dev, status);
1637 if (status & (Int_BLEx | Int_FDAEx))
1638 tc_writel(status & (Int_BLEx | Int_FDAEx), &tr->Int_Src);
1639 (void)tc_readl(&tr->Int_Src); /* flush */
1640 spin_unlock(&lp->lock);
1641 return IRQ_RETVAL(handled >= 0);
1642#endif /* TC35815_NAPI */
1643} 1473}
1644 1474
1645#ifdef CONFIG_NET_POLL_CONTROLLER 1475#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1652,20 +1482,13 @@ static void tc35815_poll_controller(struct net_device *dev)
1652#endif 1482#endif
1653 1483
1654/* We have a good packet(s), get it/them out of the buffers. */ 1484/* We have a good packet(s), get it/them out of the buffers. */
1655#ifdef TC35815_NAPI
1656static int 1485static int
1657tc35815_rx(struct net_device *dev, int limit) 1486tc35815_rx(struct net_device *dev, int limit)
1658#else
1659static void
1660tc35815_rx(struct net_device *dev)
1661#endif
1662{ 1487{
1663 struct tc35815_local *lp = netdev_priv(dev); 1488 struct tc35815_local *lp = netdev_priv(dev);
1664 unsigned int fdctl; 1489 unsigned int fdctl;
1665 int i; 1490 int i;
1666#ifdef TC35815_NAPI
1667 int received = 0; 1491 int received = 0;
1668#endif
1669 1492
1670 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) { 1493 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
1671 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat); 1494 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
@@ -1684,52 +1507,9 @@ tc35815_rx(struct net_device *dev)
1684 struct sk_buff *skb; 1507 struct sk_buff *skb;
1685 unsigned char *data; 1508 unsigned char *data;
1686 int cur_bd; 1509 int cur_bd;
1687#ifdef TC35815_USE_PACKEDBUFFER
1688 int offset;
1689#endif
1690 1510
1691#ifdef TC35815_NAPI
1692 if (--limit < 0) 1511 if (--limit < 0)
1693 break; 1512 break;
1694#endif
1695#ifdef TC35815_USE_PACKEDBUFFER
1696 BUG_ON(bd_count > 2);
1697 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1698 if (skb == NULL) {
1699 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
1700 dev->name);
1701 dev->stats.rx_dropped++;
1702 break;
1703 }
1704 skb_reserve(skb, NET_IP_ALIGN);
1705
1706 data = skb_put(skb, pkt_len);
1707
1708 /* copy from receive buffer */
1709 cur_bd = 0;
1710 offset = 0;
1711 while (offset < pkt_len && cur_bd < bd_count) {
1712 int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
1713 BD_BuffLength_MASK;
1714 dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData);
1715 void *rxbuf = rxbuf_bus_to_virt(lp, dma);
1716 if (offset + len > pkt_len)
1717 len = pkt_len - offset;
1718#ifdef TC35815_DMA_SYNC_ONDEMAND
1719 pci_dma_sync_single_for_cpu(lp->pci_dev,
1720 dma, len,
1721 PCI_DMA_FROMDEVICE);
1722#endif
1723 memcpy(data + offset, rxbuf, len);
1724#ifdef TC35815_DMA_SYNC_ONDEMAND
1725 pci_dma_sync_single_for_device(lp->pci_dev,
1726 dma, len,
1727 PCI_DMA_FROMDEVICE);
1728#endif
1729 offset += len;
1730 cur_bd++;
1731 }
1732#else /* TC35815_USE_PACKEDBUFFER */
1733 BUG_ON(bd_count > 1); 1513 BUG_ON(bd_count > 1);
1734 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl) 1514 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
1735 & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; 1515 & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
@@ -1757,16 +1537,11 @@ tc35815_rx(struct net_device *dev)
1757 memmove(skb->data, skb->data - NET_IP_ALIGN, 1537 memmove(skb->data, skb->data - NET_IP_ALIGN,
1758 pkt_len); 1538 pkt_len);
1759 data = skb_put(skb, pkt_len); 1539 data = skb_put(skb, pkt_len);
1760#endif /* TC35815_USE_PACKEDBUFFER */
1761 if (netif_msg_pktdata(lp)) 1540 if (netif_msg_pktdata(lp))
1762 print_eth(data); 1541 print_eth(data);
1763 skb->protocol = eth_type_trans(skb, dev); 1542 skb->protocol = eth_type_trans(skb, dev);
1764#ifdef TC35815_NAPI
1765 netif_receive_skb(skb); 1543 netif_receive_skb(skb);
1766 received++; 1544 received++;
1767#else
1768 netif_rx(skb);
1769#endif
1770 dev->stats.rx_packets++; 1545 dev->stats.rx_packets++;
1771 dev->stats.rx_bytes += pkt_len; 1546 dev->stats.rx_bytes += pkt_len;
1772 } else { 1547 } else {
@@ -1803,19 +1578,11 @@ tc35815_rx(struct net_device *dev)
1803 BUG_ON(id >= RX_BUF_NUM); 1578 BUG_ON(id >= RX_BUF_NUM);
1804#endif 1579#endif
1805 /* free old buffers */ 1580 /* free old buffers */
1806#ifdef TC35815_USE_PACKEDBUFFER
1807 while (lp->fbl_curid != id)
1808#else
1809 lp->fbl_count--; 1581 lp->fbl_count--;
1810 while (lp->fbl_count < RX_BUF_NUM) 1582 while (lp->fbl_count < RX_BUF_NUM)
1811#endif
1812 { 1583 {
1813#ifdef TC35815_USE_PACKEDBUFFER
1814 unsigned char curid = lp->fbl_curid;
1815#else
1816 unsigned char curid = 1584 unsigned char curid =
1817 (id + 1 + lp->fbl_count) % RX_BUF_NUM; 1585 (id + 1 + lp->fbl_count) % RX_BUF_NUM;
1818#endif
1819 struct BDesc *bd = &lp->fbl_ptr->bd[curid]; 1586 struct BDesc *bd = &lp->fbl_ptr->bd[curid];
1820#ifdef DEBUG 1587#ifdef DEBUG
1821 bdctl = le32_to_cpu(bd->BDCtl); 1588 bdctl = le32_to_cpu(bd->BDCtl);
@@ -1826,7 +1593,6 @@ tc35815_rx(struct net_device *dev)
1826 } 1593 }
1827#endif 1594#endif
1828 /* pass BD to controller */ 1595 /* pass BD to controller */
1829#ifndef TC35815_USE_PACKEDBUFFER
1830 if (!lp->rx_skbs[curid].skb) { 1596 if (!lp->rx_skbs[curid].skb) {
1831 lp->rx_skbs[curid].skb = 1597 lp->rx_skbs[curid].skb =
1832 alloc_rxbuf_skb(dev, 1598 alloc_rxbuf_skb(dev,
@@ -1836,21 +1602,11 @@ tc35815_rx(struct net_device *dev)
1836 break; /* try on next reception */ 1602 break; /* try on next reception */
1837 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma); 1603 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
1838 } 1604 }
1839#endif /* TC35815_USE_PACKEDBUFFER */
1840 /* Note: BDLength was modified by chip. */ 1605 /* Note: BDLength was modified by chip. */
1841 bd->BDCtl = cpu_to_le32(BD_CownsBD | 1606 bd->BDCtl = cpu_to_le32(BD_CownsBD |
1842 (curid << BD_RxBDID_SHIFT) | 1607 (curid << BD_RxBDID_SHIFT) |
1843 RX_BUF_SIZE); 1608 RX_BUF_SIZE);
1844#ifdef TC35815_USE_PACKEDBUFFER
1845 lp->fbl_curid = (curid + 1) % RX_BUF_NUM;
1846 if (netif_msg_rx_status(lp)) {
1847 printk("%s: Entering new FBD %d\n",
1848 dev->name, lp->fbl_curid);
1849 dump_frfd(lp->fbl_ptr);
1850 }
1851#else
1852 lp->fbl_count++; 1609 lp->fbl_count++;
1853#endif
1854 } 1610 }
1855 } 1611 }
1856 1612
@@ -1882,12 +1638,9 @@ tc35815_rx(struct net_device *dev)
1882#endif 1638#endif
1883 } 1639 }
1884 1640
1885#ifdef TC35815_NAPI
1886 return received; 1641 return received;
1887#endif
1888} 1642}
1889 1643
1890#ifdef TC35815_NAPI
1891static int tc35815_poll(struct napi_struct *napi, int budget) 1644static int tc35815_poll(struct napi_struct *napi, int budget)
1892{ 1645{
1893 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); 1646 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
@@ -1924,13 +1677,8 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1924 } 1677 }
1925 return received; 1678 return received;
1926} 1679}
1927#endif
1928 1680
1929#ifdef NO_CHECK_CARRIER
1930#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1931#else
1932#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr) 1681#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1933#endif
1934 1682
1935static void 1683static void
1936tc35815_check_tx_stat(struct net_device *dev, int status) 1684tc35815_check_tx_stat(struct net_device *dev, int status)
@@ -1944,16 +1692,12 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1944 if (status & Tx_TxColl_MASK) 1692 if (status & Tx_TxColl_MASK)
1945 dev->stats.collisions += status & Tx_TxColl_MASK; 1693 dev->stats.collisions += status & Tx_TxColl_MASK;
1946 1694
1947#ifndef NO_CHECK_CARRIER
1948 /* TX4939 does not have NCarr */ 1695 /* TX4939 does not have NCarr */
1949 if (lp->chiptype == TC35815_TX4939) 1696 if (lp->chiptype == TC35815_TX4939)
1950 status &= ~Tx_NCarr; 1697 status &= ~Tx_NCarr;
1951#ifdef WORKAROUND_LOSTCAR
1952 /* WORKAROUND: ignore LostCrS in full duplex operation */ 1698 /* WORKAROUND: ignore LostCrS in full duplex operation */
1953 if (!lp->link || lp->duplex == DUPLEX_FULL) 1699 if (!lp->link || lp->duplex == DUPLEX_FULL)
1954 status &= ~Tx_NCarr; 1700 status &= ~Tx_NCarr;
1955#endif
1956#endif
1957 1701
1958 if (!(status & TX_STA_ERR)) { 1702 if (!(status & TX_STA_ERR)) {
1959 /* no error. */ 1703 /* no error. */
@@ -1983,12 +1727,10 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1983 dev->stats.tx_fifo_errors++; 1727 dev->stats.tx_fifo_errors++;
1984 msg = "Excessive Deferral."; 1728 msg = "Excessive Deferral.";
1985 } 1729 }
1986#ifndef NO_CHECK_CARRIER
1987 if (status & Tx_NCarr) { 1730 if (status & Tx_NCarr) {
1988 dev->stats.tx_carrier_errors++; 1731 dev->stats.tx_carrier_errors++;
1989 msg = "Lost Carrier Sense."; 1732 msg = "Lost Carrier Sense.";
1990 } 1733 }
1991#endif
1992 if (status & Tx_LateColl) { 1734 if (status & Tx_LateColl) {
1993 dev->stats.tx_aborted_errors++; 1735 dev->stats.tx_aborted_errors++;
1994 msg = "Late Collision."; 1736 msg = "Late Collision.";
@@ -2044,11 +1786,7 @@ tc35815_txdone(struct net_device *dev)
2044 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); 1786 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
2045 lp->tx_skbs[lp->tfd_end].skb = NULL; 1787 lp->tx_skbs[lp->tfd_end].skb = NULL;
2046 lp->tx_skbs[lp->tfd_end].skb_dma = 0; 1788 lp->tx_skbs[lp->tfd_end].skb_dma = 0;
2047#ifdef TC35815_NAPI
2048 dev_kfree_skb_any(skb); 1789 dev_kfree_skb_any(skb);
2049#else
2050 dev_kfree_skb_irq(skb);
2051#endif
2052 } 1790 }
2053 txfd->fd.FDSystem = cpu_to_le32(0xffffffff); 1791 txfd->fd.FDSystem = cpu_to_le32(0xffffffff);
2054 1792
@@ -2083,9 +1821,7 @@ tc35815_txdone(struct net_device *dev)
2083 1821
2084 /* start DMA Transmitter again */ 1822 /* start DMA Transmitter again */
2085 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 1823 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
2086#ifdef GATHER_TXINT
2087 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 1824 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
2088#endif
2089 if (netif_msg_tx_queued(lp)) { 1825 if (netif_msg_tx_queued(lp)) {
2090 printk("%s: start TxFD on queue.\n", 1826 printk("%s: start TxFD on queue.\n",
2091 dev->name); 1827 dev->name);
@@ -2112,9 +1848,7 @@ tc35815_close(struct net_device *dev)
2112 struct tc35815_local *lp = netdev_priv(dev); 1848 struct tc35815_local *lp = netdev_priv(dev);
2113 1849
2114 netif_stop_queue(dev); 1850 netif_stop_queue(dev);
2115#ifdef TC35815_NAPI
2116 napi_disable(&lp->napi); 1851 napi_disable(&lp->napi);
2117#endif
2118 if (lp->phy_dev) 1852 if (lp->phy_dev)
2119 phy_stop(lp->phy_dev); 1853 phy_stop(lp->phy_dev);
2120 cancel_work_sync(&lp->restart_work); 1854 cancel_work_sync(&lp->restart_work);
@@ -2198,14 +1932,12 @@ tc35815_set_multicast_list(struct net_device *dev)
2198 (struct tc35815_regs __iomem *)dev->base_addr; 1932 (struct tc35815_regs __iomem *)dev->base_addr;
2199 1933
2200 if (dev->flags & IFF_PROMISC) { 1934 if (dev->flags & IFF_PROMISC) {
2201#ifdef WORKAROUND_100HALF_PROMISC
2202 /* With some (all?) 100MHalf HUB, controller will hang 1935 /* With some (all?) 100MHalf HUB, controller will hang
2203 * if we enabled promiscuous mode before linkup... */ 1936 * if we enabled promiscuous mode before linkup... */
2204 struct tc35815_local *lp = netdev_priv(dev); 1937 struct tc35815_local *lp = netdev_priv(dev);
2205 1938
2206 if (!lp->link) 1939 if (!lp->link)
2207 return; 1940 return;
2208#endif
2209 /* Enable promiscuous mode */ 1941 /* Enable promiscuous mode */
2210 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); 1942 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
2211 } else if ((dev->flags & IFF_ALLMULTI) || 1943 } else if ((dev->flags & IFF_ALLMULTI) ||
@@ -2392,9 +2124,6 @@ static void tc35815_chip_init(struct net_device *dev)
2392 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); 2124 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
2393 else 2125 else
2394 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl); 2126 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
2395#ifdef TC35815_USE_PACKEDBUFFER
2396 tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize); /* Packing */
2397#endif
2398 tc_writel(0, &tr->TxPollCtr); /* Batch mode */ 2127 tc_writel(0, &tr->TxPollCtr); /* Batch mode */
2399 tc_writel(TX_THRESHOLD, &tr->TxThrsh); 2128 tc_writel(TX_THRESHOLD, &tr->TxThrsh);
2400 tc_writel(INT_EN_CMD, &tr->Int_En); 2129 tc_writel(INT_EN_CMD, &tr->Int_En);
@@ -2412,19 +2141,12 @@ static void tc35815_chip_init(struct net_device *dev)
2412 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */ 2141 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */
2413 2142
2414 /* start MAC transmitter */ 2143 /* start MAC transmitter */
2415#ifndef NO_CHECK_CARRIER
2416 /* TX4939 does not have EnLCarr */ 2144 /* TX4939 does not have EnLCarr */
2417 if (lp->chiptype == TC35815_TX4939) 2145 if (lp->chiptype == TC35815_TX4939)
2418 txctl &= ~Tx_EnLCarr; 2146 txctl &= ~Tx_EnLCarr;
2419#ifdef WORKAROUND_LOSTCAR
2420 /* WORKAROUND: ignore LostCrS in full duplex operation */ 2147 /* WORKAROUND: ignore LostCrS in full duplex operation */
2421 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL) 2148 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
2422 txctl &= ~Tx_EnLCarr; 2149 txctl &= ~Tx_EnLCarr;
2423#endif
2424#endif /* !NO_CHECK_CARRIER */
2425#ifdef GATHER_TXINT
2426 txctl &= ~Tx_EnComp; /* disable global tx completion int. */
2427#endif
2428 tc_writel(txctl, &tr->Tx_Ctl); 2150 tc_writel(txctl, &tr->Tx_Ctl);
2429} 2151}
2430 2152
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 79d4868e75a6..492bff68bf2d 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -1878,7 +1878,7 @@ static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1878 udelay(50); /* give hw a chance to clean fifo */ 1878 udelay(50); /* give hw a chance to clean fifo */
1879 continue; 1879 continue;
1880 } 1880 }
1881 avail = MIN(avail, size); 1881 avail = min(avail, size);
1882 DBG("about to push %d bytes starting %p size %d\n", avail, 1882 DBG("about to push %d bytes starting %p size %d\n", avail,
1883 data, size); 1883 data, size);
1884 bdx_tx_push_desc(priv, data, avail); 1884 bdx_tx_push_desc(priv, data, avail);
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 4fc875e5dcdd..124141909e42 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -76,8 +76,6 @@
76#define FIFO_SIZE 4096 76#define FIFO_SIZE 4096
77#define FIFO_EXTRA_SPACE 1024 77#define FIFO_EXTRA_SPACE 1024
78 78
79#define MIN(x, y) ((x) < (y) ? (x) : (y))
80
81#if BITS_PER_LONG == 64 79#if BITS_PER_LONG == 64
82# define H32_64(x) (u32) ((u64)(x) >> 32) 80# define H32_64(x) (u32) ((u64)(x) >> 32)
83# define L32_64(x) (u32) ((u64)(x) & 0xffffffff) 81# define L32_64(x) (u32) ((u64)(x) & 0xffffffff)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ba5d3fe753b6..47a4f0947872 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.102" 71#define DRV_MODULE_VERSION "3.103"
72#define DRV_MODULE_RELDATE "September 1, 2009" 72#define DRV_MODULE_RELDATE "November 2, 2009"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -937,9 +937,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
937 u32 val; 937 u32 val;
938 struct phy_device *phydev; 938 struct phy_device *phydev;
939 939
940 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 940 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
941 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 941 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
942 case TG3_PHY_ID_BCM50610: 942 case TG3_PHY_ID_BCM50610:
943 case TG3_PHY_ID_BCM50610M:
943 val = MAC_PHYCFG2_50610_LED_MODES; 944 val = MAC_PHYCFG2_50610_LED_MODES;
944 break; 945 break;
945 case TG3_PHY_ID_BCMAC131: 946 case TG3_PHY_ID_BCMAC131:
@@ -1031,7 +1032,7 @@ static void tg3_mdio_start(struct tg3 *tp)
1031 if (is_serdes) 1032 if (is_serdes)
1032 tp->phy_addr += 7; 1033 tp->phy_addr += 7;
1033 } else 1034 } else
1034 tp->phy_addr = PHY_ADDR; 1035 tp->phy_addr = TG3_PHY_MII_ADDR;
1035 1036
1036 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && 1037 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 1038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
@@ -1062,7 +1063,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1062 tp->mdio_bus->read = &tg3_mdio_read; 1063 tp->mdio_bus->read = &tg3_mdio_read;
1063 tp->mdio_bus->write = &tg3_mdio_write; 1064 tp->mdio_bus->write = &tg3_mdio_write;
1064 tp->mdio_bus->reset = &tg3_mdio_reset; 1065 tp->mdio_bus->reset = &tg3_mdio_reset;
1065 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR); 1066 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1066 tp->mdio_bus->irq = &tp->mdio_irq[0]; 1067 tp->mdio_bus->irq = &tp->mdio_irq[0];
1067 1068
1068 for (i = 0; i < PHY_MAX_ADDR; i++) 1069 for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1084,7 +1085,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1084 return i; 1085 return i;
1085 } 1086 }
1086 1087
1087 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1088 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1088 1089
1089 if (!phydev || !phydev->drv) { 1090 if (!phydev || !phydev->drv) {
1090 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name); 1091 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
@@ -1096,8 +1097,14 @@ static int tg3_mdio_init(struct tg3 *tp)
1096 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 1097 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1097 case TG3_PHY_ID_BCM57780: 1098 case TG3_PHY_ID_BCM57780:
1098 phydev->interface = PHY_INTERFACE_MODE_GMII; 1099 phydev->interface = PHY_INTERFACE_MODE_GMII;
1100 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1099 break; 1101 break;
1100 case TG3_PHY_ID_BCM50610: 1102 case TG3_PHY_ID_BCM50610:
1103 case TG3_PHY_ID_BCM50610M:
1104 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1105 PHY_BRCM_RX_REFCLK_UNUSED |
1106 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1107 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1101 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) 1108 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1102 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; 1109 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1103 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1110 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
@@ -1111,6 +1118,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1111 case TG3_PHY_ID_RTL8201E: 1118 case TG3_PHY_ID_RTL8201E:
1112 case TG3_PHY_ID_BCMAC131: 1119 case TG3_PHY_ID_BCMAC131:
1113 phydev->interface = PHY_INTERFACE_MODE_MII; 1120 phydev->interface = PHY_INTERFACE_MODE_MII;
1121 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1114 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; 1122 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
1115 break; 1123 break;
1116 } 1124 }
@@ -1311,7 +1319,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1311 u32 old_tx_mode = tp->tx_mode; 1319 u32 old_tx_mode = tp->tx_mode;
1312 1320
1313 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) 1321 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1314 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg; 1322 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1315 else 1323 else
1316 autoneg = tp->link_config.autoneg; 1324 autoneg = tp->link_config.autoneg;
1317 1325
@@ -1348,7 +1356,7 @@ static void tg3_adjust_link(struct net_device *dev)
1348 u8 oldflowctrl, linkmesg = 0; 1356 u8 oldflowctrl, linkmesg = 0;
1349 u32 mac_mode, lcl_adv, rmt_adv; 1357 u32 mac_mode, lcl_adv, rmt_adv;
1350 struct tg3 *tp = netdev_priv(dev); 1358 struct tg3 *tp = netdev_priv(dev);
1351 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1359 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1352 1360
1353 spin_lock_bh(&tp->lock); 1361 spin_lock_bh(&tp->lock);
1354 1362
@@ -1363,8 +1371,11 @@ static void tg3_adjust_link(struct net_device *dev)
1363 1371
1364 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 1372 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1365 mac_mode |= MAC_MODE_PORT_MODE_MII; 1373 mac_mode |= MAC_MODE_PORT_MODE_MII;
1366 else 1374 else if (phydev->speed == SPEED_1000 ||
1375 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1367 mac_mode |= MAC_MODE_PORT_MODE_GMII; 1376 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1377 else
1378 mac_mode |= MAC_MODE_PORT_MODE_MII;
1368 1379
1369 if (phydev->duplex == DUPLEX_HALF) 1380 if (phydev->duplex == DUPLEX_HALF)
1370 mac_mode |= MAC_MODE_HALF_DUPLEX; 1381 mac_mode |= MAC_MODE_HALF_DUPLEX;
@@ -1434,7 +1445,7 @@ static int tg3_phy_init(struct tg3 *tp)
1434 /* Bring the PHY back to a known state. */ 1445 /* Bring the PHY back to a known state. */
1435 tg3_bmcr_reset(tp); 1446 tg3_bmcr_reset(tp);
1436 1447
1437 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1448 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1438 1449
1439 /* Attach the MAC to the PHY. */ 1450 /* Attach the MAC to the PHY. */
1440 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, 1451 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
@@ -1461,7 +1472,7 @@ static int tg3_phy_init(struct tg3 *tp)
1461 SUPPORTED_Asym_Pause); 1472 SUPPORTED_Asym_Pause);
1462 break; 1473 break;
1463 default: 1474 default:
1464 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); 1475 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1465 return -EINVAL; 1476 return -EINVAL;
1466 } 1477 }
1467 1478
@@ -1479,7 +1490,7 @@ static void tg3_phy_start(struct tg3 *tp)
1479 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1490 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1480 return; 1491 return;
1481 1492
1482 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1493 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1483 1494
1484 if (tp->link_config.phy_is_low_power) { 1495 if (tp->link_config.phy_is_low_power) {
1485 tp->link_config.phy_is_low_power = 0; 1496 tp->link_config.phy_is_low_power = 0;
@@ -1499,13 +1510,13 @@ static void tg3_phy_stop(struct tg3 *tp)
1499 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1510 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1500 return; 1511 return;
1501 1512
1502 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]); 1513 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1503} 1514}
1504 1515
1505static void tg3_phy_fini(struct tg3 *tp) 1516static void tg3_phy_fini(struct tg3 *tp)
1506{ 1517{
1507 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 1518 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1508 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); 1519 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1509 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; 1520 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1510 } 1521 }
1511} 1522}
@@ -2149,6 +2160,26 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2149 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 2160 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2150 udelay(40); 2161 udelay(40);
2151 return; 2162 return;
2163 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2164 u32 phytest;
2165 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2166 u32 phy;
2167
2168 tg3_writephy(tp, MII_ADVERTISE, 0);
2169 tg3_writephy(tp, MII_BMCR,
2170 BMCR_ANENABLE | BMCR_ANRESTART);
2171
2172 tg3_writephy(tp, MII_TG3_FET_TEST,
2173 phytest | MII_TG3_FET_SHADOW_EN);
2174 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2175 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2176 tg3_writephy(tp,
2177 MII_TG3_FET_SHDW_AUXMODE4,
2178 phy);
2179 }
2180 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2181 }
2182 return;
2152 } else if (do_low_power) { 2183 } else if (do_low_power) {
2153 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2184 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2154 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 2185 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
@@ -2474,7 +2505,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2474 struct phy_device *phydev; 2505 struct phy_device *phydev;
2475 u32 phyid, advertising; 2506 u32 phyid, advertising;
2476 2507
2477 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 2508 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2478 2509
2479 tp->link_config.phy_is_low_power = 1; 2510 tp->link_config.phy_is_low_power = 1;
2480 2511
@@ -3243,15 +3274,6 @@ relink:
3243 pci_write_config_word(tp->pdev, 3274 pci_write_config_word(tp->pdev,
3244 tp->pcie_cap + PCI_EXP_LNKCTL, 3275 tp->pcie_cap + PCI_EXP_LNKCTL,
3245 newlnkctl); 3276 newlnkctl);
3246 } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
3247 u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
3248 if (tp->link_config.active_speed == SPEED_100 ||
3249 tp->link_config.active_speed == SPEED_10)
3250 newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3251 else
3252 newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3253 if (newreg != oldreg)
3254 tw32(TG3_PCIE_LNKCTL, newreg);
3255 } 3277 }
3256 3278
3257 if (current_link_up != netif_carrier_ok(tp->dev)) { 3279 if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -4435,6 +4457,10 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4435 4457
4436 mapping = pci_map_single(tp->pdev, skb->data, skb_size, 4458 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4437 PCI_DMA_FROMDEVICE); 4459 PCI_DMA_FROMDEVICE);
4460 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4461 dev_kfree_skb(skb);
4462 return -EIO;
4463 }
4438 4464
4439 map->skb = skb; 4465 map->skb = skb;
4440 pci_unmap_addr_set(map, mapping, mapping); 4466 pci_unmap_addr_set(map, mapping, mapping);
@@ -5124,7 +5150,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5124 /* Make sure new skb does not cross any 4G boundaries. 5150 /* Make sure new skb does not cross any 4G boundaries.
5125 * Drop the packet if it does. 5151 * Drop the packet if it does.
5126 */ 5152 */
5127 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) { 5153 if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5154 tg3_4g_overflow_test(new_addr, new_skb->len))) {
5128 if (!ret) 5155 if (!ret)
5129 skb_dma_unmap(&tp->pdev->dev, new_skb, 5156 skb_dma_unmap(&tp->pdev->dev, new_skb,
5130 DMA_TO_DEVICE); 5157 DMA_TO_DEVICE);
@@ -5392,7 +5419,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5392 mss = 0; 5419 mss = 0;
5393 if ((mss = skb_shinfo(skb)->gso_size) != 0) { 5420 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5394 struct iphdr *iph; 5421 struct iphdr *iph;
5395 int tcp_opt_len, ip_tcp_len, hdr_len; 5422 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5396 5423
5397 if (skb_header_cloned(skb) && 5424 if (skb_header_cloned(skb) &&
5398 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 5425 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5423,8 +5450,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5423 IPPROTO_TCP, 5450 IPPROTO_TCP,
5424 0); 5451 0);
5425 5452
5426 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || 5453 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5427 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { 5454 mss |= hdr_len << 9;
5455 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5428 if (tcp_opt_len || iph->ihl > 5) { 5457 if (tcp_opt_len || iph->ihl > 5) {
5429 int tsflags; 5458 int tsflags;
5430 5459
@@ -5459,9 +5488,18 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5459 5488
5460 would_hit_hwbug = 0; 5489 would_hit_hwbug = 0;
5461 5490
5462 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) 5491 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5492 would_hit_hwbug = 1;
5493
5494 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5495 tg3_4g_overflow_test(mapping, len))
5496 would_hit_hwbug = 1;
5497
5498 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5499 tg3_40bit_overflow_test(tp, mapping, len))
5463 would_hit_hwbug = 1; 5500 would_hit_hwbug = 1;
5464 else if (tg3_4g_overflow_test(mapping, len)) 5501
5502 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5465 would_hit_hwbug = 1; 5503 would_hit_hwbug = 1;
5466 5504
5467 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 5505 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5482,10 +5520,16 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5482 5520
5483 tnapi->tx_buffers[entry].skb = NULL; 5521 tnapi->tx_buffers[entry].skb = NULL;
5484 5522
5485 if (tg3_4g_overflow_test(mapping, len)) 5523 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5524 len <= 8)
5525 would_hit_hwbug = 1;
5526
5527 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5528 tg3_4g_overflow_test(mapping, len))
5486 would_hit_hwbug = 1; 5529 would_hit_hwbug = 1;
5487 5530
5488 if (tg3_40bit_overflow_test(tp, mapping, len)) 5531 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5532 tg3_40bit_overflow_test(tp, mapping, len))
5489 would_hit_hwbug = 1; 5533 would_hit_hwbug = 1;
5490 5534
5491 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 5535 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
@@ -6580,6 +6624,30 @@ static int tg3_chip_reset(struct tg3 *tp)
6580 6624
6581 tg3_mdio_start(tp); 6625 tg3_mdio_start(tp);
6582 6626
6627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6628 u8 phy_addr;
6629
6630 phy_addr = tp->phy_addr;
6631 tp->phy_addr = TG3_PHY_PCIE_ADDR;
6632
6633 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6634 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
6635 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
6636 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
6637 TG3_PCIEPHY_TX0CTRL1_NB_EN;
6638 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
6639 udelay(10);
6640
6641 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6642 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
6643 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
6644 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
6645 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
6646 udelay(10);
6647
6648 tp->phy_addr = phy_addr;
6649 }
6650
6583 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 6651 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6584 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && 6652 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6585 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 6653 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
@@ -7162,15 +7230,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7162 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 7230 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7163 7231
7164 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 7232 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7165 }
7166 7233
7167 if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) { 7234 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7168 val = tr32(TG3_PCIE_LNKCTL); 7235 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7169 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
7170 val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7171 else
7172 val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7173 tw32(TG3_PCIE_LNKCTL, val);
7174 } 7236 }
7175 7237
7176 /* This works around an issue with Athlon chipsets on 7238 /* This works around an issue with Athlon chipsets on
@@ -7602,6 +7664,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7602 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 7664 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7603 val |= WDMAC_MODE_STATUS_TAG_FIX; 7665 val |= WDMAC_MODE_STATUS_TAG_FIX;
7604 7666
7667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7668 val |= WDMAC_MODE_BURST_ALL_DATA;
7669
7605 tw32_f(WDMAC_MODE, val); 7670 tw32_f(WDMAC_MODE, val);
7606 udelay(40); 7671 udelay(40);
7607 7672
@@ -9240,9 +9305,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9240 struct tg3 *tp = netdev_priv(dev); 9305 struct tg3 *tp = netdev_priv(dev);
9241 9306
9242 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9307 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9308 struct phy_device *phydev;
9243 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9309 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9244 return -EAGAIN; 9310 return -EAGAIN;
9245 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); 9311 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9312 return phy_ethtool_gset(phydev, cmd);
9246 } 9313 }
9247 9314
9248 cmd->supported = (SUPPORTED_Autoneg); 9315 cmd->supported = (SUPPORTED_Autoneg);
@@ -9281,9 +9348,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9281 struct tg3 *tp = netdev_priv(dev); 9348 struct tg3 *tp = netdev_priv(dev);
9282 9349
9283 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9350 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9351 struct phy_device *phydev;
9284 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9352 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9285 return -EAGAIN; 9353 return -EAGAIN;
9286 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); 9354 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9355 return phy_ethtool_sset(phydev, cmd);
9287 } 9356 }
9288 9357
9289 if (cmd->autoneg != AUTONEG_ENABLE && 9358 if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -9466,7 +9535,7 @@ static int tg3_nway_reset(struct net_device *dev)
9466 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9535 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9467 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9536 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9468 return -EAGAIN; 9537 return -EAGAIN;
9469 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]); 9538 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9470 } else { 9539 } else {
9471 u32 bmcr; 9540 u32 bmcr;
9472 9541
@@ -9585,7 +9654,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
9585 u32 newadv; 9654 u32 newadv;
9586 struct phy_device *phydev; 9655 struct phy_device *phydev;
9587 9656
9588 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 9657 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9589 9658
9590 if (epause->rx_pause) { 9659 if (epause->rx_pause) {
9591 if (epause->tx_pause) 9660 if (epause->tx_pause)
@@ -10338,7 +10407,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10338 for (i = 14; i < tx_len; i++) 10407 for (i = 14; i < tx_len; i++)
10339 tx_data[i] = (u8) (i & 0xff); 10408 tx_data[i] = (u8) (i & 0xff);
10340 10409
10341 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 10410 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
10411 dev_kfree_skb(skb);
10412 return -EIO;
10413 }
10342 10414
10343 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 10415 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10344 rnapi->coal_now); 10416 rnapi->coal_now);
@@ -10349,7 +10421,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10349 10421
10350 num_pkts = 0; 10422 num_pkts = 0;
10351 10423
10352 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1); 10424 tg3_set_txd(tnapi, tnapi->tx_prod,
10425 skb_shinfo(skb)->dma_head, tx_len, 0, 1);
10353 10426
10354 tnapi->tx_prod++; 10427 tnapi->tx_prod++;
10355 num_pkts++; 10428 num_pkts++;
@@ -10359,8 +10432,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10359 10432
10360 udelay(10); 10433 udelay(10);
10361 10434
10362 /* 250 usec to allow enough time on some 10/100 Mbps devices. */ 10435 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10363 for (i = 0; i < 25; i++) { 10436 for (i = 0; i < 35; i++) {
10364 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 10437 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10365 coal_now); 10438 coal_now);
10366 10439
@@ -10373,7 +10446,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10373 break; 10446 break;
10374 } 10447 }
10375 10448
10376 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); 10449 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
10377 dev_kfree_skb(skb); 10450 dev_kfree_skb(skb);
10378 10451
10379 if (tx_idx != tnapi->tx_prod) 10452 if (tx_idx != tnapi->tx_prod)
@@ -10565,9 +10638,11 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10565 int err; 10638 int err;
10566 10639
10567 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10640 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10641 struct phy_device *phydev;
10568 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 10642 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10569 return -EAGAIN; 10643 return -EAGAIN;
10570 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd); 10644 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10645 return phy_mii_ioctl(phydev, data, cmd);
10571 } 10646 }
10572 10647
10573 switch(cmd) { 10648 switch(cmd) {
@@ -12610,12 +12685,19 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12610 12685
12611 tp->irq_max = 1; 12686 tp->irq_max = 1;
12612 12687
12613#ifdef TG3_NAPI
12614 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 12688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12615 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 12689 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12616 tp->irq_max = TG3_IRQ_MAX_VECS; 12690 tp->irq_max = TG3_IRQ_MAX_VECS;
12617 } 12691 }
12618#endif 12692
12693 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
12694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12695 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
12696 else {
12697 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
12698 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
12699 }
12700 }
12619 12701
12620 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 12702 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12621 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 12703 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
@@ -12926,11 +13008,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 13008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12927 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; 13009 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12928 13010
12929 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12930 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12931 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12932 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12933
12934 err = tg3_mdio_init(tp); 13011 err = tg3_mdio_init(tp);
12935 if (err) 13012 if (err)
12936 return err; 13013 return err;
@@ -13975,8 +14052,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13975 goto err_out_iounmap; 14052 goto err_out_iounmap;
13976 } 14053 }
13977 14054
13978 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 14055 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13980 dev->netdev_ops = &tg3_netdev_ops; 14056 dev->netdev_ops = &tg3_netdev_ops;
13981 else 14057 else
13982 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14058 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14131,13 +14207,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14131 tg3_bus_string(tp, str), 14207 tg3_bus_string(tp, str),
14132 dev->dev_addr); 14208 dev->dev_addr);
14133 14209
14134 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) 14210 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14211 struct phy_device *phydev;
14212 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14135 printk(KERN_INFO 14213 printk(KERN_INFO
14136 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 14214 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14137 tp->dev->name, 14215 tp->dev->name, phydev->drv->name,
14138 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name, 14216 dev_name(&phydev->dev));
14139 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev)); 14217 } else
14140 else
14141 printk(KERN_INFO 14218 printk(KERN_INFO
14142 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", 14219 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14143 tp->dev->name, tg3_phy_string(tp), 14220 tp->dev->name, tg3_phy_string(tp),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index bab7940158e6..d770da124b85 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1264,8 +1264,9 @@
1264#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080 1264#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080
1265#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100 1265#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
1266#define WDMAC_MODE_LNGREAD_ENAB 0x00000200 1266#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
1267#define WDMAC_MODE_RX_ACCEL 0x00000400 1267#define WDMAC_MODE_RX_ACCEL 0x00000400
1268#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000 1268#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000
1269#define WDMAC_MODE_BURST_ALL_DATA 0xc0000000
1269#define WDMAC_STATUS 0x00004c04 1270#define WDMAC_STATUS 0x00004c04
1270#define WDMAC_STATUS_TGTABORT 0x00000004 1271#define WDMAC_STATUS_TGTABORT 0x00000004
1271#define WDMAC_STATUS_MSTABORT 0x00000008 1272#define WDMAC_STATUS_MSTABORT 0x00000008
@@ -1953,10 +1954,34 @@
1953#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 1954#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
1954#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 1955#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
1955 1956
1957
1956/* Currently this is fixed. */ 1958/* Currently this is fixed. */
1957#define PHY_ADDR 0x01 1959#define TG3_PHY_PCIE_ADDR 0x00
1960#define TG3_PHY_MII_ADDR 0x01
1961
1962
1963/*** Tigon3 specific PHY PCIE registers. ***/
1964
1965#define TG3_PCIEPHY_BLOCK_ADDR 0x1f
1966#define TG3_PCIEPHY_XGXS_BLK1 0x0801
1967#define TG3_PCIEPHY_TXB_BLK 0x0861
1968#define TG3_PCIEPHY_BLOCK_SHIFT 4
1958 1969
1959/* Tigon3 specific PHY MII registers. */ 1970/* TG3_PCIEPHY_TXB_BLK */
1971#define TG3_PCIEPHY_TX0CTRL1 0x15
1972#define TG3_PCIEPHY_TX0CTRL1_TXOCM 0x0003
1973#define TG3_PCIEPHY_TX0CTRL1_RDCTL 0x0008
1974#define TG3_PCIEPHY_TX0CTRL1_TXCMV 0x0030
1975#define TG3_PCIEPHY_TX0CTRL1_TKSEL 0x0040
1976#define TG3_PCIEPHY_TX0CTRL1_NB_EN 0x0400
1977
1978/* TG3_PCIEPHY_XGXS_BLK1 */
1979#define TG3_PCIEPHY_PWRMGMT4 0x1a
1980#define TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN 0x0038
1981#define TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN 0x4000
1982
1983
1984/*** Tigon3 specific PHY MII registers. ***/
1960#define TG3_BMCR_SPEED1000 0x0040 1985#define TG3_BMCR_SPEED1000 0x0040
1961 1986
1962#define MII_TG3_CTRL 0x09 /* 1000-baseT control register */ 1987#define MII_TG3_CTRL 0x09 /* 1000-baseT control register */
@@ -2055,6 +2080,9 @@
2055#define MII_TG3_FET_SHDW_MISCCTRL 0x10 2080#define MII_TG3_FET_SHDW_MISCCTRL 0x10
2056#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000 2081#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000
2057 2082
2083#define MII_TG3_FET_SHDW_AUXMODE4 0x1a
2084#define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008
2085
2058#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b 2086#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b
2059#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020 2087#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020
2060 2088
@@ -2756,9 +2784,11 @@ struct tg3 {
2756#define TG3_FLG3_PHY_ENABLE_APD 0x00001000 2784#define TG3_FLG3_PHY_ENABLE_APD 0x00001000
2757#define TG3_FLG3_5755_PLUS 0x00002000 2785#define TG3_FLG3_5755_PLUS 0x00002000
2758#define TG3_FLG3_NO_NVRAM 0x00004000 2786#define TG3_FLG3_NO_NVRAM 0x00004000
2759#define TG3_FLG3_TOGGLE_10_100_L1PLLPD 0x00008000
2760#define TG3_FLG3_PHY_IS_FET 0x00010000 2787#define TG3_FLG3_PHY_IS_FET 0x00010000
2761#define TG3_FLG3_ENABLE_RSS 0x00020000 2788#define TG3_FLG3_ENABLE_RSS 0x00020000
2789#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
2790#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
2791#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
2762 2792
2763 struct timer_list timer; 2793 struct timer_list timer;
2764 u16 timer_counter; 2794 u16 timer_counter;
@@ -2834,6 +2864,7 @@ struct tg3 {
2834#define PHY_REV_BCM5401_C0 0x6 2864#define PHY_REV_BCM5401_C0 0x6
2835#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2865#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2836#define TG3_PHY_ID_BCM50610 0x143bd60 2866#define TG3_PHY_ID_BCM50610 0x143bd60
2867#define TG3_PHY_ID_BCM50610M 0x143bd70
2837#define TG3_PHY_ID_BCMAC131 0x143bc70 2868#define TG3_PHY_ID_BCMAC131 0x143bc70
2838#define TG3_PHY_ID_RTL8211C 0x001cc910 2869#define TG3_PHY_ID_RTL8211C 0x001cc910
2839#define TG3_PHY_ID_RTL8201E 0x00008200 2870#define TG3_PHY_ID_RTL8201E 0x00008200
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 525bbc5b9c9d..6a3c7510afd9 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -1143,9 +1143,16 @@ static void dir_open_adapter (struct net_device *dev)
1143 } else { 1143 } else {
1144 char **prphase = printphase; 1144 char **prphase = printphase;
1145 char **prerror = printerror; 1145 char **prerror = printerror;
1146 int pnr = err / 16 - 1;
1147 int enr = err % 16 - 1;
1146 DPRINTK("TR Adapter misc open failure, error code = "); 1148 DPRINTK("TR Adapter misc open failure, error code = ");
1147 printk("0x%x, Phase: %s, Error: %s\n", 1149 if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) ||
1148 err, prphase[err/16 -1], prerror[err%16 -1]); 1150 enr < 0 ||
1151 enr >= ARRAY_SIZE(printerror))
1152 printk("0x%x, invalid Phase/Error.", err);
1153 else
1154 printk("0x%x, Phase: %s, Error: %s\n", err,
1155 prphase[pnr], prerror[enr]);
1149 printk(" retrying after %ds delay...\n", 1156 printk(" retrying after %ds delay...\n",
1150 TR_RETRY_INTERVAL/HZ); 1157 TR_RETRY_INTERVAL/HZ);
1151 } 1158 }
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c47237c2d638..32d93564a74d 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -174,7 +174,7 @@ config USB_NET_CDCETHER
174 * Ericsson Mobile Broadband Module (all variants) 174 * Ericsson Mobile Broadband Module (all variants)
175 * Motorola (DM100 and SB4100) 175 * Motorola (DM100 and SB4100)
176 * Broadcom Cable Modem (reference design) 176 * Broadcom Cable Modem (reference design)
177 * Toshiba (PCX1100U and F3507g) 177 * Toshiba (PCX1100U and F3507g/F3607gw)
178 * ... 178 * ...
179 179
180 This driver creates an interface named "ethX", where X depends on 180 This driver creates an interface named "ethX", where X depends on
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 71e65fc10e6f..71d7ff3de99f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -552,20 +552,60 @@ static const struct usb_device_id products [] = {
552 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 552 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
553 .driver_info = (unsigned long) &mbm_info, 553 .driver_info = (unsigned long) &mbm_info,
554}, { 554}, {
555 /* Ericsson F3307 */ 555 /* Ericsson F3607gw ver 2 */
556 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
557 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
558 .driver_info = (unsigned long) &mbm_info,
559}, {
560 /* Ericsson F3607gw ver 3 */
556 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM, 561 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
557 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 562 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
558 .driver_info = (unsigned long) &mbm_info, 563 .driver_info = (unsigned long) &mbm_info,
559}, { 564}, {
565 /* Ericsson F3307 */
566 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
567 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
568 .driver_info = (unsigned long) &mbm_info,
569}, {
570 /* Ericsson F3307 ver 2 */
571 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
572 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
573 .driver_info = (unsigned long) &mbm_info,
574}, {
575 /* Ericsson C3607w */
576 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
577 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
578 .driver_info = (unsigned long) &mbm_info,
579}, {
560 /* Toshiba F3507g */ 580 /* Toshiba F3507g */
561 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, 581 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
562 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 582 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
563 .driver_info = (unsigned long) &mbm_info, 583 .driver_info = (unsigned long) &mbm_info,
564}, { 584}, {
585 /* Toshiba F3607gw */
586 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
587 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
588 .driver_info = (unsigned long) &mbm_info,
589}, {
590 /* Toshiba F3607gw ver 2 */
591 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
592 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
593 .driver_info = (unsigned long) &mbm_info,
594}, {
565 /* Dell F3507g */ 595 /* Dell F3507g */
566 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM, 596 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
567 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 597 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
568 .driver_info = (unsigned long) &mbm_info, 598 .driver_info = (unsigned long) &mbm_info,
599}, {
600 /* Dell F3607gw */
601 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
602 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
603 .driver_info = (unsigned long) &mbm_info,
604}, {
605 /* Dell F3607gw ver 2 */
606 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
607 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
608 .driver_info = (unsigned long) &mbm_info,
569}, 609},
570 { }, // END 610 { }, // END
571}; 611};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 72470f77f556..a2b30a10064f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -649,6 +649,10 @@ static const struct usb_device_id products[] = {
649 USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */ 649 USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */
650 .driver_info = (unsigned long)&dm9601_info, 650 .driver_info = (unsigned long)&dm9601_info,
651 }, 651 },
652 {
653 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
654 .driver_info = (unsigned long)&dm9601_info,
655 },
652 {}, // END 656 {}, // END
653}; 657};
654 658
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index e391ef969c28..3b80e8d2d621 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -471,16 +471,7 @@ static int kaweth_reset(struct kaweth_device *kaweth)
471 int result; 471 int result;
472 472
473 dbg("kaweth_reset(%p)", kaweth); 473 dbg("kaweth_reset(%p)", kaweth);
474 result = kaweth_control(kaweth, 474 result = usb_reset_configuration(kaweth->dev);
475 usb_sndctrlpipe(kaweth->dev, 0),
476 USB_REQ_SET_CONFIGURATION,
477 0,
478 kaweth->dev->config[0].desc.bConfigurationValue,
479 0,
480 NULL,
481 0,
482 KAWETH_CONTROL_TIMEOUT);
483
484 mdelay(10); 475 mdelay(10);
485 476
486 dbg("kaweth_reset() returns %d.",result); 477 dbg("kaweth_reset() returns %d.",result);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ade5b344f75d..9bed694cd215 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -442,7 +442,7 @@ err_register_peer:
442 return err; 442 return err;
443} 443}
444 444
445static void veth_dellink(struct net_device *dev) 445static void veth_dellink(struct net_device *dev, struct list_head *head)
446{ 446{
447 struct veth_priv *priv; 447 struct veth_priv *priv;
448 struct net_device *peer; 448 struct net_device *peer;
@@ -450,8 +450,8 @@ static void veth_dellink(struct net_device *dev)
450 priv = netdev_priv(dev); 450 priv = netdev_priv(dev);
451 peer = priv->peer; 451 peer = priv->peer;
452 452
453 unregister_netdevice(dev); 453 unregister_netdevice_queue(dev, head);
454 unregister_netdevice(peer); 454 unregister_netdevice_queue(peer, head);
455} 455}
456 456
457static const struct nla_policy veth_policy[VETH_INFO_MAX + 1]; 457static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 144db6395c95..158f411bd555 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -364,11 +364,6 @@ static int rx_copybreak = 200;
364module_param(rx_copybreak, int, 0644); 364module_param(rx_copybreak, int, 0644);
365MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 365MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
366 366
367#ifdef CONFIG_PM
368static DEFINE_SPINLOCK(velocity_dev_list_lock);
369static LIST_HEAD(velocity_dev_list);
370#endif
371
372/* 367/*
373 * Internal board variants. At the moment we have only one 368 * Internal board variants. At the moment we have only one
374 */ 369 */
@@ -417,14 +412,6 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
417 struct net_device *dev = pci_get_drvdata(pdev); 412 struct net_device *dev = pci_get_drvdata(pdev);
418 struct velocity_info *vptr = netdev_priv(dev); 413 struct velocity_info *vptr = netdev_priv(dev);
419 414
420#ifdef CONFIG_PM
421 unsigned long flags;
422
423 spin_lock_irqsave(&velocity_dev_list_lock, flags);
424 if (!list_empty(&velocity_dev_list))
425 list_del(&vptr->list);
426 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
427#endif
428 unregister_netdev(dev); 415 unregister_netdev(dev);
429 iounmap(vptr->mac_regs); 416 iounmap(vptr->mac_regs);
430 pci_release_regions(pdev); 417 pci_release_regions(pdev);
@@ -2577,7 +2564,6 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
2577 vptr->tx.numq = info->txqueue; 2564 vptr->tx.numq = info->txqueue;
2578 vptr->multicast_limit = MCAM_SIZE; 2565 vptr->multicast_limit = MCAM_SIZE;
2579 spin_lock_init(&vptr->lock); 2566 spin_lock_init(&vptr->lock);
2580 INIT_LIST_HEAD(&vptr->list);
2581} 2567}
2582 2568
2583/** 2569/**
@@ -2776,15 +2762,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2776 /* and leave the chip powered down */ 2762 /* and leave the chip powered down */
2777 2763
2778 pci_set_power_state(pdev, PCI_D3hot); 2764 pci_set_power_state(pdev, PCI_D3hot);
2779#ifdef CONFIG_PM
2780 {
2781 unsigned long flags;
2782
2783 spin_lock_irqsave(&velocity_dev_list_lock, flags);
2784 list_add(&vptr->list, &velocity_dev_list);
2785 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
2786 }
2787#endif
2788 velocity_nics++; 2765 velocity_nics++;
2789out: 2766out:
2790 return ret; 2767 return ret;
@@ -3240,20 +3217,10 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
3240{ 3217{
3241 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; 3218 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3242 struct net_device *dev = ifa->ifa_dev->dev; 3219 struct net_device *dev = ifa->ifa_dev->dev;
3243 struct velocity_info *vptr;
3244 unsigned long flags;
3245 3220
3246 if (dev_net(dev) != &init_net) 3221 if (dev_net(dev) == &init_net &&
3247 return NOTIFY_DONE; 3222 dev->netdev_ops == &velocity_netdev_ops)
3248 3223 velocity_get_ip(netdev_priv(dev));
3249 spin_lock_irqsave(&velocity_dev_list_lock, flags);
3250 list_for_each_entry(vptr, &velocity_dev_list, list) {
3251 if (vptr->dev == dev) {
3252 velocity_get_ip(vptr);
3253 break;
3254 }
3255 }
3256 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
3257 3224
3258 return NOTIFY_DONE; 3225 return NOTIFY_DONE;
3259} 3226}
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 2f00c13ab502..ce894ffa7c91 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1499,8 +1499,6 @@ struct velocity_opt {
1499#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) 1499#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1500 1500
1501struct velocity_info { 1501struct velocity_info {
1502 struct list_head list;
1503
1504 struct pci_dev *pdev; 1502 struct pci_dev *pdev;
1505 struct net_device *dev; 1503 struct net_device *dev;
1506 1504
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 556512dc6072..22a8ca5d67d5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -451,7 +451,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
451 vi->dev->stats.tx_bytes += skb->len; 451 vi->dev->stats.tx_bytes += skb->len;
452 vi->dev->stats.tx_packets++; 452 vi->dev->stats.tx_packets++;
453 tot_sgs += skb_vnet_hdr(skb)->num_sg; 453 tot_sgs += skb_vnet_hdr(skb)->num_sg;
454 kfree_skb(skb); 454 dev_kfree_skb_any(skb);
455 } 455 }
456 return tot_sgs; 456 return tot_sgs;
457} 457}
@@ -514,8 +514,7 @@ again:
514 /* Free up any pending old buffers before queueing new ones. */ 514 /* Free up any pending old buffers before queueing new ones. */
515 free_old_xmit_skbs(vi); 515 free_old_xmit_skbs(vi);
516 516
517 /* Put new one in send queue and do transmit */ 517 /* Try to transmit */
518 __skb_queue_head(&vi->send, skb);
519 capacity = xmit_skb(vi, skb); 518 capacity = xmit_skb(vi, skb);
520 519
521 /* This can happen with OOM and indirect buffers. */ 520 /* This can happen with OOM and indirect buffers. */
@@ -529,8 +528,17 @@ again:
529 } 528 }
530 return NETDEV_TX_BUSY; 529 return NETDEV_TX_BUSY;
531 } 530 }
532
533 vi->svq->vq_ops->kick(vi->svq); 531 vi->svq->vq_ops->kick(vi->svq);
532
533 /*
534 * Put new one in send queue. You'd expect we'd need this before
535 * xmit_skb calls add_buf(), since the callback can be triggered
536 * immediately after that. But since the callback just triggers
537 * another call back here, normal network xmit locking prevents the
538 * race.
539 */
540 __skb_queue_head(&vi->send, skb);
541
534 /* Don't wait up for transmitted skbs to be freed. */ 542 /* Don't wait up for transmitted skbs to be freed. */
535 skb_orphan(skb); 543 skb_orphan(skb);
536 nf_reset(skb); 544 nf_reset(skb);
@@ -988,7 +996,7 @@ static unsigned int features[] = {
988 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 996 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
989}; 997};
990 998
991static struct virtio_driver virtio_net = { 999static struct virtio_driver virtio_net_driver = {
992 .feature_table = features, 1000 .feature_table = features,
993 .feature_table_size = ARRAY_SIZE(features), 1001 .feature_table_size = ARRAY_SIZE(features),
994 .driver.name = KBUILD_MODNAME, 1002 .driver.name = KBUILD_MODNAME,
@@ -1001,12 +1009,12 @@ static struct virtio_driver virtio_net = {
1001 1009
1002static int __init init(void) 1010static int __init init(void)
1003{ 1011{
1004 return register_virtio_driver(&virtio_net); 1012 return register_virtio_driver(&virtio_net_driver);
1005} 1013}
1006 1014
1007static void __exit fini(void) 1015static void __exit fini(void)
1008{ 1016{
1009 unregister_virtio_driver(&virtio_net); 1017 unregister_virtio_driver(&virtio_net_driver);
1010} 1018}
1011module_init(init); 1019module_init(init);
1012module_exit(fini); 1020module_exit(fini);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 44fb0c5a2800..004353a46af0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -481,7 +481,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
481 } 481 }
482 rq->uncommitted[ring_idx] += num_allocated; 482 rq->uncommitted[ring_idx] += num_allocated;
483 483
484 dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp " 484 dev_dbg(&adapter->netdev->dev,
485 "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
485 "%u, uncommited %u\n", num_allocated, ring->next2fill, 486 "%u, uncommited %u\n", num_allocated, ring->next2fill,
486 ring->next2comp, rq->uncommitted[ring_idx]); 487 ring->next2comp, rq->uncommitted[ring_idx]);
487 488
@@ -539,7 +540,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
539 tbi = tq->buf_info + tq->tx_ring.next2fill; 540 tbi = tq->buf_info + tq->tx_ring.next2fill;
540 tbi->map_type = VMXNET3_MAP_NONE; 541 tbi->map_type = VMXNET3_MAP_NONE;
541 542
542 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", 543 dev_dbg(&adapter->netdev->dev,
544 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
543 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, 545 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
544 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); 546 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
545 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 547 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -572,7 +574,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
572 gdesc->dword[2] = dw2 | buf_size; 574 gdesc->dword[2] = dw2 | buf_size;
573 gdesc->dword[3] = 0; 575 gdesc->dword[3] = 0;
574 576
575 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", 577 dev_dbg(&adapter->netdev->dev,
578 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
576 tq->tx_ring.next2fill, gdesc->txd.addr, 579 tq->tx_ring.next2fill, gdesc->txd.addr,
577 gdesc->dword[2], gdesc->dword[3]); 580 gdesc->dword[2], gdesc->dword[3]);
578 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 581 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -600,7 +603,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
600 gdesc->dword[2] = dw2 | frag->size; 603 gdesc->dword[2] = dw2 | frag->size;
601 gdesc->dword[3] = 0; 604 gdesc->dword[3] = 0;
602 605
603 dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n", 606 dev_dbg(&adapter->netdev->dev,
607 "txd[%u]: 0x%llu %u %u\n",
604 tq->tx_ring.next2fill, gdesc->txd.addr, 608 tq->tx_ring.next2fill, gdesc->txd.addr,
605 gdesc->dword[2], gdesc->dword[3]); 609 gdesc->dword[2], gdesc->dword[3]);
606 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 610 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -697,7 +701,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
697 tdd = tq->data_ring.base + tq->tx_ring.next2fill; 701 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
698 702
699 memcpy(tdd->data, skb->data, ctx->copy_size); 703 memcpy(tdd->data, skb->data, ctx->copy_size);
700 dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n", 704 dev_dbg(&adapter->netdev->dev,
705 "copy %u bytes to dataRing[%u]\n",
701 ctx->copy_size, tq->tx_ring.next2fill); 706 ctx->copy_size, tq->tx_ring.next2fill);
702 return 1; 707 return 1;
703 708
@@ -808,7 +813,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
808 813
809 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { 814 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
810 tq->stats.tx_ring_full++; 815 tq->stats.tx_ring_full++;
811 dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u" 816 dev_dbg(&adapter->netdev->dev,
817 "tx queue stopped on %s, next2comp %u"
812 " next2fill %u\n", adapter->netdev->name, 818 " next2fill %u\n", adapter->netdev->name,
813 tq->tx_ring.next2comp, tq->tx_ring.next2fill); 819 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
814 820
@@ -853,7 +859,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
853 859
854 /* finally flips the GEN bit of the SOP desc */ 860 /* finally flips the GEN bit of the SOP desc */
855 gdesc->dword[2] ^= VMXNET3_TXD_GEN; 861 gdesc->dword[2] ^= VMXNET3_TXD_GEN;
856 dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 862 dev_dbg(&adapter->netdev->dev,
863 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
857 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - 864 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
858 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], 865 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
859 gdesc->dword[3]); 866 gdesc->dword[3]);
@@ -990,7 +997,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
990 if (unlikely(rcd->len == 0)) { 997 if (unlikely(rcd->len == 0)) {
991 /* Pretend the rx buffer is skipped. */ 998 /* Pretend the rx buffer is skipped. */
992 BUG_ON(!(rcd->sop && rcd->eop)); 999 BUG_ON(!(rcd->sop && rcd->eop));
993 dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n", 1000 dev_dbg(&adapter->netdev->dev,
1001 "rxRing[%u][%u] 0 length\n",
994 ring_idx, idx); 1002 ring_idx, idx);
995 goto rcd_done; 1003 goto rcd_done;
996 } 1004 }
@@ -1314,9 +1322,11 @@ vmxnet3_netpoll(struct net_device *netdev)
1314 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1322 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1315 int irq; 1323 int irq;
1316 1324
1325#ifdef CONFIG_PCI_MSI
1317 if (adapter->intr.type == VMXNET3_IT_MSIX) 1326 if (adapter->intr.type == VMXNET3_IT_MSIX)
1318 irq = adapter->intr.msix_entries[0].vector; 1327 irq = adapter->intr.msix_entries[0].vector;
1319 else 1328 else
1329#endif
1320 irq = adapter->pdev->irq; 1330 irq = adapter->pdev->irq;
1321 1331
1322 disable_irq(irq); 1332 disable_irq(irq);
@@ -1330,12 +1340,15 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1330{ 1340{
1331 int err; 1341 int err;
1332 1342
1343#ifdef CONFIG_PCI_MSI
1333 if (adapter->intr.type == VMXNET3_IT_MSIX) { 1344 if (adapter->intr.type == VMXNET3_IT_MSIX) {
1334 /* we only use 1 MSI-X vector */ 1345 /* we only use 1 MSI-X vector */
1335 err = request_irq(adapter->intr.msix_entries[0].vector, 1346 err = request_irq(adapter->intr.msix_entries[0].vector,
1336 vmxnet3_intr, 0, adapter->netdev->name, 1347 vmxnet3_intr, 0, adapter->netdev->name,
1337 adapter->netdev); 1348 adapter->netdev);
1338 } else if (adapter->intr.type == VMXNET3_IT_MSI) { 1349 } else
1350#endif
1351 if (adapter->intr.type == VMXNET3_IT_MSI) {
1339 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 1352 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1340 adapter->netdev->name, adapter->netdev); 1353 adapter->netdev->name, adapter->netdev);
1341 } else { 1354 } else {
@@ -1376,6 +1389,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1376 adapter->intr.num_intrs <= 0); 1389 adapter->intr.num_intrs <= 0);
1377 1390
1378 switch (adapter->intr.type) { 1391 switch (adapter->intr.type) {
1392#ifdef CONFIG_PCI_MSI
1379 case VMXNET3_IT_MSIX: 1393 case VMXNET3_IT_MSIX:
1380 { 1394 {
1381 int i; 1395 int i;
@@ -1385,6 +1399,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1385 adapter->netdev); 1399 adapter->netdev);
1386 break; 1400 break;
1387 } 1401 }
1402#endif
1388 case VMXNET3_IT_MSI: 1403 case VMXNET3_IT_MSI:
1389 free_irq(adapter->pdev->irq, adapter->netdev); 1404 free_irq(adapter->pdev->irq, adapter->netdev);
1390 break; 1405 break;
@@ -1676,7 +1691,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1676 int err; 1691 int err;
1677 u32 ret; 1692 u32 ret;
1678 1693
1679 dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" 1694 dev_dbg(&adapter->netdev->dev,
1695 "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
1680 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, 1696 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
1681 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, 1697 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
1682 adapter->rx_queue.rx_ring[0].size, 1698 adapter->rx_queue.rx_ring[0].size,
@@ -2134,6 +2150,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2134 if (adapter->intr.type == VMXNET3_IT_AUTO) { 2150 if (adapter->intr.type == VMXNET3_IT_AUTO) {
2135 int err; 2151 int err;
2136 2152
2153#ifdef CONFIG_PCI_MSI
2137 adapter->intr.msix_entries[0].entry = 0; 2154 adapter->intr.msix_entries[0].entry = 0;
2138 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2155 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2139 VMXNET3_LINUX_MAX_MSIX_VECT); 2156 VMXNET3_LINUX_MAX_MSIX_VECT);
@@ -2142,6 +2159,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2142 adapter->intr.type = VMXNET3_IT_MSIX; 2159 adapter->intr.type = VMXNET3_IT_MSIX;
2143 return; 2160 return;
2144 } 2161 }
2162#endif
2145 2163
2146 err = pci_enable_msi(adapter->pdev); 2164 err = pci_enable_msi(adapter->pdev);
2147 if (!err) { 2165 if (!err) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 6bb91576e999..445081686d5d 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -27,15 +27,11 @@
27#ifndef _VMXNET3_INT_H 27#ifndef _VMXNET3_INT_H
28#define _VMXNET3_INT_H 28#define _VMXNET3_INT_H
29 29
30#include <linux/types.h>
31#include <linux/ethtool.h> 30#include <linux/ethtool.h>
32#include <linux/delay.h> 31#include <linux/delay.h>
33#include <linux/netdevice.h> 32#include <linux/netdevice.h>
34#include <linux/pci.h> 33#include <linux/pci.h>
35#include <linux/ethtool.h>
36#include <linux/compiler.h> 34#include <linux/compiler.h>
37#include <linux/module.h>
38#include <linux/moduleparam.h>
39#include <linux/slab.h> 35#include <linux/slab.h>
40#include <linux/spinlock.h> 36#include <linux/spinlock.h>
41#include <linux/ioport.h> 37#include <linux/ioport.h>
@@ -59,7 +55,6 @@
59#include <linux/if_vlan.h> 55#include <linux/if_vlan.h>
60#include <linux/if_arp.h> 56#include <linux/if_arp.h>
61#include <linux/inetdevice.h> 57#include <linux/inetdevice.h>
62#include <linux/dst.h>
63 58
64#include "vmxnet3_defs.h" 59#include "vmxnet3_defs.h"
65 60
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 63d0f891ffae..e21358e82c74 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -3612,11 +3612,12 @@ static int __devinit vxge_config_vpaths(
3612 device_config->vp_config[i].fifo.enable = 3612 device_config->vp_config[i].fifo.enable =
3613 VXGE_HW_FIFO_ENABLE; 3613 VXGE_HW_FIFO_ENABLE;
3614 device_config->vp_config[i].fifo.max_frags = 3614 device_config->vp_config[i].fifo.max_frags =
3615 MAX_SKB_FRAGS; 3615 MAX_SKB_FRAGS + 1;
3616 device_config->vp_config[i].fifo.memblock_size = 3616 device_config->vp_config[i].fifo.memblock_size =
3617 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE; 3617 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3618 3618
3619 txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd); 3619 txdl_size = device_config->vp_config[i].fifo.max_frags *
3620 sizeof(struct vxge_hw_fifo_txd);
3620 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size; 3621 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3621 3622
3622 device_config->vp_config[i].fifo.fifo_blocks = 3623 device_config->vp_config[i].fifo.fifo_blocks =
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index fa66248aae6d..77c2a754b7b8 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -18,6 +18,6 @@
18#define VXGE_VERSION_MAJOR "2" 18#define VXGE_VERSION_MAJOR "2"
19#define VXGE_VERSION_MINOR "0" 19#define VXGE_VERSION_MINOR "0"
20#define VXGE_VERSION_FIX "6" 20#define VXGE_VERSION_FIX "6"
21#define VXGE_VERSION_BUILD "18707" 21#define VXGE_VERSION_BUILD "18937"
22#define VXGE_VERSION_FOR "k" 22#define VXGE_VERSION_FOR "k"
23#endif 23#endif
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index d623b3d99a4b..3f703384295e 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -31,6 +31,14 @@ config WIMAX_I2400M_SDIO
31 31
32 If unsure, it is safe to select M (module). 32 If unsure, it is safe to select M (module).
33 33
34config WIMAX_IWMC3200_SDIO
35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO"
36 depends on WIMAX_I2400M_SDIO
37 select IWMC3200TOP
38 help
39 Select if you have a device based on the Intel Multicom WiMAX
40 Connection 3200 over SDIO.
41
34config WIMAX_I2400M_DEBUG_LEVEL 42config WIMAX_I2400M_DEBUG_LEVEL
35 int "WiMAX i2400m debug level" 43 int "WiMAX i2400m debug level"
36 depends on WIMAX_I2400M 44 depends on WIMAX_I2400M
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7116a1aa20ce..4eec87c3be2b 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4790,9 +4790,8 @@ static int proc_stats_rid_open( struct inode *inode,
4790static int get_dec_u16( char *buffer, int *start, int limit ) { 4790static int get_dec_u16( char *buffer, int *start, int limit ) {
4791 u16 value; 4791 u16 value;
4792 int valid = 0; 4792 int valid = 0;
4793 for( value = 0; buffer[*start] >= '0' && 4793 for (value = 0; *start < limit && buffer[*start] >= '0' &&
4794 buffer[*start] <= '9' && 4794 buffer[*start] <= '9'; (*start)++) {
4795 *start < limit; (*start)++ ) {
4796 valid = 1; 4795 valid = 1;
4797 value *= 10; 4796 value *= 10;
4798 value += buffer[*start] - '0'; 4797 value += buffer[*start] - '0';
@@ -5660,7 +5659,8 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
5660 5659
5661 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); 5660 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
5662 pci_save_state(pdev); 5661 pci_save_state(pdev);
5663 return pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5662 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5663 return 0;
5664} 5664}
5665 5665
5666static int airo_pci_resume(struct pci_dev *pdev) 5666static int airo_pci_resume(struct pci_dev *pdev)
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index ec034af26980..9f9459860d82 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -231,7 +231,7 @@ struct ar9170 {
231 struct sk_buff_head tx_status_ampdu; 231 struct sk_buff_head tx_status_ampdu;
232 spinlock_t tx_ampdu_list_lock; 232 spinlock_t tx_ampdu_list_lock;
233 struct list_head tx_ampdu_list; 233 struct list_head tx_ampdu_list;
234 unsigned int tx_ampdu_pending; 234 atomic_t tx_ampdu_pending;
235 235
236 /* rxstream mpdu merge */ 236 /* rxstream mpdu merge */
237 struct ar9170_rxstream_mpdu_merge rx_mpdu; 237 struct ar9170_rxstream_mpdu_merge rx_mpdu;
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 88113148331c..701ddb7d8400 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -152,14 +152,14 @@ enum ar9170_cmd {
152#define AR9170_MAC_REG_FTF_BIT14 BIT(14) 152#define AR9170_MAC_REG_FTF_BIT14 BIT(14)
153#define AR9170_MAC_REG_FTF_BIT15 BIT(15) 153#define AR9170_MAC_REG_FTF_BIT15 BIT(15)
154#define AR9170_MAC_REG_FTF_BAR BIT(24) 154#define AR9170_MAC_REG_FTF_BAR BIT(24)
155#define AR9170_MAC_REG_FTF_BIT25 BIT(25) 155#define AR9170_MAC_REG_FTF_BA BIT(25)
156#define AR9170_MAC_REG_FTF_PSPOLL BIT(26) 156#define AR9170_MAC_REG_FTF_PSPOLL BIT(26)
157#define AR9170_MAC_REG_FTF_RTS BIT(27) 157#define AR9170_MAC_REG_FTF_RTS BIT(27)
158#define AR9170_MAC_REG_FTF_CTS BIT(28) 158#define AR9170_MAC_REG_FTF_CTS BIT(28)
159#define AR9170_MAC_REG_FTF_ACK BIT(29) 159#define AR9170_MAC_REG_FTF_ACK BIT(29)
160#define AR9170_MAC_REG_FTF_CFE BIT(30) 160#define AR9170_MAC_REG_FTF_CFE BIT(30)
161#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31) 161#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31)
162#define AR9170_MAC_REG_FTF_DEFAULTS 0x0500ffff 162#define AR9170_MAC_REG_FTF_DEFAULTS 0x0700ffff
163#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff 163#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff
164 164
165#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0) 165#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0)
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index de0ba2bf7691..7e59b82e64d3 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -414,9 +414,9 @@ static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
414 414
415 skb_queue_tail(&ar->tx_status_ampdu, skb); 415 skb_queue_tail(&ar->tx_status_ampdu, skb);
416 ar9170_tx_fake_ampdu_status(ar); 416 ar9170_tx_fake_ampdu_status(ar);
417 ar->tx_ampdu_pending--;
418 417
419 if (!list_empty(&ar->tx_ampdu_list) && !ar->tx_ampdu_pending) 418 if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
419 !list_empty(&ar->tx_ampdu_list))
420 ar9170_tx_ampdu(ar); 420 ar9170_tx_ampdu(ar);
421} 421}
422 422
@@ -1248,6 +1248,7 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
1248 ar->global_ampdu_density = 6; 1248 ar->global_ampdu_density = 6;
1249 ar->global_ampdu_factor = 3; 1249 ar->global_ampdu_factor = 3;
1250 1250
1251 atomic_set(&ar->tx_ampdu_pending, 0);
1251 ar->bad_hw_nagger = jiffies; 1252 ar->bad_hw_nagger = jiffies;
1252 1253
1253 err = ar->open(ar); 1254 err = ar->open(ar);
@@ -1773,7 +1774,7 @@ static void ar9170_tx(struct ar9170 *ar)
1773 msecs_to_jiffies(AR9170_TX_TIMEOUT); 1774 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1774 1775
1775 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1776 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
1776 ar->tx_ampdu_pending++; 1777 atomic_inc(&ar->tx_ampdu_pending);
1777 1778
1778#ifdef AR9170_QUEUE_DEBUG 1779#ifdef AR9170_QUEUE_DEBUG
1779 printk(KERN_DEBUG "%s: send frame q:%d =>\n", 1780 printk(KERN_DEBUG "%s: send frame q:%d =>\n",
@@ -1784,7 +1785,7 @@ static void ar9170_tx(struct ar9170 *ar)
1784 err = ar->tx(ar, skb); 1785 err = ar->tx(ar, skb);
1785 if (unlikely(err)) { 1786 if (unlikely(err)) {
1786 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1787 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
1787 ar->tx_ampdu_pending--; 1788 atomic_dec(&ar->tx_ampdu_pending);
1788 1789
1789 frames_failed++; 1790 frames_failed++;
1790 dev_kfree_skb_any(skb); 1791 dev_kfree_skb_any(skb);
@@ -1931,7 +1932,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1931 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1932 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1932 bool run = ar9170_tx_ampdu_queue(ar, skb); 1933 bool run = ar9170_tx_ampdu_queue(ar, skb);
1933 1934
1934 if (run || !ar->tx_ampdu_pending) 1935 if (run || !atomic_read(&ar->tx_ampdu_pending))
1935 ar9170_tx_ampdu(ar); 1936 ar9170_tx_ampdu(ar);
1936 } else { 1937 } else {
1937 unsigned int queue = skb_get_queue_mapping(skb); 1938 unsigned int queue = skb_get_queue_mapping(skb);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e974e5829e1a..6bdcdf6d1cc0 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -108,15 +108,15 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
108 return ; 108 return ;
109 109
110 spin_lock_irqsave(&aru->tx_urb_lock, flags); 110 spin_lock_irqsave(&aru->tx_urb_lock, flags);
111 if (aru->tx_submitted_urbs >= AR9170_NUM_TX_URBS) { 111 if (atomic_read(&aru->tx_submitted_urbs) >= AR9170_NUM_TX_URBS) {
112 spin_unlock_irqrestore(&aru->tx_urb_lock, flags); 112 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
113 return ; 113 return ;
114 } 114 }
115 aru->tx_submitted_urbs++; 115 atomic_inc(&aru->tx_submitted_urbs);
116 116
117 urb = usb_get_from_anchor(&aru->tx_pending); 117 urb = usb_get_from_anchor(&aru->tx_pending);
118 if (!urb) { 118 if (!urb) {
119 aru->tx_submitted_urbs--; 119 atomic_dec(&aru->tx_submitted_urbs);
120 spin_unlock_irqrestore(&aru->tx_urb_lock, flags); 120 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
121 121
122 return ; 122 return ;
@@ -133,7 +133,7 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
133 err); 133 err);
134 134
135 usb_unanchor_urb(urb); 135 usb_unanchor_urb(urb);
136 aru->tx_submitted_urbs--; 136 atomic_dec(&aru->tx_submitted_urbs);
137 ar9170_tx_callback(&aru->common, urb->context); 137 ar9170_tx_callback(&aru->common, urb->context);
138 } 138 }
139 139
@@ -151,7 +151,7 @@ static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
151 return ; 151 return ;
152 } 152 }
153 153
154 aru->tx_submitted_urbs--; 154 atomic_dec(&aru->tx_submitted_urbs);
155 155
156 ar9170_tx_callback(&aru->common, skb); 156 ar9170_tx_callback(&aru->common, skb);
157 157
@@ -794,7 +794,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
794 spin_lock_init(&aru->tx_urb_lock); 794 spin_lock_init(&aru->tx_urb_lock);
795 795
796 aru->tx_pending_urbs = 0; 796 aru->tx_pending_urbs = 0;
797 aru->tx_submitted_urbs = 0; 797 atomic_set(&aru->tx_submitted_urbs, 0);
798 798
799 aru->common.stop = ar9170_usb_stop; 799 aru->common.stop = ar9170_usb_stop;
800 aru->common.flush = ar9170_usb_flush; 800 aru->common.flush = ar9170_usb_flush;
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index d098f4d5d2f2..a2ce3b169ceb 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -67,7 +67,7 @@ struct ar9170_usb {
67 bool req_one_stage_fw; 67 bool req_one_stage_fw;
68 68
69 spinlock_t tx_urb_lock; 69 spinlock_t tx_urb_lock;
70 unsigned int tx_submitted_urbs; 70 atomic_t tx_submitted_urbs;
71 unsigned int tx_pending_urbs; 71 unsigned int tx_pending_urbs;
72 72
73 struct completion cmd_wait; 73 struct completion cmd_wait;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 647d826bf5fb..6a2a96761111 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -198,6 +198,7 @@
198#define AR5K_TUNE_CWMAX_11B 1023 198#define AR5K_TUNE_CWMAX_11B 1023
199#define AR5K_TUNE_CWMAX_XR 7 199#define AR5K_TUNE_CWMAX_XR 7
200#define AR5K_TUNE_NOISE_FLOOR -72 200#define AR5K_TUNE_NOISE_FLOOR -72
201#define AR5K_TUNE_CCA_MAX_GOOD_VALUE -95
201#define AR5K_TUNE_MAX_TXPOWER 63 202#define AR5K_TUNE_MAX_TXPOWER 63
202#define AR5K_TUNE_DEFAULT_TXPOWER 25 203#define AR5K_TUNE_DEFAULT_TXPOWER 25
203#define AR5K_TUNE_TPC_TXPOWER false 204#define AR5K_TUNE_TPC_TXPOWER false
@@ -1006,6 +1007,14 @@ struct ath5k_capabilities {
1006 } cap_queues; 1007 } cap_queues;
1007}; 1008};
1008 1009
1010/* size of noise floor history (keep it a power of two) */
1011#define ATH5K_NF_CAL_HIST_MAX 8
1012struct ath5k_nfcal_hist
1013{
1014 s16 index; /* current index into nfval */
1015 s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
1016};
1017
1009 1018
1010/***************************************\ 1019/***************************************\
1011 HARDWARE ABSTRACTION LAYER STRUCTURE 1020 HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1112,6 +1121,8 @@ struct ath5k_hw {
1112 struct ieee80211_channel r_last_channel; 1121 struct ieee80211_channel r_last_channel;
1113 } ah_radar; 1122 } ah_radar;
1114 1123
1124 struct ath5k_nfcal_hist ah_nfcal_hist;
1125
1115 /* noise floor from last periodic calibration */ 1126 /* noise floor from last periodic calibration */
1116 s32 ah_noise_floor; 1127 s32 ah_noise_floor;
1117 1128
@@ -1274,8 +1285,10 @@ extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
1274extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); 1285extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
1275extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1286extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1276/* PHY calibration */ 1287/* PHY calibration */
1288void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
1277extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1289extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1278extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq); 1290extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
1291extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
1279extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah); 1292extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
1280/* Spur mitigation */ 1293/* Spur mitigation */
1281bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, 1294bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 92995adeb5cd..42284445b75e 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -331,6 +331,8 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
331 331
332 ath5k_hw_rfgain_opt_init(ah); 332 ath5k_hw_rfgain_opt_init(ah);
333 333
334 ath5k_hw_init_nfcal_hist(ah);
335
334 /* turn on HW LEDs */ 336 /* turn on HW LEDs */
335 ath5k_hw_set_ledstate(ah, AR5K_LED_INIT); 337 ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
336 338
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a039f2bd732..895990751d36 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1124,77 +1124,148 @@ ath5k_hw_calibration_poll(struct ath5k_hw *ah)
1124 ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION; 1124 ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
1125 AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); 1125 AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
1126 } 1126 }
1127}
1127 1128
1129static int sign_extend(int val, const int nbits)
1130{
1131 int order = BIT(nbits-1);
1132 return (val ^ order) - order;
1128} 1133}
1129 1134
1130/** 1135static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
1131 * ath5k_hw_noise_floor_calibration - perform PHY noise floor calibration 1136{
1132 * 1137 s32 val;
1133 * @ah: struct ath5k_hw pointer we are operating on 1138
1134 * @freq: the channel frequency, just used for error logging 1139 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
1135 * 1140 return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9);
1136 * This function performs a noise floor calibration of the PHY and waits for 1141}
1137 * it to complete. Then the noise floor value is compared to some maximum 1142
1138 * noise floor we consider valid. 1143void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
1139 * 1144{
1140 * Note that this is different from what the madwifi HAL does: it reads the 1145 int i;
1141 * noise floor and afterwards initiates the calibration. Since the noise floor 1146
1142 * calibration can take some time to finish, depending on the current channel 1147 ah->ah_nfcal_hist.index = 0;
1143 * use, that avoids the occasional timeout warnings we are seeing now. 1148 for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++)
1144 * 1149 ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
1145 * See the following link for an Atheros patent on noise floor calibration: 1150}
1146 * http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \ 1151
1147 * &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7 1152static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
1153{
1154 struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
1155 hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX-1);
1156 hist->nfval[hist->index] = noise_floor;
1157}
1158
1159static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
1160{
1161 s16 sort[ATH5K_NF_CAL_HIST_MAX];
1162 s16 tmp;
1163 int i, j;
1164
1165 memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort));
1166 for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) {
1167 for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) {
1168 if (sort[j] > sort[j-1]) {
1169 tmp = sort[j];
1170 sort[j] = sort[j-1];
1171 sort[j-1] = tmp;
1172 }
1173 }
1174 }
1175 for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
1176 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1177 "cal %d:%d\n", i, sort[i]);
1178 }
1179 return sort[(ATH5K_NF_CAL_HIST_MAX-1) / 2];
1180}
1181
1182/*
1183 * When we tell the hardware to perform a noise floor calibration
1184 * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
1185 * sample-and-hold the minimum noise level seen at the antennas.
1186 * This value is then stored in a ring buffer of recently measured
1187 * noise floor values so we have a moving window of the last few
1188 * samples.
1148 * 1189 *
1149 * XXX: Since during noise floor calibration antennas are detached according to 1190 * The median of the values in the history is then loaded into the
1150 * the patent, we should stop tx queues here. 1191 * hardware for its own use for RSSI and CCA measurements.
1151 */ 1192 */
1152int 1193void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1153ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
1154{ 1194{
1155 int ret; 1195 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1156 unsigned int i; 1196 u32 val;
1157 s32 noise_floor; 1197 s16 nf, threshold;
1198 u8 ee_mode;
1158 1199
1159 /* 1200 /* keep last value if calibration hasn't completed */
1160 * Enable noise floor calibration 1201 if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
1161 */ 1202 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1162 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1203 "NF did not complete in calibration window\n");
1163 AR5K_PHY_AGCCTL_NF);
1164 1204
1165 ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, 1205 return;
1166 AR5K_PHY_AGCCTL_NF, 0, false);
1167 if (ret) {
1168 ATH5K_ERR(ah->ah_sc,
1169 "noise floor calibration timeout (%uMHz)\n", freq);
1170 return -EAGAIN;
1171 } 1206 }
1172 1207
1173 /* Wait until the noise floor is calibrated and read the value */ 1208 switch (ah->ah_current_channel->hw_value & CHANNEL_MODES) {
1174 for (i = 20; i > 0; i--) { 1209 case CHANNEL_A:
1175 mdelay(1); 1210 case CHANNEL_T:
1176 noise_floor = ath5k_hw_reg_read(ah, AR5K_PHY_NF); 1211 case CHANNEL_XR:
1177 noise_floor = AR5K_PHY_NF_RVAL(noise_floor); 1212 ee_mode = AR5K_EEPROM_MODE_11A;
1178 if (noise_floor & AR5K_PHY_NF_ACTIVE) { 1213 break;
1179 noise_floor = AR5K_PHY_NF_AVAL(noise_floor); 1214 case CHANNEL_G:
1180 1215 case CHANNEL_TG:
1181 if (noise_floor <= AR5K_TUNE_NOISE_FLOOR) 1216 ee_mode = AR5K_EEPROM_MODE_11G;
1182 break; 1217 break;
1183 } 1218 default:
1219 case CHANNEL_B:
1220 ee_mode = AR5K_EEPROM_MODE_11B;
1221 break;
1184 } 1222 }
1185 1223
1186 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1187 "noise floor %d\n", noise_floor);
1188 1224
1189 if (noise_floor > AR5K_TUNE_NOISE_FLOOR) { 1225 /* completed NF calibration, test threshold */
1190 ATH5K_ERR(ah->ah_sc, 1226 nf = ath5k_hw_read_measured_noise_floor(ah);
1191 "noise floor calibration failed (%uMHz)\n", freq); 1227 threshold = ee->ee_noise_floor_thr[ee_mode];
1192 return -EAGAIN; 1228
1229 if (nf > threshold) {
1230 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1231 "noise floor failure detected; "
1232 "read %d, threshold %d\n",
1233 nf, threshold);
1234
1235 nf = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
1193 } 1236 }
1194 1237
1195 ah->ah_noise_floor = noise_floor; 1238 ath5k_hw_update_nfcal_hist(ah, nf);
1239 nf = ath5k_hw_get_median_noise_floor(ah);
1196 1240
1197 return 0; 1241 /* load noise floor (in .5 dBm) so the hardware will use it */
1242 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF) & ~AR5K_PHY_NF_M;
1243 val |= (nf * 2) & AR5K_PHY_NF_M;
1244 ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
1245
1246 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
1247 ~(AR5K_PHY_AGCCTL_NF_EN | AR5K_PHY_AGCCTL_NF_NOUPDATE));
1248
1249 ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
1250 0, false);
1251
1252 /*
1253 * Load a high max CCA Power value (-50 dBm in .5 dBm units)
1254 * so that we're not capped by the median we just loaded.
1255 * This will be used as the initial value for the next noise
1256 * floor calibration.
1257 */
1258 val = (val & ~AR5K_PHY_NF_M) | ((-50 * 2) & AR5K_PHY_NF_M);
1259 ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
1260 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1261 AR5K_PHY_AGCCTL_NF_EN |
1262 AR5K_PHY_AGCCTL_NF_NOUPDATE |
1263 AR5K_PHY_AGCCTL_NF);
1264
1265 ah->ah_noise_floor = nf;
1266
1267 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1268 "noise floor calibrated: %d\n", nf);
1198} 1269}
1199 1270
1200/* 1271/*
@@ -1287,7 +1358,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1287 return ret; 1358 return ret;
1288 } 1359 }
1289 1360
1290 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1361 ath5k_hw_update_noise_floor(ah);
1291 1362
1292 /* 1363 /*
1293 * Re-enable RX/TX and beacons 1364 * Re-enable RX/TX and beacons
@@ -1360,7 +1431,7 @@ done:
1360 * since noise floor calibration interrupts rx path while I/Q 1431 * since noise floor calibration interrupts rx path while I/Q
1361 * calibration doesn't. We don't need to run noise floor calibration 1432 * calibration doesn't. We don't need to run noise floor calibration
1362 * as often as I/Q calibration.*/ 1433 * as often as I/Q calibration.*/
1363 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1434 ath5k_hw_update_noise_floor(ah);
1364 1435
1365 /* Initiate a gain_F calibration */ 1436 /* Initiate a gain_F calibration */
1366 ath5k_hw_request_rfgain_probe(ah); 1437 ath5k_hw_request_rfgain_probe(ah);
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 64227abe3c20..4cb9c5df9f46 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -2033,17 +2033,14 @@
2033#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */ 2033#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */
2034 2034
2035/* 2035/*
2036 * PHY noise floor status register 2036 * PHY noise floor status register (CCA = Clear Channel Assessment)
2037 */ 2037 */
2038#define AR5K_PHY_NF 0x9864 /* Register address */ 2038#define AR5K_PHY_NF 0x9864 /* Register address */
2039#define AR5K_PHY_NF_M 0x000001ff /* Noise floor mask */ 2039#define AR5K_PHY_NF_M 0x000001ff /* Noise floor, written to hardware in 1/2 dBm units */
2040#define AR5K_PHY_NF_ACTIVE 0x00000100 /* Noise floor calibration still active */ 2040#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
2041#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M)
2042#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1)
2043#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
2044#define AR5K_PHY_NF_THRESH62 0x0007f000 /* Thresh62 -check ANI patent- (field) */ 2041#define AR5K_PHY_NF_THRESH62 0x0007f000 /* Thresh62 -check ANI patent- (field) */
2045#define AR5K_PHY_NF_THRESH62_S 12 2042#define AR5K_PHY_NF_THRESH62_S 12
2046#define AR5K_PHY_NF_MINCCA_PWR 0x0ff80000 /* ??? */ 2043#define AR5K_PHY_NF_MINCCA_PWR 0x0ff80000 /* Minimum measured noise level, read from hardware in 1 dBm units */
2047#define AR5K_PHY_NF_MINCCA_PWR_S 19 2044#define AR5K_PHY_NF_MINCCA_PWR_S 19
2048 2045
2049/* 2046/*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 3dab3d856d7b..62954fc77869 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1293,7 +1293,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1293 * out and/or noise floor calibration might timeout. 1293 * out and/or noise floor calibration might timeout.
1294 */ 1294 */
1295 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1295 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1296 AR5K_PHY_AGCCTL_CAL); 1296 AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
1297 1297
1298 /* At the same time start I/Q calibration for QAM constellation 1298 /* At the same time start I/Q calibration for QAM constellation
1299 * -no need for CCK- */ 1299 * -no need for CCK- */
@@ -1314,21 +1314,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1314 channel->center_freq); 1314 channel->center_freq);
1315 } 1315 }
1316 1316
1317 /*
1318 * If we run NF calibration before AGC, it always times out.
1319 * Binary HAL starts NF and AGC calibration at the same time
1320 * and only waits for AGC to finish. Also if AGC or NF cal.
1321 * times out, reset doesn't fail on binary HAL. I believe
1322 * that's wrong because since rx path is routed to a detector,
1323 * if cal. doesn't finish we won't have RX. Sam's HAL for AR5210/5211
1324 * enables noise floor calibration after offset calibration and if noise
1325 * floor calibration fails, reset fails. I believe that's
1326 * a better approach, we just need to find a polling interval
1327 * that suits best, even if reset continues we need to make
1328 * sure that rx path is ready.
1329 */
1330 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
1331
1332 /* Restore antenna mode */ 1317 /* Restore antenna mode */
1333 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode); 1318 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
1334 1319
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 33c9e8167185..25531f231b67 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -29,15 +29,13 @@ static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
29 29
30static void ath_ahb_cleanup(struct ath_common *common) 30static void ath_ahb_cleanup(struct ath_common *common)
31{ 31{
32 struct ath_hw *ah = (struct ath_hw *) common->ah; 32 struct ath_softc *sc = (struct ath_softc *)common->priv;
33 struct ath_softc *sc = ah->ah_sc;
34 iounmap(sc->mem); 33 iounmap(sc->mem);
35} 34}
36 35
37static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) 36static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
38{ 37{
39 struct ath_hw *ah = (struct ath_hw *) common->ah; 38 struct ath_softc *sc = (struct ath_softc *)common->priv;
40 struct ath_softc *sc = ah->ah_sc;
41 struct platform_device *pdev = to_platform_device(sc->dev); 39 struct platform_device *pdev = to_platform_device(sc->dev);
42 struct ath9k_platform_data *pdata; 40 struct ath9k_platform_data *pdata;
43 41
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index f46bd05df443..551f8801459f 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -813,7 +813,7 @@ static void ath9k_olc_temp_compensation(struct ath_hw *ah)
813 } 813 }
814} 814}
815 815
816static void ath9k_hw_9271_pa_cal(struct ath_hw *ah) 816static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
817{ 817{
818 u32 regVal; 818 u32 regVal;
819 unsigned int i; 819 unsigned int i;
@@ -889,10 +889,19 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
889 REG_WRITE(ah, 0x7834, regVal); 889 REG_WRITE(ah, 0x7834, regVal);
890 } 890 }
891 891
892 /* Empirical offset correction */ 892 regVal = (regVal >>20) & 0x7f;
893#if 0 893
894 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0x20); 894 /* Update PA cal info */
895#endif 895 if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
896 if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
897 ah->pacal_info.max_skipcount =
898 2 * ah->pacal_info.max_skipcount;
899 ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
900 } else {
901 ah->pacal_info.max_skipcount = 1;
902 ah->pacal_info.skipcount = 0;
903 ah->pacal_info.prev_offset = regVal;
904 }
896 905
897 regVal = REG_READ(ah, 0x7834); 906 regVal = REG_READ(ah, 0x7834);
898 regVal |= 0x1; 907 regVal |= 0x1;
@@ -1043,7 +1052,7 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1043 if (longcal) { 1052 if (longcal) {
1044 /* Do periodic PAOffset Cal */ 1053 /* Do periodic PAOffset Cal */
1045 if (AR_SREV_9271(ah)) 1054 if (AR_SREV_9271(ah))
1046 ath9k_hw_9271_pa_cal(ah); 1055 ath9k_hw_9271_pa_cal(ah, false);
1047 else if (AR_SREV_9285_11_OR_LATER(ah)) { 1056 else if (AR_SREV_9285_11_OR_LATER(ah)) {
1048 if (!ah->pacal_info.skipcount) 1057 if (!ah->pacal_info.skipcount)
1049 ath9k_hw_9285_pa_cal(ah, false); 1058 ath9k_hw_9285_pa_cal(ah, false);
@@ -1070,6 +1079,7 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1070} 1079}
1071EXPORT_SYMBOL(ath9k_hw_calibrate); 1080EXPORT_SYMBOL(ath9k_hw_calibrate);
1072 1081
1082/* Carrier leakage Calibration fix */
1073static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan) 1083static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1074{ 1084{
1075 struct ath_common *common = ath9k_hw_common(ah); 1085 struct ath_common *common = ath9k_hw_common(ah);
@@ -1115,7 +1125,7 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1115{ 1125{
1116 struct ath_common *common = ath9k_hw_common(ah); 1126 struct ath_common *common = ath9k_hw_common(ah);
1117 1127
1118 if (AR_SREV_9285_12_OR_LATER(ah)) { 1128 if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
1119 if (!ar9285_clc(ah, chan)) 1129 if (!ar9285_clc(ah, chan))
1120 return false; 1130 return false;
1121 } else { 1131 } else {
@@ -1151,7 +1161,9 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1151 } 1161 }
1152 1162
1153 /* Do PA Calibration */ 1163 /* Do PA Calibration */
1154 if (AR_SREV_9285_11_OR_LATER(ah)) 1164 if (AR_SREV_9271(ah))
1165 ath9k_hw_9271_pa_cal(ah, true);
1166 else if (AR_SREV_9285_11_OR_LATER(ah))
1155 ath9k_hw_9285_pa_cal(ah, true); 1167 ath9k_hw_9285_pa_cal(ah, true);
1156 1168
1157 /* Do NF Calibration after DC offset and other calibrations */ 1169 /* Do NF Calibration after DC offset and other calibrations */
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 063936423d86..bb72b46567f9 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -679,7 +679,7 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
679 return rate; 679 return rate;
680 680
681 if (rate_table->info[rate].valid_single_stream && 681 if (rate_table->info[rate].valid_single_stream &&
682 !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)); 682 !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG))
683 return rate; 683 return rate;
684 684
685 /* This should not happen */ 685 /* This should not happen */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index a8620b1d091b..2a4efcbced60 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2079,7 +2079,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2079 if (needreset) { 2079 if (needreset) {
2080 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, 2080 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2081 "tx hung, resetting the chip\n"); 2081 "tx hung, resetting the chip\n");
2082 ath9k_ps_wakeup(sc);
2082 ath_reset(sc, false); 2083 ath_reset(sc, false);
2084 ath9k_ps_restore(sc);
2083 } 2085 }
2084 2086
2085 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2087 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index c1dd857697a7..a1c39526161a 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -65,10 +65,13 @@ enum CountryCode {
65 CTRY_ALGERIA = 12, 65 CTRY_ALGERIA = 12,
66 CTRY_ARGENTINA = 32, 66 CTRY_ARGENTINA = 32,
67 CTRY_ARMENIA = 51, 67 CTRY_ARMENIA = 51,
68 CTRY_ARUBA = 533,
68 CTRY_AUSTRALIA = 36, 69 CTRY_AUSTRALIA = 36,
69 CTRY_AUSTRIA = 40, 70 CTRY_AUSTRIA = 40,
70 CTRY_AZERBAIJAN = 31, 71 CTRY_AZERBAIJAN = 31,
71 CTRY_BAHRAIN = 48, 72 CTRY_BAHRAIN = 48,
73 CTRY_BANGLADESH = 50,
74 CTRY_BARBADOS = 52,
72 CTRY_BELARUS = 112, 75 CTRY_BELARUS = 112,
73 CTRY_BELGIUM = 56, 76 CTRY_BELGIUM = 56,
74 CTRY_BELIZE = 84, 77 CTRY_BELIZE = 84,
@@ -77,6 +80,7 @@ enum CountryCode {
77 CTRY_BRAZIL = 76, 80 CTRY_BRAZIL = 76,
78 CTRY_BRUNEI_DARUSSALAM = 96, 81 CTRY_BRUNEI_DARUSSALAM = 96,
79 CTRY_BULGARIA = 100, 82 CTRY_BULGARIA = 100,
83 CTRY_CAMBODIA = 116,
80 CTRY_CANADA = 124, 84 CTRY_CANADA = 124,
81 CTRY_CHILE = 152, 85 CTRY_CHILE = 152,
82 CTRY_CHINA = 156, 86 CTRY_CHINA = 156,
@@ -97,7 +101,11 @@ enum CountryCode {
97 CTRY_GEORGIA = 268, 101 CTRY_GEORGIA = 268,
98 CTRY_GERMANY = 276, 102 CTRY_GERMANY = 276,
99 CTRY_GREECE = 300, 103 CTRY_GREECE = 300,
104 CTRY_GREENLAND = 304,
105 CTRY_GRENEDA = 308,
106 CTRY_GUAM = 316,
100 CTRY_GUATEMALA = 320, 107 CTRY_GUATEMALA = 320,
108 CTRY_HAITI = 332,
101 CTRY_HONDURAS = 340, 109 CTRY_HONDURAS = 340,
102 CTRY_HONG_KONG = 344, 110 CTRY_HONG_KONG = 344,
103 CTRY_HUNGARY = 348, 111 CTRY_HUNGARY = 348,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 9847af72208c..248c670fdfbe 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -288,13 +288,16 @@ static struct country_code_to_enum_rd allCountries[] = {
288 {CTRY_DEFAULT, FCC1_FCCA, "CO"}, 288 {CTRY_DEFAULT, FCC1_FCCA, "CO"},
289 {CTRY_ALBANIA, NULL1_WORLD, "AL"}, 289 {CTRY_ALBANIA, NULL1_WORLD, "AL"},
290 {CTRY_ALGERIA, NULL1_WORLD, "DZ"}, 290 {CTRY_ALGERIA, NULL1_WORLD, "DZ"},
291 {CTRY_ARGENTINA, APL3_WORLD, "AR"}, 291 {CTRY_ARGENTINA, FCC3_WORLD, "AR"},
292 {CTRY_ARMENIA, ETSI4_WORLD, "AM"}, 292 {CTRY_ARMENIA, ETSI4_WORLD, "AM"},
293 {CTRY_ARUBA, ETSI1_WORLD, "AW"},
293 {CTRY_AUSTRALIA, FCC2_WORLD, "AU"}, 294 {CTRY_AUSTRALIA, FCC2_WORLD, "AU"},
294 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, 295 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
295 {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, 296 {CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
296 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, 297 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
297 {CTRY_BAHRAIN, APL6_WORLD, "BH"}, 298 {CTRY_BAHRAIN, APL6_WORLD, "BH"},
299 {CTRY_BANGLADESH, NULL1_WORLD, "BD"},
300 {CTRY_BARBADOS, FCC2_WORLD, "BB"},
298 {CTRY_BELARUS, ETSI1_WORLD, "BY"}, 301 {CTRY_BELARUS, ETSI1_WORLD, "BY"},
299 {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, 302 {CTRY_BELGIUM, ETSI1_WORLD, "BE"},
300 {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, 303 {CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
@@ -304,13 +307,14 @@ static struct country_code_to_enum_rd allCountries[] = {
304 {CTRY_BRAZIL, FCC3_WORLD, "BR"}, 307 {CTRY_BRAZIL, FCC3_WORLD, "BR"},
305 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"}, 308 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"},
306 {CTRY_BULGARIA, ETSI6_WORLD, "BG"}, 309 {CTRY_BULGARIA, ETSI6_WORLD, "BG"},
307 {CTRY_CANADA, FCC2_FCCA, "CA"}, 310 {CTRY_CAMBODIA, ETSI1_WORLD, "KH"},
311 {CTRY_CANADA, FCC3_FCCA, "CA"},
308 {CTRY_CANADA2, FCC6_FCCA, "CA"}, 312 {CTRY_CANADA2, FCC6_FCCA, "CA"},
309 {CTRY_CHILE, APL6_WORLD, "CL"}, 313 {CTRY_CHILE, APL6_WORLD, "CL"},
310 {CTRY_CHINA, APL1_WORLD, "CN"}, 314 {CTRY_CHINA, APL1_WORLD, "CN"},
311 {CTRY_COLOMBIA, FCC1_FCCA, "CO"}, 315 {CTRY_COLOMBIA, FCC1_FCCA, "CO"},
312 {CTRY_COSTA_RICA, FCC1_WORLD, "CR"}, 316 {CTRY_COSTA_RICA, FCC1_WORLD, "CR"},
313 {CTRY_CROATIA, ETSI3_WORLD, "HR"}, 317 {CTRY_CROATIA, ETSI1_WORLD, "HR"},
314 {CTRY_CYPRUS, ETSI1_WORLD, "CY"}, 318 {CTRY_CYPRUS, ETSI1_WORLD, "CY"},
315 {CTRY_CZECH, ETSI3_WORLD, "CZ"}, 319 {CTRY_CZECH, ETSI3_WORLD, "CZ"},
316 {CTRY_DENMARK, ETSI1_WORLD, "DK"}, 320 {CTRY_DENMARK, ETSI1_WORLD, "DK"},
@@ -324,18 +328,22 @@ static struct country_code_to_enum_rd allCountries[] = {
324 {CTRY_GEORGIA, ETSI4_WORLD, "GE"}, 328 {CTRY_GEORGIA, ETSI4_WORLD, "GE"},
325 {CTRY_GERMANY, ETSI1_WORLD, "DE"}, 329 {CTRY_GERMANY, ETSI1_WORLD, "DE"},
326 {CTRY_GREECE, ETSI1_WORLD, "GR"}, 330 {CTRY_GREECE, ETSI1_WORLD, "GR"},
331 {CTRY_GREENLAND, ETSI1_WORLD, "GL"},
332 {CTRY_GRENEDA, FCC3_FCCA, "GD"},
333 {CTRY_GUAM, FCC1_FCCA, "GU"},
327 {CTRY_GUATEMALA, FCC1_FCCA, "GT"}, 334 {CTRY_GUATEMALA, FCC1_FCCA, "GT"},
335 {CTRY_HAITI, ETSI1_WORLD, "HT"},
328 {CTRY_HONDURAS, NULL1_WORLD, "HN"}, 336 {CTRY_HONDURAS, NULL1_WORLD, "HN"},
329 {CTRY_HONG_KONG, FCC2_WORLD, "HK"}, 337 {CTRY_HONG_KONG, FCC3_WORLD, "HK"},
330 {CTRY_HUNGARY, ETSI1_WORLD, "HU"}, 338 {CTRY_HUNGARY, ETSI1_WORLD, "HU"},
331 {CTRY_ICELAND, ETSI1_WORLD, "IS"}, 339 {CTRY_ICELAND, ETSI1_WORLD, "IS"},
332 {CTRY_INDIA, APL6_WORLD, "IN"}, 340 {CTRY_INDIA, APL6_WORLD, "IN"},
333 {CTRY_INDONESIA, APL1_WORLD, "ID"}, 341 {CTRY_INDONESIA, NULL1_WORLD, "ID"},
334 {CTRY_IRAN, APL1_WORLD, "IR"}, 342 {CTRY_IRAN, APL1_WORLD, "IR"},
335 {CTRY_IRELAND, ETSI1_WORLD, "IE"}, 343 {CTRY_IRELAND, ETSI1_WORLD, "IE"},
336 {CTRY_ISRAEL, NULL1_WORLD, "IL"}, 344 {CTRY_ISRAEL, NULL1_WORLD, "IL"},
337 {CTRY_ITALY, ETSI1_WORLD, "IT"}, 345 {CTRY_ITALY, ETSI1_WORLD, "IT"},
338 {CTRY_JAMAICA, ETSI1_WORLD, "JM"}, 346 {CTRY_JAMAICA, FCC3_WORLD, "JM"},
339 347
340 {CTRY_JAPAN, MKK1_MKKA, "JP"}, 348 {CTRY_JAPAN, MKK1_MKKA, "JP"},
341 {CTRY_JAPAN1, MKK1_MKKB, "JP"}, 349 {CTRY_JAPAN1, MKK1_MKKB, "JP"},
@@ -402,7 +410,7 @@ static struct country_code_to_enum_rd allCountries[] = {
402 {CTRY_KOREA_ROC, APL9_WORLD, "KR"}, 410 {CTRY_KOREA_ROC, APL9_WORLD, "KR"},
403 {CTRY_KOREA_ROC2, APL2_WORLD, "K2"}, 411 {CTRY_KOREA_ROC2, APL2_WORLD, "K2"},
404 {CTRY_KOREA_ROC3, APL9_WORLD, "K3"}, 412 {CTRY_KOREA_ROC3, APL9_WORLD, "K3"},
405 {CTRY_KUWAIT, NULL1_WORLD, "KW"}, 413 {CTRY_KUWAIT, ETSI3_WORLD, "KW"},
406 {CTRY_LATVIA, ETSI1_WORLD, "LV"}, 414 {CTRY_LATVIA, ETSI1_WORLD, "LV"},
407 {CTRY_LEBANON, NULL1_WORLD, "LB"}, 415 {CTRY_LEBANON, NULL1_WORLD, "LB"},
408 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"}, 416 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"},
@@ -414,13 +422,13 @@ static struct country_code_to_enum_rd allCountries[] = {
414 {CTRY_MALTA, ETSI1_WORLD, "MT"}, 422 {CTRY_MALTA, ETSI1_WORLD, "MT"},
415 {CTRY_MEXICO, FCC1_FCCA, "MX"}, 423 {CTRY_MEXICO, FCC1_FCCA, "MX"},
416 {CTRY_MONACO, ETSI4_WORLD, "MC"}, 424 {CTRY_MONACO, ETSI4_WORLD, "MC"},
417 {CTRY_MOROCCO, NULL1_WORLD, "MA"}, 425 {CTRY_MOROCCO, APL4_WORLD, "MA"},
418 {CTRY_NEPAL, APL1_WORLD, "NP"}, 426 {CTRY_NEPAL, APL1_WORLD, "NP"},
419 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"}, 427 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"},
420 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"}, 428 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"},
421 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"}, 429 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"},
422 {CTRY_NORWAY, ETSI1_WORLD, "NO"}, 430 {CTRY_NORWAY, ETSI1_WORLD, "NO"},
423 {CTRY_OMAN, APL6_WORLD, "OM"}, 431 {CTRY_OMAN, FCC3_WORLD, "OM"},
424 {CTRY_PAKISTAN, NULL1_WORLD, "PK"}, 432 {CTRY_PAKISTAN, NULL1_WORLD, "PK"},
425 {CTRY_PANAMA, FCC1_FCCA, "PA"}, 433 {CTRY_PANAMA, FCC1_FCCA, "PA"},
426 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"}, 434 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"},
@@ -429,7 +437,7 @@ static struct country_code_to_enum_rd allCountries[] = {
429 {CTRY_POLAND, ETSI1_WORLD, "PL"}, 437 {CTRY_POLAND, ETSI1_WORLD, "PL"},
430 {CTRY_PORTUGAL, ETSI1_WORLD, "PT"}, 438 {CTRY_PORTUGAL, ETSI1_WORLD, "PT"},
431 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"}, 439 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"},
432 {CTRY_QATAR, NULL1_WORLD, "QA"}, 440 {CTRY_QATAR, APL1_WORLD, "QA"},
433 {CTRY_ROMANIA, NULL1_WORLD, "RO"}, 441 {CTRY_ROMANIA, NULL1_WORLD, "RO"},
434 {CTRY_RUSSIA, NULL1_WORLD, "RU"}, 442 {CTRY_RUSSIA, NULL1_WORLD, "RU"},
435 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, 443 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
@@ -445,7 +453,7 @@ static struct country_code_to_enum_rd allCountries[] = {
445 {CTRY_SYRIA, NULL1_WORLD, "SY"}, 453 {CTRY_SYRIA, NULL1_WORLD, "SY"},
446 {CTRY_TAIWAN, APL3_FCCA, "TW"}, 454 {CTRY_TAIWAN, APL3_FCCA, "TW"},
447 {CTRY_THAILAND, FCC3_WORLD, "TH"}, 455 {CTRY_THAILAND, FCC3_WORLD, "TH"},
448 {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT"}, 456 {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
449 {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, 457 {CTRY_TUNISIA, ETSI3_WORLD, "TN"},
450 {CTRY_TURKEY, ETSI3_WORLD, "TR"}, 458 {CTRY_TURKEY, ETSI3_WORLD, "TR"},
451 {CTRY_UKRAINE, NULL1_WORLD, "UA"}, 459 {CTRY_UKRAINE, NULL1_WORLD, "UA"},
@@ -456,7 +464,7 @@ static struct country_code_to_enum_rd allCountries[] = {
456 * would need to assign new special alpha2 to CRDA db as with the world 464 * would need to assign new special alpha2 to CRDA db as with the world
457 * regdomain and use another alpha2 */ 465 * regdomain and use another alpha2 */
458 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"}, 466 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"},
459 {CTRY_URUGUAY, APL2_WORLD, "UY"}, 467 {CTRY_URUGUAY, FCC3_WORLD, "UY"},
460 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"}, 468 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"},
461 {CTRY_VENEZUELA, APL2_ETSIC, "VE"}, 469 {CTRY_VENEZUELA, APL2_ETSIC, "VE"},
462 {CTRY_VIET_NAM, NULL1_WORLD, "VN"}, 470 {CTRY_VIET_NAM, NULL1_WORLD, "VN"},
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 660716214d49..65b23f725a04 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -749,12 +749,6 @@ struct b43_wldev {
749#endif 749#endif
750}; 750};
751 751
752/*
753 * Include goes here to avoid a dependency problem.
754 * A better fix would be to integrate xmit.h into b43.h.
755 */
756#include "xmit.h"
757
758/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ 752/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
759struct b43_wl { 753struct b43_wl {
760 /* Pointer to the active wireless device on this chip */ 754 /* Pointer to the active wireless device on this chip */
@@ -830,13 +824,9 @@ struct b43_wl {
830 struct b43_leds leds; 824 struct b43_leds leds;
831 825
832#ifdef CONFIG_B43_PIO 826#ifdef CONFIG_B43_PIO
833 /* 827 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
834 * RX/TX header/tail buffers used by the frame transmit functions. 828 u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
835 */ 829 u8 pio_tailspace[4] __attribute__((__aligned__(8)));
836 struct b43_rxhdr_fw4 rxhdr;
837 struct b43_txhdr txhdr;
838 u8 rx_tail[4];
839 u8 tx_tail[4];
840#endif /* CONFIG_B43_PIO */ 830#endif /* CONFIG_B43_PIO */
841}; 831};
842 832
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 8701034569fa..de4e804bedf0 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1157,8 +1157,9 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
1157} 1157}
1158 1158
1159static int dma_tx_fragment(struct b43_dmaring *ring, 1159static int dma_tx_fragment(struct b43_dmaring *ring,
1160 struct sk_buff *skb) 1160 struct sk_buff **in_skb)
1161{ 1161{
1162 struct sk_buff *skb = *in_skb;
1162 const struct b43_dma_ops *ops = ring->ops; 1163 const struct b43_dma_ops *ops = ring->ops;
1163 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1164 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1164 u8 *header; 1165 u8 *header;
@@ -1224,8 +1225,14 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1224 } 1225 }
1225 1226
1226 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); 1227 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
1228 memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
1229 bounce_skb->dev = skb->dev;
1230 skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
1231 info = IEEE80211_SKB_CB(bounce_skb);
1232
1227 dev_kfree_skb_any(skb); 1233 dev_kfree_skb_any(skb);
1228 skb = bounce_skb; 1234 skb = bounce_skb;
1235 *in_skb = bounce_skb;
1229 meta->skb = skb; 1236 meta->skb = skb;
1230 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1237 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1231 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1238 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
@@ -1355,7 +1362,11 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
1355 * static, so we don't need to store it per frame. */ 1362 * static, so we don't need to store it per frame. */
1356 ring->queue_prio = skb_get_queue_mapping(skb); 1363 ring->queue_prio = skb_get_queue_mapping(skb);
1357 1364
1358 err = dma_tx_fragment(ring, skb); 1365 /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
1366 * into the skb data or cb now. */
1367 hdr = NULL;
1368 info = NULL;
1369 err = dma_tx_fragment(ring, &skb);
1359 if (unlikely(err == -ENOKEY)) { 1370 if (unlikely(err == -ENOKEY)) {
1360 /* Drop this packet, as we don't have the encryption key 1371 /* Drop this packet, as we don't have the encryption key
1361 * anymore and must not transmit it unencrypted. */ 1372 * anymore and must not transmit it unencrypted. */
diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h
index 4c56187810fc..32b66d53cdac 100644
--- a/drivers/net/wireless/b43/leds.h
+++ b/drivers/net/wireless/b43/leds.h
@@ -1,6 +1,7 @@
1#ifndef B43_LEDS_H_ 1#ifndef B43_LEDS_H_
2#define B43_LEDS_H_ 2#define B43_LEDS_H_
3 3
4struct b43_wl;
4struct b43_wldev; 5struct b43_wldev;
5 6
6#ifdef CONFIG_B43_LEDS 7#ifdef CONFIG_B43_LEDS
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index df6b26a0c05e..ed6e96a34743 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4501,7 +4501,6 @@ static void b43_op_stop(struct ieee80211_hw *hw)
4501 4501
4502 cancel_work_sync(&(wl->beacon_update_trigger)); 4502 cancel_work_sync(&(wl->beacon_update_trigger));
4503 4503
4504 wiphy_rfkill_stop_polling(hw->wiphy);
4505 mutex_lock(&wl->mutex); 4504 mutex_lock(&wl->mutex);
4506 if (b43_status(dev) >= B43_STAT_STARTED) { 4505 if (b43_status(dev) >= B43_STAT_STARTED) {
4507 dev = b43_wireless_core_stop(dev); 4506 dev = b43_wireless_core_stop(dev);
@@ -4671,7 +4670,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4671{ 4670{
4672 struct b43_wl *wl = dev->wl; 4671 struct b43_wl *wl = dev->wl;
4673 struct ssb_bus *bus = dev->dev->bus; 4672 struct ssb_bus *bus = dev->dev->bus;
4674 struct pci_dev *pdev = bus->host_pci; 4673 struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
4675 int err; 4674 int err;
4676 bool have_2ghz_phy = 0, have_5ghz_phy = 0; 4675 bool have_2ghz_phy = 0, have_5ghz_phy = 0;
4677 u32 tmp; 4676 u32 tmp;
@@ -4804,7 +4803,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
4804 4803
4805 if (!list_empty(&wl->devlist)) { 4804 if (!list_empty(&wl->devlist)) {
4806 /* We are not the first core on this chip. */ 4805 /* We are not the first core on this chip. */
4807 pdev = dev->bus->host_pci; 4806 pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
4808 /* Only special chips support more than one wireless 4807 /* Only special chips support more than one wireless
4809 * core, although some of the other chips have more than 4808 * core, although some of the other chips have more than
4810 * one wireless core as well. Check for this and 4809 * one wireless core as well. Check for this and
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index dbbf0d11e18e..3105f235303a 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -341,12 +341,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
341 q->mmio_base + B43_PIO_TXDATA, 341 q->mmio_base + B43_PIO_TXDATA,
342 sizeof(u16)); 342 sizeof(u16));
343 if (data_len & 1) { 343 if (data_len & 1) {
344 u8 *tail = wl->pio_tailspace;
345 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
346
344 /* Write the last byte. */ 347 /* Write the last byte. */
345 ctl &= ~B43_PIO_TXCTL_WRITEHI; 348 ctl &= ~B43_PIO_TXCTL_WRITEHI;
346 b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 349 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
347 wl->tx_tail[0] = data[data_len - 1]; 350 tail[0] = data[data_len - 1];
348 wl->tx_tail[1] = 0; 351 tail[1] = 0;
349 ssb_block_write(dev->dev, wl->tx_tail, 2, 352 ssb_block_write(dev->dev, tail, 2,
350 q->mmio_base + B43_PIO_TXDATA, 353 q->mmio_base + B43_PIO_TXDATA,
351 sizeof(u16)); 354 sizeof(u16));
352 } 355 }
@@ -392,31 +395,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
392 q->mmio_base + B43_PIO8_TXDATA, 395 q->mmio_base + B43_PIO8_TXDATA,
393 sizeof(u32)); 396 sizeof(u32));
394 if (data_len & 3) { 397 if (data_len & 3) {
395 wl->tx_tail[3] = 0; 398 u8 *tail = wl->pio_tailspace;
399 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
400
401 memset(tail, 0, 4);
396 /* Write the last few bytes. */ 402 /* Write the last few bytes. */
397 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | 403 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
398 B43_PIO8_TXCTL_24_31); 404 B43_PIO8_TXCTL_24_31);
399 switch (data_len & 3) { 405 switch (data_len & 3) {
400 case 3: 406 case 3:
401 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15; 407 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
402 wl->tx_tail[0] = data[data_len - 3]; 408 tail[0] = data[data_len - 3];
403 wl->tx_tail[1] = data[data_len - 2]; 409 tail[1] = data[data_len - 2];
404 wl->tx_tail[2] = data[data_len - 1]; 410 tail[2] = data[data_len - 1];
405 break; 411 break;
406 case 2: 412 case 2:
407 ctl |= B43_PIO8_TXCTL_8_15; 413 ctl |= B43_PIO8_TXCTL_8_15;
408 wl->tx_tail[0] = data[data_len - 2]; 414 tail[0] = data[data_len - 2];
409 wl->tx_tail[1] = data[data_len - 1]; 415 tail[1] = data[data_len - 1];
410 wl->tx_tail[2] = 0;
411 break; 416 break;
412 case 1: 417 case 1:
413 wl->tx_tail[0] = data[data_len - 1]; 418 tail[0] = data[data_len - 1];
414 wl->tx_tail[1] = 0;
415 wl->tx_tail[2] = 0;
416 break; 419 break;
417 } 420 }
418 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); 421 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
419 ssb_block_write(dev->dev, wl->tx_tail, 4, 422 ssb_block_write(dev->dev, tail, 4,
420 q->mmio_base + B43_PIO8_TXDATA, 423 q->mmio_base + B43_PIO8_TXDATA,
421 sizeof(u32)); 424 sizeof(u32));
422 } 425 }
@@ -455,6 +458,7 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
455 int err; 458 int err;
456 unsigned int hdrlen; 459 unsigned int hdrlen;
457 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 460 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
461 struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;
458 462
459 B43_WARN_ON(list_empty(&q->packets_list)); 463 B43_WARN_ON(list_empty(&q->packets_list));
460 pack = list_entry(q->packets_list.next, 464 pack = list_entry(q->packets_list.next,
@@ -462,7 +466,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
462 466
463 cookie = generate_cookie(q, pack); 467 cookie = generate_cookie(q, pack);
464 hdrlen = b43_txhdr_size(dev); 468 hdrlen = b43_txhdr_size(dev);
465 err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb, 469 BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
470 B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
471 err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
466 info, cookie); 472 info, cookie);
467 if (err) 473 if (err)
468 return err; 474 return err;
@@ -476,9 +482,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
476 482
477 pack->skb = skb; 483 pack->skb = skb;
478 if (q->rev >= 8) 484 if (q->rev >= 8)
479 pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); 485 pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
480 else 486 else
481 pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); 487 pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);
482 488
483 /* Remove it from the list of available packet slots. 489 /* Remove it from the list of available packet slots.
484 * It will be put back when we receive the status report. */ 490 * It will be put back when we receive the status report. */
@@ -624,8 +630,11 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
624 unsigned int i, padding; 630 unsigned int i, padding;
625 struct sk_buff *skb; 631 struct sk_buff *skb;
626 const char *err_msg = NULL; 632 const char *err_msg = NULL;
633 struct b43_rxhdr_fw4 *rxhdr =
634 (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
627 635
628 memset(&wl->rxhdr, 0, sizeof(wl->rxhdr)); 636 BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
637 memset(rxhdr, 0, sizeof(*rxhdr));
629 638
630 /* Check if we have data and wait for it to get ready. */ 639 /* Check if we have data and wait for it to get ready. */
631 if (q->rev >= 8) { 640 if (q->rev >= 8) {
@@ -663,16 +672,16 @@ data_ready:
663 672
664 /* Get the preamble (RX header) */ 673 /* Get the preamble (RX header) */
665 if (q->rev >= 8) { 674 if (q->rev >= 8) {
666 ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), 675 ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
667 q->mmio_base + B43_PIO8_RXDATA, 676 q->mmio_base + B43_PIO8_RXDATA,
668 sizeof(u32)); 677 sizeof(u32));
669 } else { 678 } else {
670 ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), 679 ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
671 q->mmio_base + B43_PIO_RXDATA, 680 q->mmio_base + B43_PIO_RXDATA,
672 sizeof(u16)); 681 sizeof(u16));
673 } 682 }
674 /* Sanity checks. */ 683 /* Sanity checks. */
675 len = le16_to_cpu(wl->rxhdr.frame_len); 684 len = le16_to_cpu(rxhdr->frame_len);
676 if (unlikely(len > 0x700)) { 685 if (unlikely(len > 0x700)) {
677 err_msg = "len > 0x700"; 686 err_msg = "len > 0x700";
678 goto rx_error; 687 goto rx_error;
@@ -682,7 +691,7 @@ data_ready:
682 goto rx_error; 691 goto rx_error;
683 } 692 }
684 693
685 macstat = le32_to_cpu(wl->rxhdr.mac_status); 694 macstat = le32_to_cpu(rxhdr->mac_status);
686 if (macstat & B43_RX_MAC_FCSERR) { 695 if (macstat & B43_RX_MAC_FCSERR) {
687 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { 696 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
688 /* Drop frames with failed FCS. */ 697 /* Drop frames with failed FCS. */
@@ -707,22 +716,25 @@ data_ready:
707 q->mmio_base + B43_PIO8_RXDATA, 716 q->mmio_base + B43_PIO8_RXDATA,
708 sizeof(u32)); 717 sizeof(u32));
709 if (len & 3) { 718 if (len & 3) {
719 u8 *tail = wl->pio_tailspace;
720 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
721
710 /* Read the last few bytes. */ 722 /* Read the last few bytes. */
711 ssb_block_read(dev->dev, wl->rx_tail, 4, 723 ssb_block_read(dev->dev, tail, 4,
712 q->mmio_base + B43_PIO8_RXDATA, 724 q->mmio_base + B43_PIO8_RXDATA,
713 sizeof(u32)); 725 sizeof(u32));
714 switch (len & 3) { 726 switch (len & 3) {
715 case 3: 727 case 3:
716 skb->data[len + padding - 3] = wl->rx_tail[0]; 728 skb->data[len + padding - 3] = tail[0];
717 skb->data[len + padding - 2] = wl->rx_tail[1]; 729 skb->data[len + padding - 2] = tail[1];
718 skb->data[len + padding - 1] = wl->rx_tail[2]; 730 skb->data[len + padding - 1] = tail[2];
719 break; 731 break;
720 case 2: 732 case 2:
721 skb->data[len + padding - 2] = wl->rx_tail[0]; 733 skb->data[len + padding - 2] = tail[0];
722 skb->data[len + padding - 1] = wl->rx_tail[1]; 734 skb->data[len + padding - 1] = tail[1];
723 break; 735 break;
724 case 1: 736 case 1:
725 skb->data[len + padding - 1] = wl->rx_tail[0]; 737 skb->data[len + padding - 1] = tail[0];
726 break; 738 break;
727 } 739 }
728 } 740 }
@@ -731,15 +743,18 @@ data_ready:
731 q->mmio_base + B43_PIO_RXDATA, 743 q->mmio_base + B43_PIO_RXDATA,
732 sizeof(u16)); 744 sizeof(u16));
733 if (len & 1) { 745 if (len & 1) {
746 u8 *tail = wl->pio_tailspace;
747 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
748
734 /* Read the last byte. */ 749 /* Read the last byte. */
735 ssb_block_read(dev->dev, wl->rx_tail, 2, 750 ssb_block_read(dev->dev, tail, 2,
736 q->mmio_base + B43_PIO_RXDATA, 751 q->mmio_base + B43_PIO_RXDATA,
737 sizeof(u16)); 752 sizeof(u16));
738 skb->data[len + padding - 1] = wl->rx_tail[0]; 753 skb->data[len + padding - 1] = tail[0];
739 } 754 }
740 } 755 }
741 756
742 b43_rx(q->dev, skb, &wl->rxhdr); 757 b43_rx(q->dev, skb, rxhdr);
743 758
744 return 1; 759 return 1;
745 760
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 7a3218c5ba7d..ffdce6f3c909 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -33,7 +33,8 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
33 & B43_MMIO_RADIO_HWENABLED_HI_MASK)) 33 & B43_MMIO_RADIO_HWENABLED_HI_MASK))
34 return 1; 34 return 1;
35 } else { 35 } else {
36 if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) 36 if (b43_status(dev) >= B43_STAT_STARTED &&
37 b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
37 & B43_MMIO_RADIO_HWENABLED_LO_MASK) 38 & B43_MMIO_RADIO_HWENABLED_LO_MASK)
38 return 1; 39 return 1;
39 } 40 }
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index f4e9695ec186..7a5e294be2bc 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -27,7 +27,7 @@
27 27
28*/ 28*/
29 29
30#include "b43.h" 30#include "xmit.h"
31#include "phy_common.h" 31#include "phy_common.h"
32#include "dma.h" 32#include "dma.h"
33#include "pio.h" 33#include "pio.h"
@@ -690,10 +690,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
690 } 690 }
691 691
692 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 692 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
693 693 ieee80211_rx_ni(dev->wl->hw, skb);
694 local_bh_disable();
695 ieee80211_rx(dev->wl->hw, skb);
696 local_bh_enable();
697 694
698#if B43_DEBUG 695#if B43_DEBUG
699 dev->rx_count++; 696 dev->rx_count++;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1d9223b3d4c4..0983406f4630 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3592,7 +3592,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
3592{ 3592{
3593 struct b43legacy_wl *wl = dev->wl; 3593 struct b43legacy_wl *wl = dev->wl;
3594 struct ssb_bus *bus = dev->dev->bus; 3594 struct ssb_bus *bus = dev->dev->bus;
3595 struct pci_dev *pdev = bus->host_pci; 3595 struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
3596 int err; 3596 int err;
3597 int have_bphy = 0; 3597 int have_bphy = 0;
3598 int have_gphy = 0; 3598 int have_gphy = 0;
@@ -3706,7 +3706,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
3706 3706
3707 if (!list_empty(&wl->devlist)) { 3707 if (!list_empty(&wl->devlist)) {
3708 /* We are not the first core on this chip. */ 3708 /* We are not the first core on this chip. */
3709 pdev = dev->bus->host_pci; 3709 pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
3710 /* Only special chips support more than one wireless 3710 /* Only special chips support more than one wireless
3711 * core, although some of the other chips have more than 3711 * core, although some of the other chips have more than
3712 * one wireless core as well. Check for this and 3712 * one wireless core as well. Check for this and
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 240cff1e6979..a741d37fd96f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6325,8 +6325,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6325 6325
6326 fail: 6326 fail:
6327 if (dev) { 6327 if (dev) {
6328 if (registered) 6328 if (registered) {
6329 unregister_ieee80211(priv->ieee);
6329 unregister_netdev(dev); 6330 unregister_netdev(dev);
6331 }
6330 6332
6331 ipw2100_hw_stop_adapter(priv); 6333 ipw2100_hw_stop_adapter(priv);
6332 6334
@@ -6383,6 +6385,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6383 /* Unregister the device first - this results in close() 6385 /* Unregister the device first - this results in close()
6384 * being called if the device is open. If we free storage 6386 * being called if the device is open. If we free storage
6385 * first, then close() will crash. */ 6387 * first, then close() will crash. */
6388 unregister_ieee80211(priv->ieee);
6386 unregister_netdev(dev); 6389 unregister_netdev(dev);
6387 6390
6388 /* ipw2100_down will ensure that there is no more pending work 6391 /* ipw2100_down will ensure that there is no more pending work
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 61ef8904af97..4539e63e978e 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11822,6 +11822,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11822 if (err) { 11822 if (err) {
11823 IPW_ERROR("Failed to register promiscuous network " 11823 IPW_ERROR("Failed to register promiscuous network "
11824 "device (error %d).\n", err); 11824 "device (error %d).\n", err);
11825 unregister_ieee80211(priv->ieee);
11825 unregister_netdev(priv->net_dev); 11826 unregister_netdev(priv->net_dev);
11826 goto out_remove_sysfs; 11827 goto out_remove_sysfs;
11827 } 11828 }
@@ -11872,6 +11873,7 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11872 11873
11873 mutex_unlock(&priv->mutex); 11874 mutex_unlock(&priv->mutex);
11874 11875
11876 unregister_ieee80211(priv->ieee);
11875 unregister_netdev(priv->net_dev); 11877 unregister_netdev(priv->net_dev);
11876 11878
11877 if (priv->rxq) { 11879 if (priv->rxq) {
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index bf45391172f3..f42ade6c2d3e 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -1020,6 +1020,7 @@ static inline int libipw_is_cck_rate(u8 rate)
1020/* ieee80211.c */ 1020/* ieee80211.c */
1021extern void free_ieee80211(struct net_device *dev, int monitor); 1021extern void free_ieee80211(struct net_device *dev, int monitor);
1022extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor); 1022extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor);
1023extern void unregister_ieee80211(struct libipw_device *ieee);
1023extern int libipw_change_mtu(struct net_device *dev, int new_mtu); 1024extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
1024 1025
1025extern void libipw_networks_age(struct libipw_device *ieee, 1026extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index a0e9f6aed7da..be5b809ec97a 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -235,16 +235,19 @@ void free_ieee80211(struct net_device *dev, int monitor)
235 libipw_networks_free(ieee); 235 libipw_networks_free(ieee);
236 236
237 /* free cfg80211 resources */ 237 /* free cfg80211 resources */
238 if (!monitor) { 238 if (!monitor)
239 wiphy_unregister(ieee->wdev.wiphy);
240 kfree(ieee->a_band.channels);
241 kfree(ieee->bg_band.channels);
242 wiphy_free(ieee->wdev.wiphy); 239 wiphy_free(ieee->wdev.wiphy);
243 }
244 240
245 free_netdev(dev); 241 free_netdev(dev);
246} 242}
247 243
244void unregister_ieee80211(struct libipw_device *ieee)
245{
246 wiphy_unregister(ieee->wdev.wiphy);
247 kfree(ieee->a_band.channels);
248 kfree(ieee->bg_band.channels);
249}
250
248#ifdef CONFIG_LIBIPW_DEBUG 251#ifdef CONFIG_LIBIPW_DEBUG
249 252
250static int debug = 0; 253static int debug = 0;
@@ -330,3 +333,4 @@ module_init(libipw_init);
330 333
331EXPORT_SYMBOL(alloc_ieee80211); 334EXPORT_SYMBOL(alloc_ieee80211);
332EXPORT_SYMBOL(free_ieee80211); 335EXPORT_SYMBOL(free_ieee80211);
336EXPORT_SYMBOL(unregister_ieee80211);
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 679a67ff76eb..3a645e485dda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -110,8 +110,7 @@ static struct iwl_lib_ops iwl1000_lib = {
110 .send_tx_power = iwl5000_send_tx_power, 110 .send_tx_power = iwl5000_send_tx_power,
111 .update_chain_flags = iwl_update_chain_flags, 111 .update_chain_flags = iwl_update_chain_flags,
112 .apm_ops = { 112 .apm_ops = {
113 .init = iwl5000_apm_init, 113 .init = iwl_apm_init,
114 .reset = iwl5000_apm_reset,
115 .stop = iwl_apm_stop, 114 .stop = iwl_apm_stop,
116 .config = iwl1000_nic_config, 115 .config = iwl1000_nic_config,
117 .set_pwr_src = iwl_set_pwr_src, 116 .set_pwr_src = iwl_set_pwr_src,
@@ -159,15 +158,20 @@ struct iwl_cfg iwl1000_bgn_cfg = {
159 .eeprom_size = OTP_LOW_IMAGE_SIZE, 158 .eeprom_size = OTP_LOW_IMAGE_SIZE,
160 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 159 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
161 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 160 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
161 .num_of_queues = IWL50_NUM_QUEUES,
162 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
162 .mod_params = &iwl50_mod_params, 163 .mod_params = &iwl50_mod_params,
163 .valid_tx_ant = ANT_A, 164 .valid_tx_ant = ANT_A,
164 .valid_rx_ant = ANT_AB, 165 .valid_rx_ant = ANT_AB,
165 .need_pll_cfg = true, 166 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
167 .set_l0s = false,
168 .use_bsm = false,
166 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 169 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
167 .shadow_ram_support = false, 170 .shadow_ram_support = false,
168 .ht_greenfield_support = true, 171 .ht_greenfield_support = true,
169 .led_compensation = 51, 172 .led_compensation = 51,
170 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 173 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
174 .support_ct_kill_exit = true,
171}; 175};
172 176
173struct iwl_cfg iwl1000_bg_cfg = { 177struct iwl_cfg iwl1000_bg_cfg = {
@@ -180,15 +184,20 @@ struct iwl_cfg iwl1000_bg_cfg = {
180 .eeprom_size = OTP_LOW_IMAGE_SIZE, 184 .eeprom_size = OTP_LOW_IMAGE_SIZE,
181 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 185 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
182 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 186 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
187 .num_of_queues = IWL50_NUM_QUEUES,
188 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
183 .mod_params = &iwl50_mod_params, 189 .mod_params = &iwl50_mod_params,
184 .valid_tx_ant = ANT_A, 190 .valid_tx_ant = ANT_A,
185 .valid_rx_ant = ANT_AB, 191 .valid_rx_ant = ANT_AB,
186 .need_pll_cfg = true, 192 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
193 .set_l0s = false,
194 .use_bsm = false,
187 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 195 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
188 .shadow_ram_support = false, 196 .shadow_ram_support = false,
189 .ht_greenfield_support = true, 197 .ht_greenfield_support = true,
190 .led_compensation = 51, 198 .led_compensation = 51,
191 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 199 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
200 .support_ct_kill_exit = true,
192}; 201};
193 202
194MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); 203MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 16772780c5b0..6fd10d443ba3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,12 +71,6 @@
71 71
72#include "iwl-eeprom.h" 72#include "iwl-eeprom.h"
73 73
74/*
75 * uCode queue management definitions ...
76 * Queue #4 is the command queue for 3945 and 4965.
77 */
78#define IWL_CMD_QUEUE_NUM 4
79
80/* Time constants */ 74/* Time constants */
81#define SHORT_SLOT_TIME 9 75#define SHORT_SLOT_TIME 9
82#define LONG_SLOT_TIME 20 76#define LONG_SLOT_TIME 20
@@ -254,12 +248,6 @@ struct iwl3945_eeprom {
254#define TFD_CTL_PAD_SET(n) (n << 28) 248#define TFD_CTL_PAD_SET(n) (n << 28)
255#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) 249#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
256 250
257/*
258 * RX related structures and functions
259 */
260#define RX_FREE_BUFFERS 64
261#define RX_LOW_WATERMARK 8
262
263/* Sizes and addresses for instruction and data memory (SRAM) in 251/* Sizes and addresses for instruction and data memory (SRAM) in
264 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ 252 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
265#define IWL39_RTC_INST_LOWER_BOUND (0x000000) 253#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 4115672e2338..09a7bd2c0be4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -293,7 +293,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
293static void iwl3945_rx_reply_tx(struct iwl_priv *priv, 293static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
294 struct iwl_rx_mem_buffer *rxb) 294 struct iwl_rx_mem_buffer *rxb)
295{ 295{
296 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 296 struct iwl_rx_packet *pkt = rxb_addr(rxb);
297 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 297 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
298 int txq_id = SEQ_TO_QUEUE(sequence); 298 int txq_id = SEQ_TO_QUEUE(sequence);
299 int index = SEQ_TO_INDEX(sequence); 299 int index = SEQ_TO_INDEX(sequence);
@@ -353,16 +353,12 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
353void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 353void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
354 struct iwl_rx_mem_buffer *rxb) 354 struct iwl_rx_mem_buffer *rxb)
355{ 355{
356 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 356 struct iwl_rx_packet *pkt = rxb_addr(rxb);
357 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 357 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
358 (int)sizeof(struct iwl3945_notif_statistics), 358 (int)sizeof(struct iwl3945_notif_statistics),
359 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 359 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
360 360
361 memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39)); 361 memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39));
362
363 iwl_leds_background(priv);
364
365 priv->last_statistics_time = jiffies;
366} 362}
367 363
368/****************************************************************************** 364/******************************************************************************
@@ -545,14 +541,18 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
545 struct iwl_rx_mem_buffer *rxb, 541 struct iwl_rx_mem_buffer *rxb,
546 struct ieee80211_rx_status *stats) 542 struct ieee80211_rx_status *stats)
547{ 543{
548 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 544 struct iwl_rx_packet *pkt = rxb_addr(rxb);
549 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 545 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
550 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 546 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
551 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 547 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
552 short len = le16_to_cpu(rx_hdr->len); 548 u16 len = le16_to_cpu(rx_hdr->len);
549 struct sk_buff *skb;
550 int ret;
551 __le16 fc = hdr->frame_control;
553 552
554 /* We received data from the HW, so stop the watchdog */ 553 /* We received data from the HW, so stop the watchdog */
555 if (unlikely((len + IWL39_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { 554 if (unlikely(len + IWL39_RX_FRAME_SIZE >
555 PAGE_SIZE << priv->hw_params.rx_page_order)) {
556 IWL_DEBUG_DROP(priv, "Corruption detected!\n"); 556 IWL_DEBUG_DROP(priv, "Corruption detected!\n");
557 return; 557 return;
558 } 558 }
@@ -564,20 +564,49 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
564 return; 564 return;
565 } 565 }
566 566
567 skb_reserve(rxb->skb, (void *)rx_hdr->payload - (void *)pkt); 567 skb = alloc_skb(IWL_LINK_HDR_MAX, GFP_ATOMIC);
568 /* Set the size of the skb to the size of the frame */ 568 if (!skb) {
569 skb_put(rxb->skb, le16_to_cpu(rx_hdr->len)); 569 IWL_ERR(priv, "alloc_skb failed\n");
570 return;
571 }
570 572
571 if (!iwl3945_mod_params.sw_crypto) 573 if (!iwl3945_mod_params.sw_crypto)
572 iwl_set_decrypted_flag(priv, 574 iwl_set_decrypted_flag(priv,
573 (struct ieee80211_hdr *)rxb->skb->data, 575 (struct ieee80211_hdr *)rxb_addr(rxb),
574 le32_to_cpu(rx_end->status), stats); 576 le32_to_cpu(rx_end->status), stats);
575 577
576 iwl_update_stats(priv, false, hdr->frame_control, len); 578 skb_add_rx_frag(skb, 0, rxb->page,
579 (void *)rx_hdr->payload - (void *)pkt, len);
580
581 /* mac80211 currently doesn't support paged SKB. Convert it to
582 * linear SKB for management frame and data frame requires
583 * software decryption or software defragementation. */
584 if (ieee80211_is_mgmt(fc) ||
585 ieee80211_has_protected(fc) ||
586 ieee80211_has_morefrags(fc) ||
587 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
588 ret = skb_linearize(skb);
589 else
590 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
591 0 : -ENOMEM;
592
593 if (ret) {
594 kfree_skb(skb);
595 goto out;
596 }
597
598 /*
599 * XXX: We cannot touch the page and its virtual memory (pkt) after
600 * here. It might have already been freed by the above skb change.
601 */
602
603 iwl_update_stats(priv, false, fc, len);
604 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
577 605
578 memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats)); 606 ieee80211_rx(priv->hw, skb);
579 ieee80211_rx_irqsafe(priv->hw, rxb->skb); 607 out:
580 rxb->skb = NULL; 608 priv->alloc_rxb_page--;
609 rxb->page = NULL;
581} 610}
582 611
583#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 612#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
@@ -587,7 +616,7 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
587{ 616{
588 struct ieee80211_hdr *header; 617 struct ieee80211_hdr *header;
589 struct ieee80211_rx_status rx_status; 618 struct ieee80211_rx_status rx_status;
590 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 619 struct iwl_rx_packet *pkt = rxb_addr(rxb);
591 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); 620 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
592 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 621 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
593 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 622 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
@@ -787,29 +816,31 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
787 u8 data_retry_limit; 816 u8 data_retry_limit;
788 __le32 tx_flags; 817 __le32 tx_flags;
789 __le16 fc = hdr->frame_control; 818 __le16 fc = hdr->frame_control;
790 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 819 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
791 820
792 rate = iwl3945_rates[rate_index].plcp; 821 rate = iwl3945_rates[rate_index].plcp;
793 tx_flags = tx->tx_flags; 822 tx_flags = tx_cmd->tx_flags;
794 823
795 /* We need to figure out how to get the sta->supp_rates while 824 /* We need to figure out how to get the sta->supp_rates while
796 * in this running context */ 825 * in this running context */
797 rate_mask = IWL_RATES_MASK; 826 rate_mask = IWL_RATES_MASK;
798 827
828
829 /* Set retry limit on DATA packets and Probe Responses*/
830 if (ieee80211_is_probe_resp(fc))
831 data_retry_limit = 3;
832 else
833 data_retry_limit = IWL_DEFAULT_TX_RETRY;
834 tx_cmd->data_retry_limit = data_retry_limit;
835
799 if (tx_id >= IWL_CMD_QUEUE_NUM) 836 if (tx_id >= IWL_CMD_QUEUE_NUM)
800 rts_retry_limit = 3; 837 rts_retry_limit = 3;
801 else 838 else
802 rts_retry_limit = 7; 839 rts_retry_limit = 7;
803 840
804 if (ieee80211_is_probe_resp(fc)) { 841 if (data_retry_limit < rts_retry_limit)
805 data_retry_limit = 3; 842 rts_retry_limit = data_retry_limit;
806 if (data_retry_limit < rts_retry_limit) 843 tx_cmd->rts_retry_limit = rts_retry_limit;
807 rts_retry_limit = data_retry_limit;
808 } else
809 data_retry_limit = IWL_DEFAULT_TX_RETRY;
810
811 if (priv->data_retry_limit != -1)
812 data_retry_limit = priv->data_retry_limit;
813 844
814 if (ieee80211_is_mgmt(fc)) { 845 if (ieee80211_is_mgmt(fc)) {
815 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { 846 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
@@ -827,22 +858,20 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
827 } 858 }
828 } 859 }
829 860
830 tx->rts_retry_limit = rts_retry_limit; 861 tx_cmd->rate = rate;
831 tx->data_retry_limit = data_retry_limit; 862 tx_cmd->tx_flags = tx_flags;
832 tx->rate = rate;
833 tx->tx_flags = tx_flags;
834 863
835 /* OFDM */ 864 /* OFDM */
836 tx->supp_rates[0] = 865 tx_cmd->supp_rates[0] =
837 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF; 866 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
838 867
839 /* CCK */ 868 /* CCK */
840 tx->supp_rates[1] = (rate_mask & 0xF); 869 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
841 870
842 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " 871 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
843 "cck/ofdm mask: 0x%x/0x%x\n", sta_id, 872 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
844 tx->rate, le32_to_cpu(tx->tx_flags), 873 tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
845 tx->supp_rates[1], tx->supp_rates[0]); 874 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
846} 875}
847 876
848u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags) 877u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
@@ -958,6 +987,11 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
958 987
959 iwl3945_hw_txq_ctx_free(priv); 988 iwl3945_hw_txq_ctx_free(priv);
960 989
990 /* allocate tx queue structure */
991 rc = iwl_alloc_txq_mem(priv);
992 if (rc)
993 return rc;
994
961 /* Tx CMD queue */ 995 /* Tx CMD queue */
962 rc = iwl3945_tx_reset(priv); 996 rc = iwl3945_tx_reset(priv);
963 if (rc) 997 if (rc)
@@ -982,42 +1016,25 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
982 return rc; 1016 return rc;
983} 1017}
984 1018
1019
1020/*
1021 * Start up 3945's basic functionality after it has been reset
1022 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1023 * NOTE: This does not load uCode nor start the embedded processor
1024 */
985static int iwl3945_apm_init(struct iwl_priv *priv) 1025static int iwl3945_apm_init(struct iwl_priv *priv)
986{ 1026{
987 int ret; 1027 int ret = iwl_apm_init(priv);
988
989 iwl_power_initialize(priv);
990 1028
991 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 1029 /* Clear APMG (NIC's internal power management) interrupts */
992 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 1030 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
993 1031 iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
994 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
995 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
996 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
997
998 /* set "initialization complete" bit to move adapter
999 * D0U* --> D0A* state */
1000 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1001
1002 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1003 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1004 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1005 if (ret < 0) {
1006 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1007 goto out;
1008 }
1009
1010 /* enable DMA */
1011 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
1012 APMG_CLK_VAL_BSM_CLK_RQT);
1013
1014 udelay(20);
1015 1032
1016 /* disable L1-Active */ 1033 /* Reset radio chip */
1017 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 1034 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
1018 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 1035 udelay(5);
1036 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
1019 1037
1020out:
1021 return ret; 1038 return ret;
1022} 1039}
1023 1040
@@ -1142,12 +1159,16 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1142 int txq_id; 1159 int txq_id;
1143 1160
1144 /* Tx queues */ 1161 /* Tx queues */
1145 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 1162 if (priv->txq)
1146 if (txq_id == IWL_CMD_QUEUE_NUM) 1163 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1147 iwl_cmd_queue_free(priv); 1164 txq_id++)
1148 else 1165 if (txq_id == IWL_CMD_QUEUE_NUM)
1149 iwl_tx_queue_free(priv, txq_id); 1166 iwl_cmd_queue_free(priv);
1167 else
1168 iwl_tx_queue_free(priv, txq_id);
1150 1169
1170 /* free tx queue structure */
1171 iwl_free_txq_mem(priv);
1151} 1172}
1152 1173
1153void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) 1174void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1156,6 +1177,7 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1156 1177
1157 /* stop SCD */ 1178 /* stop SCD */
1158 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); 1179 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
1180 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
1159 1181
1160 /* reset TFD queues */ 1182 /* reset TFD queues */
1161 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 1183 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
@@ -1168,47 +1190,6 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1168 iwl3945_hw_txq_ctx_free(priv); 1190 iwl3945_hw_txq_ctx_free(priv);
1169} 1191}
1170 1192
1171static int iwl3945_apm_reset(struct iwl_priv *priv)
1172{
1173 iwl_apm_stop_master(priv);
1174
1175
1176 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1177 udelay(10);
1178
1179 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1180
1181 iwl_poll_bit(priv, CSR_GP_CNTRL,
1182 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1183 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1184
1185 iwl_write_prph(priv, APMG_CLK_CTRL_REG,
1186 APMG_CLK_VAL_BSM_CLK_RQT);
1187
1188 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
1189 iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
1190 0xFFFFFFFF);
1191
1192 /* enable DMA */
1193 iwl_write_prph(priv, APMG_CLK_EN_REG,
1194 APMG_CLK_VAL_DMA_CLK_RQT |
1195 APMG_CLK_VAL_BSM_CLK_RQT);
1196 udelay(10);
1197
1198 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
1199 APMG_PS_CTRL_VAL_RESET_REQ);
1200 udelay(5);
1201 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
1202 APMG_PS_CTRL_VAL_RESET_REQ);
1203
1204 /* Clear the 'host command active' bit... */
1205 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1206
1207 wake_up_interruptible(&priv->wait_command_queue);
1208
1209 return 0;
1210}
1211
1212/** 1193/**
1213 * iwl3945_hw_reg_adjust_power_by_temp 1194 * iwl3945_hw_reg_adjust_power_by_temp
1214 * return index delta into power gain settings table 1195 * return index delta into power gain settings table
@@ -1817,7 +1798,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1817static int iwl3945_send_rxon_assoc(struct iwl_priv *priv) 1798static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1818{ 1799{
1819 int rc = 0; 1800 int rc = 0;
1820 struct iwl_rx_packet *res = NULL; 1801 struct iwl_rx_packet *pkt;
1821 struct iwl3945_rxon_assoc_cmd rxon_assoc; 1802 struct iwl3945_rxon_assoc_cmd rxon_assoc;
1822 struct iwl_host_cmd cmd = { 1803 struct iwl_host_cmd cmd = {
1823 .id = REPLY_RXON_ASSOC, 1804 .id = REPLY_RXON_ASSOC,
@@ -1846,14 +1827,14 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1846 if (rc) 1827 if (rc)
1847 return rc; 1828 return rc;
1848 1829
1849 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 1830 pkt = (struct iwl_rx_packet *)cmd.reply_page;
1850 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 1831 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
1851 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n"); 1832 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
1852 rc = -EIO; 1833 rc = -EIO;
1853 } 1834 }
1854 1835
1855 priv->alloc_rxb_skb--; 1836 priv->alloc_rxb_page--;
1856 dev_kfree_skb_any(cmd.reply_skb); 1837 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
1857 1838
1858 return rc; 1839 return rc;
1859} 1840}
@@ -2001,12 +1982,6 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
2001 return 0; 1982 return 0;
2002} 1983}
2003 1984
2004/* will add 3945 channel switch cmd handling later */
2005int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2006{
2007 return 0;
2008}
2009
2010/** 1985/**
2011 * iwl3945_reg_txpower_periodic - called when time to check our temperature. 1986 * iwl3945_reg_txpower_periodic - called when time to check our temperature.
2012 * 1987 *
@@ -2516,11 +2491,10 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2516 } 2491 }
2517 2492
2518 /* Assign number of Usable TX queues */ 2493 /* Assign number of Usable TX queues */
2519 priv->hw_params.max_txq_num = IWL39_NUM_QUEUES; 2494 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
2520 2495
2521 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); 2496 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2522 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K; 2497 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
2523 priv->hw_params.max_pkt_size = 2342;
2524 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 2498 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2525 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 2499 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2526 priv->hw_params.max_stations = IWL3945_STATION_COUNT; 2500 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
@@ -2803,7 +2777,6 @@ static struct iwl_lib_ops iwl3945_lib = {
2803 .dump_nic_error_log = iwl3945_dump_nic_error_log, 2777 .dump_nic_error_log = iwl3945_dump_nic_error_log,
2804 .apm_ops = { 2778 .apm_ops = {
2805 .init = iwl3945_apm_init, 2779 .init = iwl3945_apm_init,
2806 .reset = iwl3945_apm_reset,
2807 .stop = iwl_apm_stop, 2780 .stop = iwl_apm_stop,
2808 .config = iwl3945_nic_config, 2781 .config = iwl3945_nic_config,
2809 .set_pwr_src = iwl3945_set_pwr_src, 2782 .set_pwr_src = iwl3945_set_pwr_src,
@@ -2833,6 +2806,7 @@ static struct iwl_lib_ops iwl3945_lib = {
2833static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2806static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2834 .get_hcmd_size = iwl3945_get_hcmd_size, 2807 .get_hcmd_size = iwl3945_get_hcmd_size,
2835 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2808 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2809 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2836}; 2810};
2837 2811
2838static struct iwl_ops iwl3945_ops = { 2812static struct iwl_ops iwl3945_ops = {
@@ -2852,7 +2826,11 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2852 .eeprom_size = IWL3945_EEPROM_IMG_SIZE, 2826 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2853 .eeprom_ver = EEPROM_3945_EEPROM_VERSION, 2827 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2854 .ops = &iwl3945_ops, 2828 .ops = &iwl3945_ops,
2829 .num_of_queues = IWL39_NUM_QUEUES,
2855 .mod_params = &iwl3945_mod_params, 2830 .mod_params = &iwl3945_mod_params,
2831 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2832 .set_l0s = false,
2833 .use_bsm = true,
2856 .use_isr_legacy = true, 2834 .use_isr_legacy = true,
2857 .ht_greenfield_support = false, 2835 .ht_greenfield_support = false,
2858 .led_compensation = 64, 2836 .led_compensation = 64,
@@ -2867,6 +2845,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2867 .eeprom_size = IWL3945_EEPROM_IMG_SIZE, 2845 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2868 .eeprom_ver = EEPROM_3945_EEPROM_VERSION, 2846 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2869 .ops = &iwl3945_ops, 2847 .ops = &iwl3945_ops,
2848 .num_of_queues = IWL39_NUM_QUEUES,
2870 .mod_params = &iwl3945_mod_params, 2849 .mod_params = &iwl3945_mod_params,
2871 .use_isr_legacy = true, 2850 .use_isr_legacy = true,
2872 .ht_greenfield_support = false, 2851 .ht_greenfield_support = false,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index f3907c1079f5..ebb999a51b58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -130,12 +130,6 @@ struct iwl3945_frame {
130#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) 130#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
131#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) 131#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
132 132
133/*
134 * RX related structures and functions
135 */
136#define RX_FREE_BUFFERS 64
137#define RX_LOW_WATERMARK 8
138
139#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 133#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
140#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 134#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
141#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 135#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
@@ -280,8 +274,6 @@ extern void iwl3945_config_ap(struct iwl_priv *priv);
280 */ 274 */
281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); 275extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
282 276
283extern int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel);
284
285/* 277/*
286 * Forward declare iwl-3945.c functions for iwl-base.c 278 * Forward declare iwl-3945.c functions for iwl-base.c
287 */ 279 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index b34322a32458..c606366b582c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -76,12 +76,9 @@
76 76
77/* 77/*
78 * uCode queue management definitions ... 78 * uCode queue management definitions ...
79 * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4.
80 * The first queue used for block-ack aggregation is #7 (4965 only). 79 * The first queue used for block-ack aggregation is #7 (4965 only).
81 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7. 80 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
82 */ 81 */
83#define IWL_CMD_QUEUE_NUM 4
84#define IWL_CMD_FIFO_NUM 4
85#define IWL49_FIRST_AMPDU_QUEUE 7 82#define IWL49_FIRST_AMPDU_QUEUE 7
86 83
87/* Time constants */ 84/* Time constants */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index f8eed9a4abc1..1ff465ad40d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -62,8 +62,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
62 62
63/* module parameters */ 63/* module parameters */
64static struct iwl_mod_params iwl4965_mod_params = { 64static struct iwl_mod_params iwl4965_mod_params = {
65 .num_of_queues = IWL49_NUM_QUEUES,
66 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
67 .amsdu_size_8K = 1, 65 .amsdu_size_8K = 1,
68 .restart_fw = 1, 66 .restart_fw = 1,
69 /* the rest are 0 by default */ 67 /* the rest are 0 by default */
@@ -319,64 +317,13 @@ static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
319 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask); 317 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
320} 318}
321 319
322static int iwl4965_apm_init(struct iwl_priv *priv)
323{
324 int ret = 0;
325
326 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
327 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
328
329 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
330 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
331 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
332
333 /* set "initialization complete" bit to move adapter
334 * D0U* --> D0A* state */
335 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
336
337 /* wait for clock stabilization */
338 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
339 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
340 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
341 if (ret < 0) {
342 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
343 goto out;
344 }
345
346 /* enable DMA */
347 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
348 APMG_CLK_VAL_BSM_CLK_RQT);
349
350 udelay(20);
351
352 /* disable L1-Active */
353 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
354 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
355
356out:
357 return ret;
358}
359
360
361static void iwl4965_nic_config(struct iwl_priv *priv) 320static void iwl4965_nic_config(struct iwl_priv *priv)
362{ 321{
363 unsigned long flags; 322 unsigned long flags;
364 u16 radio_cfg; 323 u16 radio_cfg;
365 u16 lctl;
366 324
367 spin_lock_irqsave(&priv->lock, flags); 325 spin_lock_irqsave(&priv->lock, flags);
368 326
369 lctl = iwl_pcie_link_ctl(priv);
370
371 /* HW bug W/A - negligible power consumption */
372 /* L1-ASPM is enabled by BIOS */
373 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
374 /* L1-ASPM enabled: disable L0S */
375 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
376 else
377 /* L1-ASPM disabled: enable L0S */
378 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
379
380 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 327 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
381 328
382 /* write radio config values to register */ 329 /* write radio config values to register */
@@ -397,46 +344,6 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
397 spin_unlock_irqrestore(&priv->lock, flags); 344 spin_unlock_irqrestore(&priv->lock, flags);
398} 345}
399 346
400static int iwl4965_apm_reset(struct iwl_priv *priv)
401{
402 int ret = 0;
403
404 iwl_apm_stop_master(priv);
405
406
407 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
408
409 udelay(10);
410
411 /* FIXME: put here L1A -L0S w/a */
412
413 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
414
415 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
416 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
417 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
418 if (ret < 0)
419 goto out;
420
421 udelay(10);
422
423 /* Enable DMA and BSM Clock */
424 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
425 APMG_CLK_VAL_BSM_CLK_RQT);
426
427 udelay(10);
428
429 /* disable L1A */
430 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
431 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
432
433 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
434 wake_up_interruptible(&priv->wait_command_queue);
435
436out:
437 return ret;
438}
439
440/* Reset differential Rx gains in NIC to prepare for chain noise calibration. 347/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
441 * Called after every association, but this runs only once! 348 * Called after every association, but this runs only once!
442 * ... once chain noise is calibrated the first time, it's good forever. */ 349 * ... once chain noise is calibrated the first time, it's good forever. */
@@ -526,18 +433,6 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
526 data->beacon_count = 0; 433 data->beacon_count = 0;
527} 434}
528 435
529static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
530 __le32 *tx_flags)
531{
532 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
533 *tx_flags |= TX_CMD_FLG_RTS_MSK;
534 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
535 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
536 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
537 *tx_flags |= TX_CMD_FLG_CTS_MSK;
538 }
539}
540
541static void iwl4965_bg_txpower_work(struct work_struct *work) 436static void iwl4965_bg_txpower_work(struct work_struct *work)
542{ 437{
543 struct iwl_priv *priv = container_of(work, struct iwl_priv, 438 struct iwl_priv *priv = container_of(work, struct iwl_priv,
@@ -718,6 +613,10 @@ static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
718 613
719 .nrg_th_cck = 100, 614 .nrg_th_cck = 100,
720 .nrg_th_ofdm = 100, 615 .nrg_th_ofdm = 100,
616
617 .barker_corr_th_min = 190,
618 .barker_corr_th_min_mrc = 390,
619 .nrg_th_cca = 62,
721}; 620};
722 621
723static void iwl4965_set_ct_threshold(struct iwl_priv *priv) 622static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
@@ -734,19 +633,16 @@ static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
734 */ 633 */
735static int iwl4965_hw_set_hw_params(struct iwl_priv *priv) 634static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
736{ 635{
636 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
637 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
638 priv->cfg->num_of_queues =
639 priv->cfg->mod_params->num_of_queues;
737 640
738 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) || 641 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
739 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
740 IWL_ERR(priv,
741 "invalid queues_num, should be between %d and %d\n",
742 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
743 return -EINVAL;
744 }
745
746 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
747 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; 642 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
748 priv->hw_params.scd_bc_tbls_size = 643 priv->hw_params.scd_bc_tbls_size =
749 IWL49_NUM_QUEUES * sizeof(struct iwl4965_scd_bc_tbl); 644 priv->cfg->num_of_queues *
645 sizeof(struct iwl4965_scd_bc_tbl);
750 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 646 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
751 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 647 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
752 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 648 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
@@ -757,10 +653,10 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
757 653
758 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 654 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
759 655
760 priv->hw_params.tx_chains_num = 2; 656 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
761 priv->hw_params.rx_chains_num = 2; 657 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
762 priv->hw_params.valid_tx_ant = ANT_A | ANT_B; 658 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
763 priv->hw_params.valid_rx_ant = ANT_A | ANT_B; 659 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
764 if (priv->cfg->ops->lib->temp_ops.set_ct_kill) 660 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
765 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); 661 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
766 662
@@ -1537,14 +1433,13 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1537 return ret; 1433 return ret;
1538} 1434}
1539 1435
1540#ifdef IEEE80211_CONF_CHANNEL_SWITCH
1541static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) 1436static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1542{ 1437{
1543 int rc; 1438 int rc;
1544 u8 band = 0; 1439 u8 band = 0;
1545 bool is_ht40 = false; 1440 bool is_ht40 = false;
1546 u8 ctrl_chan_high = 0; 1441 u8 ctrl_chan_high = 0;
1547 struct iwl4965_channel_switch_cmd cmd = { 0 }; 1442 struct iwl4965_channel_switch_cmd cmd;
1548 const struct iwl_channel_info *ch_info; 1443 const struct iwl_channel_info *ch_info;
1549 1444
1550 band = priv->band == IEEE80211_BAND_2GHZ; 1445 band = priv->band == IEEE80211_BAND_2GHZ;
@@ -1565,8 +1460,11 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1565 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); 1460 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1566 if (ch_info) 1461 if (ch_info)
1567 cmd.expect_beacon = is_channel_radar(ch_info); 1462 cmd.expect_beacon = is_channel_radar(ch_info);
1568 else 1463 else {
1569 cmd.expect_beacon = 1; 1464 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1465 priv->active_rxon.channel, channel);
1466 return -EFAULT;
1467 }
1570 1468
1571 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40, 1469 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40,
1572 ctrl_chan_high, &cmd.tx_power); 1470 ctrl_chan_high, &cmd.tx_power);
@@ -1578,7 +1476,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1578 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1476 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1579 return rc; 1477 return rc;
1580} 1478}
1581#endif
1582 1479
1583/** 1480/**
1584 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 1481 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
@@ -1775,11 +1672,13 @@ static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1775 u16 ssn_idx, u8 tx_fifo) 1672 u16 ssn_idx, u8 tx_fifo)
1776{ 1673{
1777 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1674 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1778 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) { 1675 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
1676 <= txq_id)) {
1779 IWL_WARN(priv, 1677 IWL_WARN(priv,
1780 "queue number out of range: %d, must be %d to %d\n", 1678 "queue number out of range: %d, must be %d to %d\n",
1781 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1679 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1782 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1); 1680 IWL49_FIRST_AMPDU_QUEUE +
1681 priv->cfg->num_of_ampdu_queues - 1);
1783 return -EINVAL; 1682 return -EINVAL;
1784 } 1683 }
1785 1684
@@ -1840,11 +1739,13 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1840 u16 ra_tid; 1739 u16 ra_tid;
1841 1740
1842 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1741 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1843 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) { 1742 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
1743 <= txq_id)) {
1844 IWL_WARN(priv, 1744 IWL_WARN(priv,
1845 "queue number out of range: %d, must be %d to %d\n", 1745 "queue number out of range: %d, must be %d to %d\n",
1846 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1746 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1847 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1); 1747 IWL49_FIRST_AMPDU_QUEUE +
1748 priv->cfg->num_of_ampdu_queues - 1);
1848 return -EINVAL; 1749 return -EINVAL;
1849 } 1750 }
1850 1751
@@ -2048,7 +1949,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2048static void iwl4965_rx_reply_tx(struct iwl_priv *priv, 1949static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2049 struct iwl_rx_mem_buffer *rxb) 1950 struct iwl_rx_mem_buffer *rxb)
2050{ 1951{
2051 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1952 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2052 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1953 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2053 int txq_id = SEQ_TO_QUEUE(sequence); 1954 int txq_id = SEQ_TO_QUEUE(sequence);
2054 int index = SEQ_TO_INDEX(sequence); 1955 int index = SEQ_TO_INDEX(sequence);
@@ -2249,7 +2150,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2249 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2150 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2250 .chain_noise_reset = iwl4965_chain_noise_reset, 2151 .chain_noise_reset = iwl4965_chain_noise_reset,
2251 .gain_computation = iwl4965_gain_computation, 2152 .gain_computation = iwl4965_gain_computation,
2252 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag, 2153 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2253 .calc_rssi = iwl4965_calc_rssi, 2154 .calc_rssi = iwl4965_calc_rssi,
2254}; 2155};
2255 2156
@@ -2271,9 +2172,9 @@ static struct iwl_lib_ops iwl4965_lib = {
2271 .load_ucode = iwl4965_load_bsm, 2172 .load_ucode = iwl4965_load_bsm,
2272 .dump_nic_event_log = iwl_dump_nic_event_log, 2173 .dump_nic_event_log = iwl_dump_nic_event_log,
2273 .dump_nic_error_log = iwl_dump_nic_error_log, 2174 .dump_nic_error_log = iwl_dump_nic_error_log,
2175 .set_channel_switch = iwl4965_hw_channel_switch,
2274 .apm_ops = { 2176 .apm_ops = {
2275 .init = iwl4965_apm_init, 2177 .init = iwl_apm_init,
2276 .reset = iwl4965_apm_reset,
2277 .stop = iwl_apm_stop, 2178 .stop = iwl_apm_stop,
2278 .config = iwl4965_nic_config, 2179 .config = iwl4965_nic_config,
2279 .set_pwr_src = iwl_set_pwr_src, 2180 .set_pwr_src = iwl_set_pwr_src,
@@ -2323,7 +2224,14 @@ struct iwl_cfg iwl4965_agn_cfg = {
2323 .eeprom_ver = EEPROM_4965_EEPROM_VERSION, 2224 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2324 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, 2225 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2325 .ops = &iwl4965_ops, 2226 .ops = &iwl4965_ops,
2227 .num_of_queues = IWL49_NUM_QUEUES,
2228 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
2326 .mod_params = &iwl4965_mod_params, 2229 .mod_params = &iwl4965_mod_params,
2230 .valid_tx_ant = ANT_AB,
2231 .valid_rx_ant = ANT_AB,
2232 .pll_cfg_val = 0,
2233 .set_l0s = true,
2234 .use_bsm = true,
2327 .use_isr_legacy = true, 2235 .use_isr_legacy = true,
2328 .ht_greenfield_support = false, 2236 .ht_greenfield_support = false,
2329 .broken_powersave = true, 2237 .broken_powersave = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 8cc3d50e7f59..d256fecc6cda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -72,115 +72,14 @@ static const u16 iwl5000_default_queue_to_tx_fifo[] = {
72 IWL_TX_FIFO_HCCA_2 72 IWL_TX_FIFO_HCCA_2
73}; 73};
74 74
75int iwl5000_apm_init(struct iwl_priv *priv)
76{
77 int ret = 0;
78
79 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
80 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
81
82 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
83 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
84 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
85
86 /* Set FH wait threshold to maximum (HW error during stress W/A) */
87 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
88
89 /* enable HAP INTA to move device L1a -> L0s */
90 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
91 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
92
93 if (priv->cfg->need_pll_cfg)
94 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
95
96 /* set "initialization complete" bit to move adapter
97 * D0U* --> D0A* state */
98 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
99
100 /* wait for clock stabilization */
101 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
102 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
103 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
104 if (ret < 0) {
105 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
106 return ret;
107 }
108
109 /* enable DMA */
110 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
111
112 udelay(20);
113
114 /* disable L1-Active */
115 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
116 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
117
118 return ret;
119}
120
121int iwl5000_apm_reset(struct iwl_priv *priv)
122{
123 int ret = 0;
124
125 iwl_apm_stop_master(priv);
126
127 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
128
129 udelay(10);
130
131
132 /* FIXME: put here L1A -L0S w/a */
133
134 if (priv->cfg->need_pll_cfg)
135 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
136
137 /* set "initialization complete" bit to move adapter
138 * D0U* --> D0A* state */
139 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
140
141 /* wait for clock stabilization */
142 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
143 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
144 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
145 if (ret < 0) {
146 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
147 goto out;
148 }
149
150 /* enable DMA */
151 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
152
153 udelay(20);
154
155 /* disable L1-Active */
156 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
157 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
158out:
159
160 return ret;
161}
162
163
164/* NIC configuration for 5000 series */ 75/* NIC configuration for 5000 series */
165void iwl5000_nic_config(struct iwl_priv *priv) 76void iwl5000_nic_config(struct iwl_priv *priv)
166{ 77{
167 unsigned long flags; 78 unsigned long flags;
168 u16 radio_cfg; 79 u16 radio_cfg;
169 u16 lctl;
170 80
171 spin_lock_irqsave(&priv->lock, flags); 81 spin_lock_irqsave(&priv->lock, flags);
172 82
173 lctl = iwl_pcie_link_ctl(priv);
174
175 /* HW bug W/A */
176 /* L1-ASPM is enabled by BIOS */
177 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
178 /* L1-APSM enabled: disable L0S */
179 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
180 else
181 /* L1-ASPM disabled: enable L0S */
182 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
183
184 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 83 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
185 84
186 /* write radio config values to register */ 85 /* write radio config values to register */
@@ -279,7 +178,7 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
279 data->delta_gain_code[i] = 0; 178 data->delta_gain_code[i] = 0;
280 continue; 179 continue;
281 } 180 }
282 delta_g = (1000 * ((s32)average_noise[0] - 181 delta_g = (1000 * ((s32)average_noise[default_chain] -
283 (s32)average_noise[i])) / 1500; 182 (s32)average_noise[i])) / 1500;
284 /* bound gain by 2 bits value max, 3rd bit is sign */ 183 /* bound gain by 2 bits value max, 3rd bit is sign */
285 data->delta_gain_code[i] = 184 data->delta_gain_code[i] =
@@ -372,6 +271,10 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
372 .auto_corr_max_cck_mrc = 400, 271 .auto_corr_max_cck_mrc = 400,
373 .nrg_th_cck = 95, 272 .nrg_th_cck = 95,
374 .nrg_th_ofdm = 95, 273 .nrg_th_ofdm = 95,
274
275 .barker_corr_th_min = 190,
276 .barker_corr_th_min_mrc = 390,
277 .nrg_th_cca = 62,
375}; 278};
376 279
377static struct iwl_sensitivity_ranges iwl5150_sensitivity = { 280static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
@@ -394,6 +297,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
394 .auto_corr_max_cck_mrc = 400, 297 .auto_corr_max_cck_mrc = 400,
395 .nrg_th_cck = 95, 298 .nrg_th_cck = 95,
396 .nrg_th_ofdm = 95, 299 .nrg_th_ofdm = 95,
300
301 .barker_corr_th_min = 190,
302 .barker_corr_th_min_mrc = 390,
303 .nrg_th_cca = 62,
397}; 304};
398 305
399const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, 306const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -458,7 +365,7 @@ static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
458static void iwl5000_rx_calib_result(struct iwl_priv *priv, 365static void iwl5000_rx_calib_result(struct iwl_priv *priv,
459 struct iwl_rx_mem_buffer *rxb) 366 struct iwl_rx_mem_buffer *rxb)
460{ 367{
461 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 368 struct iwl_rx_packet *pkt = rxb_addr(rxb);
462 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw; 369 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
463 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 370 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
464 int index; 371 int index;
@@ -784,18 +691,16 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
784 691
785int iwl5000_hw_set_hw_params(struct iwl_priv *priv) 692int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
786{ 693{
787 if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) || 694 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
788 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 695 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
789 IWL_ERR(priv, 696 priv->cfg->num_of_queues =
790 "invalid queues_num, should be between %d and %d\n", 697 priv->cfg->mod_params->num_of_queues;
791 IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
792 return -EINVAL;
793 }
794 698
795 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 699 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
796 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 700 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
797 priv->hw_params.scd_bc_tbls_size = 701 priv->hw_params.scd_bc_tbls_size =
798 IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl); 702 priv->cfg->num_of_queues *
703 sizeof(struct iwl5000_scd_bc_tbl);
799 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 704 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
800 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 705 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
801 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 706 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@@ -947,11 +852,13 @@ int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
947 u16 ra_tid; 852 u16 ra_tid;
948 853
949 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 854 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
950 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 855 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
856 <= txq_id)) {
951 IWL_WARN(priv, 857 IWL_WARN(priv,
952 "queue number out of range: %d, must be %d to %d\n", 858 "queue number out of range: %d, must be %d to %d\n",
953 txq_id, IWL50_FIRST_AMPDU_QUEUE, 859 txq_id, IWL50_FIRST_AMPDU_QUEUE,
954 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 860 IWL50_FIRST_AMPDU_QUEUE +
861 priv->cfg->num_of_ampdu_queues - 1);
955 return -EINVAL; 862 return -EINVAL;
956 } 863 }
957 864
@@ -1005,11 +912,13 @@ int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1005 u16 ssn_idx, u8 tx_fifo) 912 u16 ssn_idx, u8 tx_fifo)
1006{ 913{
1007 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 914 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
1008 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 915 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
916 <= txq_id)) {
1009 IWL_ERR(priv, 917 IWL_ERR(priv,
1010 "queue number out of range: %d, must be %d to %d\n", 918 "queue number out of range: %d, must be %d to %d\n",
1011 txq_id, IWL50_FIRST_AMPDU_QUEUE, 919 txq_id, IWL50_FIRST_AMPDU_QUEUE,
1012 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 920 IWL50_FIRST_AMPDU_QUEUE +
921 priv->cfg->num_of_ampdu_queues - 1);
1013 return -EINVAL; 922 return -EINVAL;
1014 } 923 }
1015 924
@@ -1176,7 +1085,7 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1176static void iwl5000_rx_reply_tx(struct iwl_priv *priv, 1085static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1177 struct iwl_rx_mem_buffer *rxb) 1086 struct iwl_rx_mem_buffer *rxb)
1178{ 1087{
1179 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1088 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1180 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1089 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1181 int txq_id = SEQ_TO_QUEUE(sequence); 1090 int txq_id = SEQ_TO_QUEUE(sequence);
1182 int index = SEQ_TO_INDEX(sequence); 1091 int index = SEQ_TO_INDEX(sequence);
@@ -1473,6 +1382,36 @@ IWL5000_UCODE_GET(init_size);
1473IWL5000_UCODE_GET(init_data_size); 1382IWL5000_UCODE_GET(init_data_size);
1474IWL5000_UCODE_GET(boot_size); 1383IWL5000_UCODE_GET(boot_size);
1475 1384
1385static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1386{
1387 struct iwl5000_channel_switch_cmd cmd;
1388 const struct iwl_channel_info *ch_info;
1389 struct iwl_host_cmd hcmd = {
1390 .id = REPLY_CHANNEL_SWITCH,
1391 .len = sizeof(cmd),
1392 .flags = CMD_SIZE_HUGE,
1393 .data = &cmd,
1394 };
1395
1396 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
1397 priv->active_rxon.channel, channel);
1398 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
1399 cmd.channel = cpu_to_le16(channel);
1400 cmd.rxon_flags = priv->active_rxon.flags;
1401 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
1402 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1403 ch_info = iwl_get_channel_info(priv, priv->band, channel);
1404 if (ch_info)
1405 cmd.expect_beacon = is_channel_radar(ch_info);
1406 else {
1407 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1408 priv->active_rxon.channel, channel);
1409 return -EFAULT;
1410 }
1411
1412 return iwl_send_cmd_sync(priv, &hcmd);
1413}
1414
1476struct iwl_hcmd_ops iwl5000_hcmd = { 1415struct iwl_hcmd_ops iwl5000_hcmd = {
1477 .rxon_assoc = iwl5000_send_rxon_assoc, 1416 .rxon_assoc = iwl5000_send_rxon_assoc,
1478 .commit_rxon = iwl_commit_rxon, 1417 .commit_rxon = iwl_commit_rxon,
@@ -1520,9 +1459,9 @@ struct iwl_lib_ops iwl5000_lib = {
1520 .alive_notify = iwl5000_alive_notify, 1459 .alive_notify = iwl5000_alive_notify,
1521 .send_tx_power = iwl5000_send_tx_power, 1460 .send_tx_power = iwl5000_send_tx_power,
1522 .update_chain_flags = iwl_update_chain_flags, 1461 .update_chain_flags = iwl_update_chain_flags,
1462 .set_channel_switch = iwl5000_hw_channel_switch,
1523 .apm_ops = { 1463 .apm_ops = {
1524 .init = iwl5000_apm_init, 1464 .init = iwl_apm_init,
1525 .reset = iwl5000_apm_reset,
1526 .stop = iwl_apm_stop, 1465 .stop = iwl_apm_stop,
1527 .config = iwl5000_nic_config, 1466 .config = iwl5000_nic_config,
1528 .set_pwr_src = iwl_set_pwr_src, 1467 .set_pwr_src = iwl_set_pwr_src,
@@ -1572,9 +1511,9 @@ static struct iwl_lib_ops iwl5150_lib = {
1572 .alive_notify = iwl5000_alive_notify, 1511 .alive_notify = iwl5000_alive_notify,
1573 .send_tx_power = iwl5000_send_tx_power, 1512 .send_tx_power = iwl5000_send_tx_power,
1574 .update_chain_flags = iwl_update_chain_flags, 1513 .update_chain_flags = iwl_update_chain_flags,
1514 .set_channel_switch = iwl5000_hw_channel_switch,
1575 .apm_ops = { 1515 .apm_ops = {
1576 .init = iwl5000_apm_init, 1516 .init = iwl_apm_init,
1577 .reset = iwl5000_apm_reset,
1578 .stop = iwl_apm_stop, 1517 .stop = iwl_apm_stop,
1579 .config = iwl5000_nic_config, 1518 .config = iwl5000_nic_config,
1580 .set_pwr_src = iwl_set_pwr_src, 1519 .set_pwr_src = iwl_set_pwr_src,
@@ -1621,8 +1560,6 @@ static struct iwl_ops iwl5150_ops = {
1621}; 1560};
1622 1561
1623struct iwl_mod_params iwl50_mod_params = { 1562struct iwl_mod_params iwl50_mod_params = {
1624 .num_of_queues = IWL50_NUM_QUEUES,
1625 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1626 .amsdu_size_8K = 1, 1563 .amsdu_size_8K = 1,
1627 .restart_fw = 1, 1564 .restart_fw = 1,
1628 /* the rest are 0 by default */ 1565 /* the rest are 0 by default */
@@ -1639,10 +1576,14 @@ struct iwl_cfg iwl5300_agn_cfg = {
1639 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1576 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1640 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1577 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1641 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1578 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1579 .num_of_queues = IWL50_NUM_QUEUES,
1580 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1642 .mod_params = &iwl50_mod_params, 1581 .mod_params = &iwl50_mod_params,
1643 .valid_tx_ant = ANT_ABC, 1582 .valid_tx_ant = ANT_ABC,
1644 .valid_rx_ant = ANT_ABC, 1583 .valid_rx_ant = ANT_ABC,
1645 .need_pll_cfg = true, 1584 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1585 .set_l0s = true,
1586 .use_bsm = false,
1646 .ht_greenfield_support = true, 1587 .ht_greenfield_support = true,
1647 .led_compensation = 51, 1588 .led_compensation = 51,
1648 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1589 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1658,10 +1599,14 @@ struct iwl_cfg iwl5100_bg_cfg = {
1658 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1599 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1659 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1600 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1660 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1601 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1602 .num_of_queues = IWL50_NUM_QUEUES,
1603 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1661 .mod_params = &iwl50_mod_params, 1604 .mod_params = &iwl50_mod_params,
1662 .valid_tx_ant = ANT_B, 1605 .valid_tx_ant = ANT_B,
1663 .valid_rx_ant = ANT_AB, 1606 .valid_rx_ant = ANT_AB,
1664 .need_pll_cfg = true, 1607 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1608 .set_l0s = true,
1609 .use_bsm = false,
1665 .ht_greenfield_support = true, 1610 .ht_greenfield_support = true,
1666 .led_compensation = 51, 1611 .led_compensation = 51,
1667 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1612 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1677,10 +1622,14 @@ struct iwl_cfg iwl5100_abg_cfg = {
1677 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1622 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1678 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1623 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1679 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1624 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1625 .num_of_queues = IWL50_NUM_QUEUES,
1626 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1680 .mod_params = &iwl50_mod_params, 1627 .mod_params = &iwl50_mod_params,
1681 .valid_tx_ant = ANT_B, 1628 .valid_tx_ant = ANT_B,
1682 .valid_rx_ant = ANT_AB, 1629 .valid_rx_ant = ANT_AB,
1683 .need_pll_cfg = true, 1630 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1631 .set_l0s = true,
1632 .use_bsm = false,
1684 .ht_greenfield_support = true, 1633 .ht_greenfield_support = true,
1685 .led_compensation = 51, 1634 .led_compensation = 51,
1686 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1635 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1696,10 +1645,14 @@ struct iwl_cfg iwl5100_agn_cfg = {
1696 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1645 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1697 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1646 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1698 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1647 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1648 .num_of_queues = IWL50_NUM_QUEUES,
1649 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1699 .mod_params = &iwl50_mod_params, 1650 .mod_params = &iwl50_mod_params,
1700 .valid_tx_ant = ANT_B, 1651 .valid_tx_ant = ANT_B,
1701 .valid_rx_ant = ANT_AB, 1652 .valid_rx_ant = ANT_AB,
1702 .need_pll_cfg = true, 1653 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1654 .set_l0s = true,
1655 .use_bsm = false,
1703 .ht_greenfield_support = true, 1656 .ht_greenfield_support = true,
1704 .led_compensation = 51, 1657 .led_compensation = 51,
1705 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1658 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1715,10 +1668,14 @@ struct iwl_cfg iwl5350_agn_cfg = {
1715 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1668 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1716 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 1669 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1717 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 1670 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1671 .num_of_queues = IWL50_NUM_QUEUES,
1672 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1718 .mod_params = &iwl50_mod_params, 1673 .mod_params = &iwl50_mod_params,
1719 .valid_tx_ant = ANT_ABC, 1674 .valid_tx_ant = ANT_ABC,
1720 .valid_rx_ant = ANT_ABC, 1675 .valid_rx_ant = ANT_ABC,
1721 .need_pll_cfg = true, 1676 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1677 .set_l0s = true,
1678 .use_bsm = false,
1722 .ht_greenfield_support = true, 1679 .ht_greenfield_support = true,
1723 .led_compensation = 51, 1680 .led_compensation = 51,
1724 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1681 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1734,10 +1691,14 @@ struct iwl_cfg iwl5150_agn_cfg = {
1734 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1691 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1735 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 1692 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1736 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 1693 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1694 .num_of_queues = IWL50_NUM_QUEUES,
1695 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1737 .mod_params = &iwl50_mod_params, 1696 .mod_params = &iwl50_mod_params,
1738 .valid_tx_ant = ANT_A, 1697 .valid_tx_ant = ANT_A,
1739 .valid_rx_ant = ANT_AB, 1698 .valid_rx_ant = ANT_AB,
1740 .need_pll_cfg = true, 1699 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1700 .set_l0s = true,
1701 .use_bsm = false,
1741 .ht_greenfield_support = true, 1702 .ht_greenfield_support = true,
1742 .led_compensation = 51, 1703 .led_compensation = 51,
1743 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1704 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index d1f0b0b4ad0c..32466d38d1ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -52,8 +52,8 @@
52#define IWL6050_UCODE_API_MAX 4 52#define IWL6050_UCODE_API_MAX 4
53 53
54/* Lowest firmware API version supported */ 54/* Lowest firmware API version supported */
55#define IWL6000_UCODE_API_MIN 1 55#define IWL6000_UCODE_API_MIN 4
56#define IWL6050_UCODE_API_MIN 1 56#define IWL6050_UCODE_API_MIN 4
57 57
58#define IWL6000_FW_PRE "iwlwifi-6000-" 58#define IWL6000_FW_PRE "iwlwifi-6000-"
59#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 59#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -121,22 +121,24 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
121 .auto_corr_max_cck_mrc = 310, 121 .auto_corr_max_cck_mrc = 310,
122 .nrg_th_cck = 97, 122 .nrg_th_cck = 97,
123 .nrg_th_ofdm = 100, 123 .nrg_th_ofdm = 100,
124
125 .barker_corr_th_min = 190,
126 .barker_corr_th_min_mrc = 390,
127 .nrg_th_cca = 62,
124}; 128};
125 129
126static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) 130static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
127{ 131{
128 if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) || 132 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
129 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 133 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
130 IWL_ERR(priv, 134 priv->cfg->num_of_queues =
131 "invalid queues_num, should be between %d and %d\n", 135 priv->cfg->mod_params->num_of_queues;
132 IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
133 return -EINVAL;
134 }
135 136
136 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 137 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
137 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 138 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
138 priv->hw_params.scd_bc_tbls_size = 139 priv->hw_params.scd_bc_tbls_size =
139 IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl); 140 priv->cfg->num_of_queues *
141 sizeof(struct iwl5000_scd_bc_tbl);
140 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 142 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
141 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 143 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
142 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 144 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@@ -170,6 +172,37 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
170 return 0; 172 return 0;
171} 173}
172 174
175static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
176{
177 struct iwl6000_channel_switch_cmd cmd;
178 const struct iwl_channel_info *ch_info;
179 struct iwl_host_cmd hcmd = {
180 .id = REPLY_CHANNEL_SWITCH,
181 .len = sizeof(cmd),
182 .flags = CMD_SIZE_HUGE,
183 .data = &cmd,
184 };
185
186 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
187 priv->active_rxon.channel, channel);
188
189 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
190 cmd.channel = cpu_to_le16(channel);
191 cmd.rxon_flags = priv->active_rxon.flags;
192 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
193 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
194 ch_info = iwl_get_channel_info(priv, priv->band, channel);
195 if (ch_info)
196 cmd.expect_beacon = is_channel_radar(ch_info);
197 else {
198 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
199 priv->active_rxon.channel, channel);
200 return -EFAULT;
201 }
202
203 return iwl_send_cmd_sync(priv, &hcmd);
204}
205
173static struct iwl_lib_ops iwl6000_lib = { 206static struct iwl_lib_ops iwl6000_lib = {
174 .set_hw_params = iwl6000_hw_set_hw_params, 207 .set_hw_params = iwl6000_hw_set_hw_params,
175 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 208 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
@@ -190,9 +223,9 @@ static struct iwl_lib_ops iwl6000_lib = {
190 .alive_notify = iwl5000_alive_notify, 223 .alive_notify = iwl5000_alive_notify,
191 .send_tx_power = iwl5000_send_tx_power, 224 .send_tx_power = iwl5000_send_tx_power,
192 .update_chain_flags = iwl_update_chain_flags, 225 .update_chain_flags = iwl_update_chain_flags,
226 .set_channel_switch = iwl6000_hw_channel_switch,
193 .apm_ops = { 227 .apm_ops = {
194 .init = iwl5000_apm_init, 228 .init = iwl_apm_init,
195 .reset = iwl5000_apm_reset,
196 .stop = iwl_apm_stop, 229 .stop = iwl_apm_stop,
197 .config = iwl6000_nic_config, 230 .config = iwl6000_nic_config,
198 .set_pwr_src = iwl_set_pwr_src, 231 .set_pwr_src = iwl_set_pwr_src,
@@ -231,6 +264,21 @@ static struct iwl_ops iwl6000_ops = {
231 .led = &iwlagn_led_ops, 264 .led = &iwlagn_led_ops,
232}; 265};
233 266
267static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
268 .get_hcmd_size = iwl5000_get_hcmd_size,
269 .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
270 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
271 .calc_rssi = iwl5000_calc_rssi,
272};
273
274static struct iwl_ops iwl6050_ops = {
275 .ucode = &iwl5000_ucode,
276 .lib = &iwl6000_lib,
277 .hcmd = &iwl5000_hcmd,
278 .utils = &iwl6050_hcmd_utils,
279 .led = &iwlagn_led_ops,
280};
281
234 282
235/* 283/*
236 * "h": Hybrid configuration, use both internal and external Power Amplifier 284 * "h": Hybrid configuration, use both internal and external Power Amplifier
@@ -245,10 +293,14 @@ struct iwl_cfg iwl6000h_2agn_cfg = {
245 .eeprom_size = OTP_LOW_IMAGE_SIZE, 293 .eeprom_size = OTP_LOW_IMAGE_SIZE,
246 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 294 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
247 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 295 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
296 .num_of_queues = IWL50_NUM_QUEUES,
297 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
248 .mod_params = &iwl50_mod_params, 298 .mod_params = &iwl50_mod_params,
249 .valid_tx_ant = ANT_AB, 299 .valid_tx_ant = ANT_AB,
250 .valid_rx_ant = ANT_AB, 300 .valid_rx_ant = ANT_AB,
251 .need_pll_cfg = false, 301 .pll_cfg_val = 0,
302 .set_l0s = false,
303 .use_bsm = false,
252 .pa_type = IWL_PA_HYBRID, 304 .pa_type = IWL_PA_HYBRID,
253 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 305 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
254 .shadow_ram_support = true, 306 .shadow_ram_support = true,
@@ -257,6 +309,8 @@ struct iwl_cfg iwl6000h_2agn_cfg = {
257 .use_rts_for_ht = true, /* use rts/cts protection */ 309 .use_rts_for_ht = true, /* use rts/cts protection */
258 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 310 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
259 .supports_idle = true, 311 .supports_idle = true,
312 .adv_thermal_throttle = true,
313 .support_ct_kill_exit = true,
260}; 314};
261 315
262struct iwl_cfg iwl6000h_2abg_cfg = { 316struct iwl_cfg iwl6000h_2abg_cfg = {
@@ -269,10 +323,14 @@ struct iwl_cfg iwl6000h_2abg_cfg = {
269 .eeprom_size = OTP_LOW_IMAGE_SIZE, 323 .eeprom_size = OTP_LOW_IMAGE_SIZE,
270 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 324 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
271 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 325 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
326 .num_of_queues = IWL50_NUM_QUEUES,
327 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
272 .mod_params = &iwl50_mod_params, 328 .mod_params = &iwl50_mod_params,
273 .valid_tx_ant = ANT_AB, 329 .valid_tx_ant = ANT_AB,
274 .valid_rx_ant = ANT_AB, 330 .valid_rx_ant = ANT_AB,
275 .need_pll_cfg = false, 331 .pll_cfg_val = 0,
332 .set_l0s = false,
333 .use_bsm = false,
276 .pa_type = IWL_PA_HYBRID, 334 .pa_type = IWL_PA_HYBRID,
277 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 335 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
278 .shadow_ram_support = true, 336 .shadow_ram_support = true,
@@ -280,6 +338,8 @@ struct iwl_cfg iwl6000h_2abg_cfg = {
280 .led_compensation = 51, 338 .led_compensation = 51,
281 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 339 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
282 .supports_idle = true, 340 .supports_idle = true,
341 .adv_thermal_throttle = true,
342 .support_ct_kill_exit = true,
283}; 343};
284 344
285struct iwl_cfg iwl6000h_2bg_cfg = { 345struct iwl_cfg iwl6000h_2bg_cfg = {
@@ -292,10 +352,14 @@ struct iwl_cfg iwl6000h_2bg_cfg = {
292 .eeprom_size = OTP_LOW_IMAGE_SIZE, 352 .eeprom_size = OTP_LOW_IMAGE_SIZE,
293 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 353 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
294 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 354 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
355 .num_of_queues = IWL50_NUM_QUEUES,
356 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
295 .mod_params = &iwl50_mod_params, 357 .mod_params = &iwl50_mod_params,
296 .valid_tx_ant = ANT_AB, 358 .valid_tx_ant = ANT_AB,
297 .valid_rx_ant = ANT_AB, 359 .valid_rx_ant = ANT_AB,
298 .need_pll_cfg = false, 360 .pll_cfg_val = 0,
361 .set_l0s = false,
362 .use_bsm = false,
299 .pa_type = IWL_PA_HYBRID, 363 .pa_type = IWL_PA_HYBRID,
300 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 364 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
301 .shadow_ram_support = true, 365 .shadow_ram_support = true,
@@ -303,6 +367,8 @@ struct iwl_cfg iwl6000h_2bg_cfg = {
303 .led_compensation = 51, 367 .led_compensation = 51,
304 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 368 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
305 .supports_idle = true, 369 .supports_idle = true,
370 .adv_thermal_throttle = true,
371 .support_ct_kill_exit = true,
306}; 372};
307 373
308/* 374/*
@@ -318,10 +384,14 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
318 .eeprom_size = OTP_LOW_IMAGE_SIZE, 384 .eeprom_size = OTP_LOW_IMAGE_SIZE,
319 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 385 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
320 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 386 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
387 .num_of_queues = IWL50_NUM_QUEUES,
388 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
321 .mod_params = &iwl50_mod_params, 389 .mod_params = &iwl50_mod_params,
322 .valid_tx_ant = ANT_BC, 390 .valid_tx_ant = ANT_BC,
323 .valid_rx_ant = ANT_BC, 391 .valid_rx_ant = ANT_BC,
324 .need_pll_cfg = false, 392 .pll_cfg_val = 0,
393 .set_l0s = false,
394 .use_bsm = false,
325 .pa_type = IWL_PA_INTERNAL, 395 .pa_type = IWL_PA_INTERNAL,
326 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 396 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
327 .shadow_ram_support = true, 397 .shadow_ram_support = true,
@@ -330,6 +400,8 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
330 .use_rts_for_ht = true, /* use rts/cts protection */ 400 .use_rts_for_ht = true, /* use rts/cts protection */
331 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 401 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
332 .supports_idle = true, 402 .supports_idle = true,
403 .adv_thermal_throttle = true,
404 .support_ct_kill_exit = true,
333}; 405};
334 406
335struct iwl_cfg iwl6000i_2abg_cfg = { 407struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -342,10 +414,14 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
342 .eeprom_size = OTP_LOW_IMAGE_SIZE, 414 .eeprom_size = OTP_LOW_IMAGE_SIZE,
343 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 415 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
344 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 416 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
417 .num_of_queues = IWL50_NUM_QUEUES,
418 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
345 .mod_params = &iwl50_mod_params, 419 .mod_params = &iwl50_mod_params,
346 .valid_tx_ant = ANT_BC, 420 .valid_tx_ant = ANT_BC,
347 .valid_rx_ant = ANT_BC, 421 .valid_rx_ant = ANT_BC,
348 .need_pll_cfg = false, 422 .pll_cfg_val = 0,
423 .set_l0s = false,
424 .use_bsm = false,
349 .pa_type = IWL_PA_INTERNAL, 425 .pa_type = IWL_PA_INTERNAL,
350 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 426 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
351 .shadow_ram_support = true, 427 .shadow_ram_support = true,
@@ -353,6 +429,8 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
353 .led_compensation = 51, 429 .led_compensation = 51,
354 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 430 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
355 .supports_idle = true, 431 .supports_idle = true,
432 .adv_thermal_throttle = true,
433 .support_ct_kill_exit = true,
356}; 434};
357 435
358struct iwl_cfg iwl6000i_2bg_cfg = { 436struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -365,10 +443,14 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
365 .eeprom_size = OTP_LOW_IMAGE_SIZE, 443 .eeprom_size = OTP_LOW_IMAGE_SIZE,
366 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 444 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
367 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 445 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
446 .num_of_queues = IWL50_NUM_QUEUES,
447 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
368 .mod_params = &iwl50_mod_params, 448 .mod_params = &iwl50_mod_params,
369 .valid_tx_ant = ANT_BC, 449 .valid_tx_ant = ANT_BC,
370 .valid_rx_ant = ANT_BC, 450 .valid_rx_ant = ANT_BC,
371 .need_pll_cfg = false, 451 .pll_cfg_val = 0,
452 .set_l0s = false,
453 .use_bsm = false,
372 .pa_type = IWL_PA_INTERNAL, 454 .pa_type = IWL_PA_INTERNAL,
373 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 455 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
374 .shadow_ram_support = true, 456 .shadow_ram_support = true,
@@ -376,6 +458,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
376 .led_compensation = 51, 458 .led_compensation = 51,
377 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 459 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
378 .supports_idle = true, 460 .supports_idle = true,
461 .adv_thermal_throttle = true,
462 .support_ct_kill_exit = true,
379}; 463};
380 464
381struct iwl_cfg iwl6050_2agn_cfg = { 465struct iwl_cfg iwl6050_2agn_cfg = {
@@ -384,22 +468,28 @@ struct iwl_cfg iwl6050_2agn_cfg = {
384 .ucode_api_max = IWL6050_UCODE_API_MAX, 468 .ucode_api_max = IWL6050_UCODE_API_MAX,
385 .ucode_api_min = IWL6050_UCODE_API_MIN, 469 .ucode_api_min = IWL6050_UCODE_API_MIN,
386 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 470 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
387 .ops = &iwl6000_ops, 471 .ops = &iwl6050_ops,
388 .eeprom_size = OTP_LOW_IMAGE_SIZE, 472 .eeprom_size = OTP_LOW_IMAGE_SIZE,
389 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 473 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
390 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 474 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
475 .num_of_queues = IWL50_NUM_QUEUES,
476 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
391 .mod_params = &iwl50_mod_params, 477 .mod_params = &iwl50_mod_params,
392 .valid_tx_ant = ANT_AB, 478 .valid_tx_ant = ANT_AB,
393 .valid_rx_ant = ANT_AB, 479 .valid_rx_ant = ANT_AB,
394 .need_pll_cfg = false, 480 .pll_cfg_val = 0,
481 .set_l0s = false,
482 .use_bsm = false,
395 .pa_type = IWL_PA_SYSTEM, 483 .pa_type = IWL_PA_SYSTEM,
396 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 484 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
397 .shadow_ram_support = true, 485 .shadow_ram_support = true,
398 .ht_greenfield_support = true, 486 .ht_greenfield_support = true,
399 .led_compensation = 51, 487 .led_compensation = 51,
400 .use_rts_for_ht = true, /* use rts/cts protection */ 488 .use_rts_for_ht = true, /* use rts/cts protection */
401 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 489 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
402 .supports_idle = true, 490 .supports_idle = true,
491 .adv_thermal_throttle = true,
492 .support_ct_kill_exit = true,
403}; 493};
404 494
405struct iwl_cfg iwl6050_2abg_cfg = { 495struct iwl_cfg iwl6050_2abg_cfg = {
@@ -408,21 +498,27 @@ struct iwl_cfg iwl6050_2abg_cfg = {
408 .ucode_api_max = IWL6050_UCODE_API_MAX, 498 .ucode_api_max = IWL6050_UCODE_API_MAX,
409 .ucode_api_min = IWL6050_UCODE_API_MIN, 499 .ucode_api_min = IWL6050_UCODE_API_MIN,
410 .sku = IWL_SKU_A|IWL_SKU_G, 500 .sku = IWL_SKU_A|IWL_SKU_G,
411 .ops = &iwl6000_ops, 501 .ops = &iwl6050_ops,
412 .eeprom_size = OTP_LOW_IMAGE_SIZE, 502 .eeprom_size = OTP_LOW_IMAGE_SIZE,
413 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 503 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
414 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 504 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
505 .num_of_queues = IWL50_NUM_QUEUES,
506 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
415 .mod_params = &iwl50_mod_params, 507 .mod_params = &iwl50_mod_params,
416 .valid_tx_ant = ANT_AB, 508 .valid_tx_ant = ANT_AB,
417 .valid_rx_ant = ANT_AB, 509 .valid_rx_ant = ANT_AB,
418 .need_pll_cfg = false, 510 .pll_cfg_val = 0,
511 .set_l0s = false,
512 .use_bsm = false,
419 .pa_type = IWL_PA_SYSTEM, 513 .pa_type = IWL_PA_SYSTEM,
420 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 514 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
421 .shadow_ram_support = true, 515 .shadow_ram_support = true,
422 .ht_greenfield_support = true, 516 .ht_greenfield_support = true,
423 .led_compensation = 51, 517 .led_compensation = 51,
424 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 518 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
425 .supports_idle = true, 519 .supports_idle = true,
520 .adv_thermal_throttle = true,
521 .support_ct_kill_exit = true,
426}; 522};
427 523
428struct iwl_cfg iwl6000_3agn_cfg = { 524struct iwl_cfg iwl6000_3agn_cfg = {
@@ -435,10 +531,14 @@ struct iwl_cfg iwl6000_3agn_cfg = {
435 .eeprom_size = OTP_LOW_IMAGE_SIZE, 531 .eeprom_size = OTP_LOW_IMAGE_SIZE,
436 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 532 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
437 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 533 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
534 .num_of_queues = IWL50_NUM_QUEUES,
535 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
438 .mod_params = &iwl50_mod_params, 536 .mod_params = &iwl50_mod_params,
439 .valid_tx_ant = ANT_ABC, 537 .valid_tx_ant = ANT_ABC,
440 .valid_rx_ant = ANT_ABC, 538 .valid_rx_ant = ANT_ABC,
441 .need_pll_cfg = false, 539 .pll_cfg_val = 0,
540 .set_l0s = false,
541 .use_bsm = false,
442 .pa_type = IWL_PA_SYSTEM, 542 .pa_type = IWL_PA_SYSTEM,
443 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 543 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
444 .shadow_ram_support = true, 544 .shadow_ram_support = true,
@@ -447,6 +547,8 @@ struct iwl_cfg iwl6000_3agn_cfg = {
447 .use_rts_for_ht = true, /* use rts/cts protection */ 547 .use_rts_for_ht = true, /* use rts/cts protection */
448 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 548 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
449 .supports_idle = true, 549 .supports_idle = true,
550 .adv_thermal_throttle = true,
551 .support_ct_kill_exit = true,
450}; 552};
451 553
452struct iwl_cfg iwl6050_3agn_cfg = { 554struct iwl_cfg iwl6050_3agn_cfg = {
@@ -455,22 +557,28 @@ struct iwl_cfg iwl6050_3agn_cfg = {
455 .ucode_api_max = IWL6050_UCODE_API_MAX, 557 .ucode_api_max = IWL6050_UCODE_API_MAX,
456 .ucode_api_min = IWL6050_UCODE_API_MIN, 558 .ucode_api_min = IWL6050_UCODE_API_MIN,
457 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 559 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
458 .ops = &iwl6000_ops, 560 .ops = &iwl6050_ops,
459 .eeprom_size = OTP_LOW_IMAGE_SIZE, 561 .eeprom_size = OTP_LOW_IMAGE_SIZE,
460 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 562 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
461 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 563 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
564 .num_of_queues = IWL50_NUM_QUEUES,
565 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
462 .mod_params = &iwl50_mod_params, 566 .mod_params = &iwl50_mod_params,
463 .valid_tx_ant = ANT_ABC, 567 .valid_tx_ant = ANT_ABC,
464 .valid_rx_ant = ANT_ABC, 568 .valid_rx_ant = ANT_ABC,
465 .need_pll_cfg = false, 569 .pll_cfg_val = 0,
570 .set_l0s = false,
571 .use_bsm = false,
466 .pa_type = IWL_PA_SYSTEM, 572 .pa_type = IWL_PA_SYSTEM,
467 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 573 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
468 .shadow_ram_support = true, 574 .shadow_ram_support = true,
469 .ht_greenfield_support = true, 575 .ht_greenfield_support = true,
470 .led_compensation = 51, 576 .led_compensation = 51,
471 .use_rts_for_ht = true, /* use rts/cts protection */ 577 .use_rts_for_ht = true, /* use rts/cts protection */
472 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 578 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
473 .supports_idle = true, 579 .supports_idle = true,
580 .adv_thermal_throttle = true,
581 .support_ct_kill_exit = true,
474}; 582};
475 583
476MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 584MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index a07be29cc5e5..27d4ece4d467 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -75,106 +75,6 @@ static const u8 ant_toggle_lookup[] = {
75 /*ANT_ABC -> */ ANT_ABC, 75 /*ANT_ABC -> */ ANT_ABC,
76}; 76};
77 77
78/**
79 * struct iwl_rate_scale_data -- tx success history for one rate
80 */
81struct iwl_rate_scale_data {
82 u64 data; /* bitmap of successful frames */
83 s32 success_counter; /* number of frames successful */
84 s32 success_ratio; /* per-cent * 128 */
85 s32 counter; /* number of frames attempted */
86 s32 average_tpt; /* success ratio * expected throughput */
87 unsigned long stamp;
88};
89
90/**
91 * struct iwl_scale_tbl_info -- tx params and success history for all rates
92 *
93 * There are two of these in struct iwl_lq_sta,
94 * one for "active", and one for "search".
95 */
96struct iwl_scale_tbl_info {
97 enum iwl_table_type lq_type;
98 u8 ant_type;
99 u8 is_SGI; /* 1 = short guard interval */
100 u8 is_ht40; /* 1 = 40 MHz channel width */
101 u8 is_dup; /* 1 = duplicated data streams */
102 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
103 u8 max_search; /* maximun number of tables we can search */
104 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
105 u32 current_rate; /* rate_n_flags, uCode API format */
106 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
107};
108
109struct iwl_traffic_load {
110 unsigned long time_stamp; /* age of the oldest statistics */
111 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
112 * slice */
113 u32 total; /* total num of packets during the
114 * last TID_MAX_TIME_DIFF */
115 u8 queue_count; /* number of queues that has
116 * been used since the last cleanup */
117 u8 head; /* start of the circular buffer */
118};
119
120/**
121 * struct iwl_lq_sta -- driver's rate scaling private structure
122 *
123 * Pointer to this gets passed back and forth between driver and mac80211.
124 */
125struct iwl_lq_sta {
126 u8 active_tbl; /* index of active table, range 0-1 */
127 u8 enable_counter; /* indicates HT mode */
128 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
129 u8 search_better_tbl; /* 1: currently trying alternate mode */
130 s32 last_tpt;
131
132 /* The following determine when to search for a new mode */
133 u32 table_count_limit;
134 u32 max_failure_limit; /* # failed frames before new search */
135 u32 max_success_limit; /* # successful frames before new search */
136 u32 table_count;
137 u32 total_failed; /* total failed frames, any/all rates */
138 u32 total_success; /* total successful frames, any/all rates */
139 u64 flush_timer; /* time staying in mode before new search */
140
141 u8 action_counter; /* # mode-switch actions tried */
142 u8 is_green;
143 u8 is_dup;
144 enum ieee80211_band band;
145 u8 ibss_sta_added;
146
147 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
148 u32 supp_rates;
149 u16 active_legacy_rate;
150 u16 active_siso_rate;
151 u16 active_mimo2_rate;
152 u16 active_mimo3_rate;
153 u16 active_rate_basic;
154 s8 max_rate_idx; /* Max rate set by user */
155 u8 missed_rate_counter;
156
157 struct iwl_link_quality_cmd lq;
158 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
159 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
160 u8 tx_agg_tid_en;
161#ifdef CONFIG_MAC80211_DEBUGFS
162 struct dentry *rs_sta_dbgfs_scale_table_file;
163 struct dentry *rs_sta_dbgfs_stats_table_file;
164 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
165 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
166 u32 dbg_fixed_rate;
167#endif
168 struct iwl_priv *drv;
169
170 /* used to be in sta_info */
171 int last_txrate_idx;
172 /* last tx rate_n_flags */
173 u32 last_rate_n_flags;
174 /* packets destined for this STA are aggregated */
175 u8 is_agg;
176};
177
178static void rs_rate_scale_perform(struct iwl_priv *priv, 78static void rs_rate_scale_perform(struct iwl_priv *priv,
179 struct sk_buff *skb, 79 struct sk_buff *skb,
180 struct ieee80211_sta *sta, 80 struct ieee80211_sta *sta,
@@ -2575,19 +2475,17 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2575 gfp_t gfp) 2475 gfp_t gfp)
2576{ 2476{
2577 struct iwl_lq_sta *lq_sta; 2477 struct iwl_lq_sta *lq_sta;
2478 struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2578 struct iwl_priv *priv; 2479 struct iwl_priv *priv;
2579 int i, j; 2480 int i, j;
2580 2481
2581 priv = (struct iwl_priv *)priv_rate; 2482 priv = (struct iwl_priv *)priv_rate;
2582 IWL_DEBUG_RATE(priv, "create station rate scale window\n"); 2483 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2583 2484
2584 lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp); 2485 lq_sta = &sta_priv->lq_sta;
2585 2486
2586 if (lq_sta == NULL)
2587 return NULL;
2588 lq_sta->lq.sta_id = 0xff; 2487 lq_sta->lq.sta_id = 0xff;
2589 2488
2590
2591 for (j = 0; j < LQ_SIZE; j++) 2489 for (j = 0; j < LQ_SIZE; j++)
2592 for (i = 0; i < IWL_RATE_COUNT; i++) 2490 for (i = 0; i < IWL_RATE_COUNT; i++)
2593 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); 2491 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
@@ -2819,11 +2717,9 @@ static void rs_free(void *priv_rate)
2819static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta, 2717static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2820 void *priv_sta) 2718 void *priv_sta)
2821{ 2719{
2822 struct iwl_lq_sta *lq_sta = priv_sta;
2823 struct iwl_priv *priv __maybe_unused = priv_r; 2720 struct iwl_priv *priv __maybe_unused = priv_r;
2824 2721
2825 IWL_DEBUG_RATE(priv, "enter\n"); 2722 IWL_DEBUG_RATE(priv, "enter\n");
2826 kfree(lq_sta);
2827 IWL_DEBUG_RATE(priv, "leave\n"); 2723 IWL_DEBUG_RATE(priv, "leave\n");
2828} 2724}
2829 2725
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 9fac530cfb7e..affc0c5a2f2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -54,6 +54,7 @@ struct iwl3945_rate_info {
54 u8 prev_table_rs; /* prev in rate table cmd */ 54 u8 prev_table_rs; /* prev in rate table cmd */
55}; 55};
56 56
57
57/* 58/*
58 * These serve as indexes into 59 * These serve as indexes into
59 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; 60 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
@@ -335,6 +336,106 @@ struct iwl_rate_mcs_info {
335 char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; 336 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
336}; 337};
337 338
339/**
340 * struct iwl_rate_scale_data -- tx success history for one rate
341 */
342struct iwl_rate_scale_data {
343 u64 data; /* bitmap of successful frames */
344 s32 success_counter; /* number of frames successful */
345 s32 success_ratio; /* per-cent * 128 */
346 s32 counter; /* number of frames attempted */
347 s32 average_tpt; /* success ratio * expected throughput */
348 unsigned long stamp;
349};
350
351/**
352 * struct iwl_scale_tbl_info -- tx params and success history for all rates
353 *
354 * There are two of these in struct iwl_lq_sta,
355 * one for "active", and one for "search".
356 */
357struct iwl_scale_tbl_info {
358 enum iwl_table_type lq_type;
359 u8 ant_type;
360 u8 is_SGI; /* 1 = short guard interval */
361 u8 is_ht40; /* 1 = 40 MHz channel width */
362 u8 is_dup; /* 1 = duplicated data streams */
363 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
364 u8 max_search; /* maximun number of tables we can search */
365 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
366 u32 current_rate; /* rate_n_flags, uCode API format */
367 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
368};
369
370struct iwl_traffic_load {
371 unsigned long time_stamp; /* age of the oldest statistics */
372 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
373 * slice */
374 u32 total; /* total num of packets during the
375 * last TID_MAX_TIME_DIFF */
376 u8 queue_count; /* number of queues that has
377 * been used since the last cleanup */
378 u8 head; /* start of the circular buffer */
379};
380
381/**
382 * struct iwl_lq_sta -- driver's rate scaling private structure
383 *
384 * Pointer to this gets passed back and forth between driver and mac80211.
385 */
386struct iwl_lq_sta {
387 u8 active_tbl; /* index of active table, range 0-1 */
388 u8 enable_counter; /* indicates HT mode */
389 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
390 u8 search_better_tbl; /* 1: currently trying alternate mode */
391 s32 last_tpt;
392
393 /* The following determine when to search for a new mode */
394 u32 table_count_limit;
395 u32 max_failure_limit; /* # failed frames before new search */
396 u32 max_success_limit; /* # successful frames before new search */
397 u32 table_count;
398 u32 total_failed; /* total failed frames, any/all rates */
399 u32 total_success; /* total successful frames, any/all rates */
400 u64 flush_timer; /* time staying in mode before new search */
401
402 u8 action_counter; /* # mode-switch actions tried */
403 u8 is_green;
404 u8 is_dup;
405 enum ieee80211_band band;
406 u8 ibss_sta_added;
407
408 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
409 u32 supp_rates;
410 u16 active_legacy_rate;
411 u16 active_siso_rate;
412 u16 active_mimo2_rate;
413 u16 active_mimo3_rate;
414 u16 active_rate_basic;
415 s8 max_rate_idx; /* Max rate set by user */
416 u8 missed_rate_counter;
417
418 struct iwl_link_quality_cmd lq;
419 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
420 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
421 u8 tx_agg_tid_en;
422#ifdef CONFIG_MAC80211_DEBUGFS
423 struct dentry *rs_sta_dbgfs_scale_table_file;
424 struct dentry *rs_sta_dbgfs_stats_table_file;
425 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
426 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
427 u32 dbg_fixed_rate;
428#endif
429 struct iwl_priv *drv;
430
431 /* used to be in sta_info */
432 int last_txrate_idx;
433 /* last tx rate_n_flags */
434 u32 last_rate_n_flags;
435 /* packets destined for this STA are aggregated */
436 u8 is_agg;
437};
438
338static inline u8 num_of_ant(u8 mask) 439static inline u8 num_of_ant(u8 mask)
339{ 440{
340 return !!((mask) & ANT_A) + 441 return !!((mask) & ANT_A) +
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 8d7bc38fe005..fa1672e99e4b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -524,7 +524,7 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
524static void iwl_rx_reply_alive(struct iwl_priv *priv, 524static void iwl_rx_reply_alive(struct iwl_priv *priv,
525 struct iwl_rx_mem_buffer *rxb) 525 struct iwl_rx_mem_buffer *rxb)
526{ 526{
527 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 527 struct iwl_rx_packet *pkt = rxb_addr(rxb);
528 struct iwl_alive_resp *palive; 528 struct iwl_alive_resp *palive;
529 struct delayed_work *pwork; 529 struct delayed_work *pwork;
530 530
@@ -610,7 +610,7 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
610 struct iwl_rx_mem_buffer *rxb) 610 struct iwl_rx_mem_buffer *rxb)
611{ 611{
612#ifdef CONFIG_IWLWIFI_DEBUG 612#ifdef CONFIG_IWLWIFI_DEBUG
613 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 613 struct iwl_rx_packet *pkt = rxb_addr(rxb);
614 struct iwl4965_beacon_notif *beacon = 614 struct iwl4965_beacon_notif *beacon =
615 (struct iwl4965_beacon_notif *)pkt->u.raw; 615 (struct iwl4965_beacon_notif *)pkt->u.raw;
616 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 616 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
@@ -634,7 +634,7 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
634static void iwl_rx_card_state_notif(struct iwl_priv *priv, 634static void iwl_rx_card_state_notif(struct iwl_priv *priv,
635 struct iwl_rx_mem_buffer *rxb) 635 struct iwl_rx_mem_buffer *rxb)
636{ 636{
637 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 637 struct iwl_rx_packet *pkt = rxb_addr(rxb);
638 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 638 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
639 unsigned long status = priv->status; 639 unsigned long status = priv->status;
640 640
@@ -769,7 +769,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
769 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); 769 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
770 770
771 /* calculate total frames need to be restock after handling RX */ 771 /* calculate total frames need to be restock after handling RX */
772 total_empty = r - priv->rxq.write_actual; 772 total_empty = r - rxq->write_actual;
773 if (total_empty < 0) 773 if (total_empty < 0)
774 total_empty += RX_QUEUE_SIZE; 774 total_empty += RX_QUEUE_SIZE;
775 775
@@ -786,10 +786,10 @@ void iwl_rx_handle(struct iwl_priv *priv)
786 786
787 rxq->queue[i] = NULL; 787 rxq->queue[i] = NULL;
788 788
789 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, 789 pci_unmap_page(priv->pci_dev, rxb->page_dma,
790 priv->hw_params.rx_buf_size + 256, 790 PAGE_SIZE << priv->hw_params.rx_page_order,
791 PCI_DMA_FROMDEVICE); 791 PCI_DMA_FROMDEVICE);
792 pkt = (struct iwl_rx_packet *)rxb->skb->data; 792 pkt = rxb_addr(rxb);
793 793
794 trace_iwlwifi_dev_rx(priv, pkt, 794 trace_iwlwifi_dev_rx(priv, pkt,
795 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 795 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
@@ -814,8 +814,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
814 if (priv->rx_handlers[pkt->hdr.cmd]) { 814 if (priv->rx_handlers[pkt->hdr.cmd]) {
815 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, 815 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
816 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 816 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
817 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
818 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 817 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
818 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
819 } else { 819 } else {
820 /* No handling needed */ 820 /* No handling needed */
821 IWL_DEBUG_RX(priv, 821 IWL_DEBUG_RX(priv,
@@ -824,35 +824,45 @@ void iwl_rx_handle(struct iwl_priv *priv)
824 pkt->hdr.cmd); 824 pkt->hdr.cmd);
825 } 825 }
826 826
827 /*
828 * XXX: After here, we should always check rxb->page
829 * against NULL before touching it or its virtual
830 * memory (pkt). Because some rx_handler might have
831 * already taken or freed the pages.
832 */
833
827 if (reclaim) { 834 if (reclaim) {
828 /* Invoke any callbacks, transfer the skb to caller, and 835 /* Invoke any callbacks, transfer the buffer to caller,
829 * fire off the (possibly) blocking iwl_send_cmd() 836 * and fire off the (possibly) blocking iwl_send_cmd()
830 * as we reclaim the driver command queue */ 837 * as we reclaim the driver command queue */
831 if (rxb && rxb->skb) 838 if (rxb->page)
832 iwl_tx_cmd_complete(priv, rxb); 839 iwl_tx_cmd_complete(priv, rxb);
833 else 840 else
834 IWL_WARN(priv, "Claim null rxb?\n"); 841 IWL_WARN(priv, "Claim null rxb?\n");
835 } 842 }
836 843
837 /* For now we just don't re-use anything. We can tweak this 844 /* Reuse the page if possible. For notification packets and
838 * later to try and re-use notification packets and SKBs that 845 * SKBs that fail to Rx correctly, add them back into the
839 * fail to Rx correctly */ 846 * rx_free list for reuse later. */
840 if (rxb->skb != NULL) {
841 priv->alloc_rxb_skb--;
842 dev_kfree_skb_any(rxb->skb);
843 rxb->skb = NULL;
844 }
845
846 spin_lock_irqsave(&rxq->lock, flags); 847 spin_lock_irqsave(&rxq->lock, flags);
847 list_add_tail(&rxb->list, &priv->rxq.rx_used); 848 if (rxb->page != NULL) {
849 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
850 0, PAGE_SIZE << priv->hw_params.rx_page_order,
851 PCI_DMA_FROMDEVICE);
852 list_add_tail(&rxb->list, &rxq->rx_free);
853 rxq->free_count++;
854 } else
855 list_add_tail(&rxb->list, &rxq->rx_used);
856
848 spin_unlock_irqrestore(&rxq->lock, flags); 857 spin_unlock_irqrestore(&rxq->lock, flags);
858
849 i = (i + 1) & RX_QUEUE_MASK; 859 i = (i + 1) & RX_QUEUE_MASK;
850 /* If there are a lot of unused frames, 860 /* If there are a lot of unused frames,
851 * restock the Rx queue so ucode wont assert. */ 861 * restock the Rx queue so ucode wont assert. */
852 if (fill_rx) { 862 if (fill_rx) {
853 count++; 863 count++;
854 if (count >= 8) { 864 if (count >= 8) {
855 priv->rxq.read = i; 865 rxq->read = i;
856 iwl_rx_replenish_now(priv); 866 iwl_rx_replenish_now(priv);
857 count = 0; 867 count = 0;
858 } 868 }
@@ -860,7 +870,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
860 } 870 }
861 871
862 /* Backtrack one entry */ 872 /* Backtrack one entry */
863 priv->rxq.read = i; 873 rxq->read = i;
864 if (fill_rx) 874 if (fill_rx)
865 iwl_rx_replenish_now(priv); 875 iwl_rx_replenish_now(priv);
866 else 876 else
@@ -907,6 +917,8 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
907 } 917 }
908#endif 918#endif
909 919
920 spin_unlock_irqrestore(&priv->lock, flags);
921
910 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 922 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
911 * atomic, make sure that inta covers all the interrupts that 923 * atomic, make sure that inta covers all the interrupts that
912 * we've discovered, even if FH interrupt came in just after 924 * we've discovered, even if FH interrupt came in just after
@@ -928,8 +940,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
928 940
929 handled |= CSR_INT_BIT_HW_ERR; 941 handled |= CSR_INT_BIT_HW_ERR;
930 942
931 spin_unlock_irqrestore(&priv->lock, flags);
932
933 return; 943 return;
934 } 944 }
935 945
@@ -1019,6 +1029,7 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1019 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1029 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1020 iwl_rx_handle(priv); 1030 iwl_rx_handle(priv);
1021 priv->isr_stats.rx++; 1031 priv->isr_stats.rx++;
1032 iwl_leds_background(priv);
1022 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1033 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1023 } 1034 }
1024 1035
@@ -1056,7 +1067,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1056 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1067 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1057 } 1068 }
1058#endif 1069#endif
1059 spin_unlock_irqrestore(&priv->lock, flags);
1060} 1070}
1061 1071
1062/* tasklet for iwlagn interrupt */ 1072/* tasklet for iwlagn interrupt */
@@ -1086,6 +1096,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1086 inta, inta_mask); 1096 inta, inta_mask);
1087 } 1097 }
1088#endif 1098#endif
1099
1100 spin_unlock_irqrestore(&priv->lock, flags);
1101
1089 /* saved interrupt in inta variable now we can reset priv->inta */ 1102 /* saved interrupt in inta variable now we can reset priv->inta */
1090 priv->inta = 0; 1103 priv->inta = 0;
1091 1104
@@ -1101,8 +1114,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1101 1114
1102 handled |= CSR_INT_BIT_HW_ERR; 1115 handled |= CSR_INT_BIT_HW_ERR;
1103 1116
1104 spin_unlock_irqrestore(&priv->lock, flags);
1105
1106 return; 1117 return;
1107 } 1118 }
1108 1119
@@ -1220,6 +1231,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1220 CSR_INT_PERIODIC_ENA); 1231 CSR_INT_PERIODIC_ENA);
1221 1232
1222 priv->isr_stats.rx++; 1233 priv->isr_stats.rx++;
1234 iwl_leds_background(priv);
1223 } 1235 }
1224 1236
1225 if (inta & CSR_INT_BIT_FH_TX) { 1237 if (inta & CSR_INT_BIT_FH_TX) {
@@ -1242,14 +1254,10 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1242 inta & ~priv->inta_mask); 1254 inta & ~priv->inta_mask);
1243 } 1255 }
1244 1256
1245
1246 /* Re-enable all interrupts */ 1257 /* Re-enable all interrupts */
1247 /* only Re-enable if diabled by irq */ 1258 /* only Re-enable if diabled by irq */
1248 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1259 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1249 iwl_enable_interrupts(priv); 1260 iwl_enable_interrupts(priv);
1250
1251 spin_unlock_irqrestore(&priv->lock, flags);
1252
1253} 1261}
1254 1262
1255 1263
@@ -1899,11 +1907,9 @@ static void __iwl_down(struct iwl_priv *priv)
1899 1907
1900 udelay(5); 1908 udelay(5);
1901 1909
1902 /* FIXME: apm_ops.suspend(priv) */ 1910 /* Stop the device, and put it in low power state */
1903 if (exit_pending) 1911 priv->cfg->ops->lib->apm_ops.stop(priv);
1904 priv->cfg->ops->lib->apm_ops.stop(priv); 1912
1905 else
1906 priv->cfg->ops->lib->apm_ops.reset(priv);
1907 exit: 1913 exit:
1908 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 1914 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
1909 1915
@@ -2290,6 +2296,69 @@ void iwl_post_associate(struct iwl_priv *priv)
2290 2296
2291#define UCODE_READY_TIMEOUT (4 * HZ) 2297#define UCODE_READY_TIMEOUT (4 * HZ)
2292 2298
2299/*
2300 * Not a mac80211 entry point function, but it fits in with all the
2301 * other mac80211 functions grouped here.
2302 */
2303static int iwl_setup_mac(struct iwl_priv *priv)
2304{
2305 int ret;
2306 struct ieee80211_hw *hw = priv->hw;
2307 hw->rate_control_algorithm = "iwl-agn-rs";
2308
2309 /* Tell mac80211 our characteristics */
2310 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2311 IEEE80211_HW_NOISE_DBM |
2312 IEEE80211_HW_AMPDU_AGGREGATION |
2313 IEEE80211_HW_SPECTRUM_MGMT;
2314
2315 if (!priv->cfg->broken_powersave)
2316 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2317 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2318
2319 hw->sta_data_size = sizeof(struct iwl_station_priv);
2320 hw->wiphy->interface_modes =
2321 BIT(NL80211_IFTYPE_STATION) |
2322 BIT(NL80211_IFTYPE_ADHOC);
2323
2324 hw->wiphy->custom_regulatory = true;
2325
2326 /* Firmware does not support this */
2327 hw->wiphy->disable_beacon_hints = true;
2328
2329 /*
2330 * For now, disable PS by default because it affects
2331 * RX performance significantly.
2332 */
2333 hw->wiphy->ps_default = false;
2334
2335 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2336 /* we create the 802.11 header and a zero-length SSID element */
2337 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
2338
2339 /* Default value; 4 EDCA QOS priorities */
2340 hw->queues = 4;
2341
2342 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2343
2344 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2345 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2346 &priv->bands[IEEE80211_BAND_2GHZ];
2347 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2348 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2349 &priv->bands[IEEE80211_BAND_5GHZ];
2350
2351 ret = ieee80211_register_hw(priv->hw);
2352 if (ret) {
2353 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2354 return ret;
2355 }
2356 priv->mac80211_registered = 1;
2357
2358 return 0;
2359}
2360
2361
2293static int iwl_mac_start(struct ieee80211_hw *hw) 2362static int iwl_mac_start(struct ieee80211_hw *hw)
2294{ 2363{
2295 struct iwl_priv *priv = hw->priv; 2364 struct iwl_priv *priv = hw->priv;
@@ -3187,6 +3256,15 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3187 iwl_down(priv); 3256 iwl_down(priv);
3188 } 3257 }
3189 3258
3259 /*
3260 * Make sure device is reset to low power before unloading driver.
3261 * This may be redundant with iwl_down(), but there are paths to
3262 * run iwl_down() without calling apm_ops.stop(), and there are
3263 * paths to avoid running iwl_down() at all before leaving driver.
3264 * This (inexpensive) call *makes sure* device is reset.
3265 */
3266 priv->cfg->ops->lib->apm_ops.stop(priv);
3267
3190 iwl_tt_exit(priv); 3268 iwl_tt_exit(priv);
3191 3269
3192 /* make sure we flush any pending irq or 3270 /* make sure we flush any pending irq or
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 69a80d7c2e44..1f801eb9fbff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -447,11 +447,11 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
447 cpu_to_le16((u16)data->nrg_th_ofdm); 447 cpu_to_le16((u16)data->nrg_th_ofdm);
448 448
449 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = 449 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
450 cpu_to_le16(190); 450 cpu_to_le16(data->barker_corr_th_min);
451 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = 451 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
452 cpu_to_le16(390); 452 cpu_to_le16(data->barker_corr_th_min_mrc);
453 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = 453 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
454 cpu_to_le16(62); 454 cpu_to_le16(data->nrg_th_cca);
455 455
456 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", 456 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
457 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, 457 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
@@ -524,6 +524,9 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
524 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; 524 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
525 data->nrg_th_cck = ranges->nrg_th_cck; 525 data->nrg_th_cck = ranges->nrg_th_cck;
526 data->nrg_th_ofdm = ranges->nrg_th_ofdm; 526 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
527 data->barker_corr_th_min = ranges->barker_corr_th_min;
528 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
529 data->nrg_th_cca = ranges->nrg_th_cca;
527 530
528 data->last_bad_plcp_cnt_ofdm = 0; 531 data->last_bad_plcp_cnt_ofdm = 0;
529 data->last_fa_cnt_ofdm = 0; 532 data->last_fa_cnt_ofdm = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index ba3e4c837d87..954bad60355d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,11 +109,12 @@ enum {
109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ 109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
110 110
111 /* WiMAX coexistence */ 111 /* WiMAX coexistence */
112 COEX_PRIORITY_TABLE_CMD = 0x5a, /*5000 only */ 112 COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
113 COEX_MEDIUM_NOTIFICATION = 0x5b, 113 COEX_MEDIUM_NOTIFICATION = 0x5b,
114 COEX_EVENT_CMD = 0x5c, 114 COEX_EVENT_CMD = 0x5c,
115 115
116 /* Calibration */ 116 /* Calibration */
117 TEMPERATURE_NOTIFICATION = 0x62,
117 CALIBRATION_CFG_CMD = 0x65, 118 CALIBRATION_CFG_CMD = 0x65,
118 CALIBRATION_RES_NOTIFICATION = 0x66, 119 CALIBRATION_RES_NOTIFICATION = 0x66,
119 CALIBRATION_COMPLETE_NOTIFICATION = 0x67, 120 CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
@@ -353,6 +354,9 @@ struct iwl3945_power_per_rate {
353#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 354#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
354#define POWER_TABLE_CCK_ENTRY 32 355#define POWER_TABLE_CCK_ENTRY 32
355 356
357#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
358#define IWL_PWR_CCK_ENTRIES 2
359
356/** 360/**
357 * union iwl4965_tx_power_dual_stream 361 * union iwl4965_tx_power_dual_stream
358 * 362 *
@@ -803,7 +807,7 @@ struct iwl3945_channel_switch_cmd {
803 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 807 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
804} __attribute__ ((packed)); 808} __attribute__ ((packed));
805 809
806struct iwl_channel_switch_cmd { 810struct iwl4965_channel_switch_cmd {
807 u8 band; 811 u8 band;
808 u8 expect_beacon; 812 u8 expect_beacon;
809 __le16 channel; 813 __le16 channel;
@@ -813,6 +817,48 @@ struct iwl_channel_switch_cmd {
813 struct iwl4965_tx_power_db tx_power; 817 struct iwl4965_tx_power_db tx_power;
814} __attribute__ ((packed)); 818} __attribute__ ((packed));
815 819
820/**
821 * struct iwl5000_channel_switch_cmd
822 * @band: 0- 5.2GHz, 1- 2.4GHz
823 * @expect_beacon: 0- resume transmits after channel switch
824 * 1- wait for beacon to resume transmits
825 * @channel: new channel number
826 * @rxon_flags: Rx on flags
827 * @rxon_filter_flags: filtering parameters
828 * @switch_time: switch time in extended beacon format
829 * @reserved: reserved bytes
830 */
831struct iwl5000_channel_switch_cmd {
832 u8 band;
833 u8 expect_beacon;
834 __le16 channel;
835 __le32 rxon_flags;
836 __le32 rxon_filter_flags;
837 __le32 switch_time;
838 __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
839} __attribute__ ((packed));
840
841/**
842 * struct iwl6000_channel_switch_cmd
843 * @band: 0- 5.2GHz, 1- 2.4GHz
844 * @expect_beacon: 0- resume transmits after channel switch
845 * 1- wait for beacon to resume transmits
846 * @channel: new channel number
847 * @rxon_flags: Rx on flags
848 * @rxon_filter_flags: filtering parameters
849 * @switch_time: switch time in extended beacon format
850 * @reserved: reserved bytes
851 */
852struct iwl6000_channel_switch_cmd {
853 u8 band;
854 u8 expect_beacon;
855 __le16 channel;
856 __le32 rxon_flags;
857 __le32 rxon_filter_flags;
858 __le32 switch_time;
859 __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
860} __attribute__ ((packed));
861
816/* 862/*
817 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) 863 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
818 */ 864 */
@@ -2172,6 +2218,19 @@ struct iwl_link_quality_cmd {
2172 __le32 reserved2; 2218 __le32 reserved2;
2173} __attribute__ ((packed)); 2219} __attribute__ ((packed));
2174 2220
2221#define BT_COEX_DISABLE (0x0)
2222#define BT_COEX_MODE_2W (0x1)
2223#define BT_COEX_MODE_3W (0x2)
2224#define BT_COEX_MODE_4W (0x3)
2225
2226#define BT_LEAD_TIME_MIN (0x0)
2227#define BT_LEAD_TIME_DEF (0x1E)
2228#define BT_LEAD_TIME_MAX (0xFF)
2229
2230#define BT_MAX_KILL_MIN (0x1)
2231#define BT_MAX_KILL_DEF (0x5)
2232#define BT_MAX_KILL_MAX (0xFF)
2233
2175/* 2234/*
2176 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 2235 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
2177 * 2236 *
@@ -3247,12 +3306,6 @@ struct iwl_missed_beacon_notif {
3247 * Lower values mean higher energy; this means making sure that the value 3306 * Lower values mean higher energy; this means making sure that the value
3248 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". 3307 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3249 * 3308 *
3250 * Driver should set the following entries to fixed values:
3251 *
3252 * HD_MIN_ENERGY_OFDM_DET_INDEX 100
3253 * HD_BARKER_CORR_TH_ADD_MIN_INDEX 190
3254 * HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX 390
3255 * HD_OFDM_ENERGY_TH_IN_INDEX 62
3256 */ 3309 */
3257 3310
3258/* 3311/*
@@ -3505,6 +3558,16 @@ struct iwl_wimax_coex_cmd {
3505 *****************************************************************************/ 3558 *****************************************************************************/
3506 3559
3507struct iwl_rx_packet { 3560struct iwl_rx_packet {
3561 /*
3562 * The first 4 bytes of the RX frame header contain both the RX frame
3563 * size and some flags.
3564 * Bit fields:
3565 * 31: flag flush RB request
3566 * 30: flag ignore TC (terminal counter) request
3567 * 29: flag fast IRQ request
3568 * 28-14: Reserved
3569 * 13-00: RX frame size
3570 */
3508 __le32 len_n_flags; 3571 __le32 len_n_flags;
3509 struct iwl_cmd_header hdr; 3572 struct iwl_cmd_header hdr;
3510 union { 3573 union {
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index dc7fd87bed98..d2b56baf98fb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -604,6 +604,23 @@ void iwlcore_free_geos(struct iwl_priv *priv)
604} 604}
605EXPORT_SYMBOL(iwlcore_free_geos); 605EXPORT_SYMBOL(iwlcore_free_geos);
606 606
607/*
608 * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
609 * function.
610 */
611void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
612 __le32 *tx_flags)
613{
614 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
615 *tx_flags |= TX_CMD_FLG_RTS_MSK;
616 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
617 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
618 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
619 *tx_flags |= TX_CMD_FLG_CTS_MSK;
620 }
621}
622EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
623
607static bool is_single_rx_stream(struct iwl_priv *priv) 624static bool is_single_rx_stream(struct iwl_priv *priv)
608{ 625{
609 return !priv->current_ht_config.is_ht || 626 return !priv->current_ht_config.is_ht ||
@@ -1264,13 +1281,18 @@ static void iwl_set_rate(struct iwl_priv *priv)
1264 1281
1265void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 1282void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1266{ 1283{
1267 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1284 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1268 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; 1285 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
1269 struct iwl_csa_notification *csa = &(pkt->u.csa_notif); 1286 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
1270 IWL_DEBUG_11H(priv, "CSA notif: channel %d, status %d\n", 1287
1271 le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); 1288 if (!le32_to_cpu(csa->status)) {
1272 rxon->channel = csa->channel; 1289 rxon->channel = csa->channel;
1273 priv->staging_rxon.channel = csa->channel; 1290 priv->staging_rxon.channel = csa->channel;
1291 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
1292 le16_to_cpu(csa->channel));
1293 } else
1294 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
1295 le16_to_cpu(csa->channel));
1274} 1296}
1275EXPORT_SYMBOL(iwl_rx_csa); 1297EXPORT_SYMBOL(iwl_rx_csa);
1276 1298
@@ -1352,6 +1374,8 @@ void iwl_apm_stop(struct iwl_priv *priv)
1352{ 1374{
1353 unsigned long flags; 1375 unsigned long flags;
1354 1376
1377 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
1378
1355 iwl_apm_stop_master(priv); 1379 iwl_apm_stop_master(priv);
1356 1380
1357 spin_lock_irqsave(&priv->lock, flags); 1381 spin_lock_irqsave(&priv->lock, flags);
@@ -1365,6 +1389,118 @@ void iwl_apm_stop(struct iwl_priv *priv)
1365} 1389}
1366EXPORT_SYMBOL(iwl_apm_stop); 1390EXPORT_SYMBOL(iwl_apm_stop);
1367 1391
1392
1393/*
1394 * Start up NIC's basic functionality after it has been reset
1395 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1396 * NOTE: This does not load uCode nor start the embedded processor
1397 */
1398int iwl_apm_init(struct iwl_priv *priv)
1399{
1400 int ret = 0;
1401 u16 lctl;
1402
1403 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1404
1405 /*
1406 * Use "set_bit" below rather than "write", to preserve any hardware
1407 * bits already set by default after reset.
1408 */
1409
1410 /* Disable L0S exit timer (platform NMI Work/Around) */
1411 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1412 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1413
1414 /*
1415 * Disable L0s without affecting L1;
1416 * don't wait for ICH L0s (ICH bug W/A)
1417 */
1418 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1419 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1420
1421 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1422 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
1423
1424 /*
1425 * Enable HAP INTA (interrupt from management bus) to
1426 * wake device's PCI Express link L1a -> L0s
1428 * NOTE: This is no-op for 3945 (non-existent bit)
1428 */
1429 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1430 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1431
1432 /*
1433 * HW bug W/A - costs negligible power consumption ...
1434 * Check if BIOS (or OS) enabled L1-ASPM on this device
1435 */
1436 if (priv->cfg->set_l0s) {
1437 lctl = iwl_pcie_link_ctl(priv);
1438 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1439 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1440 /* L1-ASPM enabled; disable(!) L0S */
1441 iwl_set_bit(priv, CSR_GIO_REG,
1442 CSR_GIO_REG_VAL_L0S_ENABLED);
1443 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1444 } else {
1445 /* L1-ASPM disabled; enable(!) L0S */
1446 iwl_clear_bit(priv, CSR_GIO_REG,
1447 CSR_GIO_REG_VAL_L0S_ENABLED);
1448 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1449 }
1450 }
1451
1452 /* Configure analog phase-lock-loop before activating to D0A */
1453 if (priv->cfg->pll_cfg_val)
1454 iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);
1455
1456 /*
1457 * Set "initialization complete" bit to move adapter from
1458 * D0U* --> D0A* (powered-up active) state.
1459 */
1460 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1461
1462 /*
1463 * Wait for clock stabilization; once stabilized, access to
1464 * device-internal resources is supported, e.g. iwl_write_prph()
1465 * and accesses to uCode SRAM.
1466 */
1467 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1468 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1469 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1470 if (ret < 0) {
1471 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1472 goto out;
1473 }
1474
1475 /*
1476 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1478 * BSM (Bootstrap State Machine) is only in 3945 and 4965;
1478 * later devices (i.e. 5000 and later) have non-volatile SRAM,
1479 * and don't need BSM to restore data after power-saving sleep.
1480 *
1481 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1482 * do not disable clocks. This preserves any hardware bits already
1483 * set by default in "CLK_CTRL_REG" after reset.
1484 */
1485 if (priv->cfg->use_bsm)
1486 iwl_write_prph(priv, APMG_CLK_EN_REG,
1487 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1488 else
1489 iwl_write_prph(priv, APMG_CLK_EN_REG,
1490 APMG_CLK_VAL_DMA_CLK_RQT);
1491 udelay(20);
1492
1493 /* Disable L1-Active */
1494 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1495 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1496
1497out:
1498 return ret;
1499}
1500EXPORT_SYMBOL(iwl_apm_init);
1501
1502
1503
1368void iwl_configure_filter(struct ieee80211_hw *hw, 1504void iwl_configure_filter(struct ieee80211_hw *hw,
1369 unsigned int changed_flags, 1505 unsigned int changed_flags,
1370 unsigned int *total_flags, 1506 unsigned int *total_flags,
@@ -1412,73 +1548,14 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
1412} 1548}
1413EXPORT_SYMBOL(iwl_configure_filter); 1549EXPORT_SYMBOL(iwl_configure_filter);
1414 1550
1415int iwl_setup_mac(struct iwl_priv *priv)
1416{
1417 int ret;
1418 struct ieee80211_hw *hw = priv->hw;
1419 hw->rate_control_algorithm = "iwl-agn-rs";
1420
1421 /* Tell mac80211 our characteristics */
1422 hw->flags = IEEE80211_HW_SIGNAL_DBM |
1423 IEEE80211_HW_NOISE_DBM |
1424 IEEE80211_HW_AMPDU_AGGREGATION |
1425 IEEE80211_HW_SPECTRUM_MGMT;
1426
1427 if (!priv->cfg->broken_powersave)
1428 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
1429 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
1430
1431 hw->wiphy->interface_modes =
1432 BIT(NL80211_IFTYPE_STATION) |
1433 BIT(NL80211_IFTYPE_ADHOC);
1434
1435 hw->wiphy->custom_regulatory = true;
1436
1437 /* Firmware does not support this */
1438 hw->wiphy->disable_beacon_hints = true;
1439
1440 /*
1441 * For now, disable PS by default because it affects
1442 * RX performance significantly.
1443 */
1444 hw->wiphy->ps_default = false;
1445
1446 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
1447 /* we create the 802.11 header and a zero-length SSID element */
1448 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
1449
1450 /* Default value; 4 EDCA QOS priorities */
1451 hw->queues = 4;
1452
1453 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
1454
1455 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
1456 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1457 &priv->bands[IEEE80211_BAND_2GHZ];
1458 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
1459 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1460 &priv->bands[IEEE80211_BAND_5GHZ];
1461
1462 ret = ieee80211_register_hw(priv->hw);
1463 if (ret) {
1464 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
1465 return ret;
1466 }
1467 priv->mac80211_registered = 1;
1468
1469 return 0;
1470}
1471EXPORT_SYMBOL(iwl_setup_mac);
1472
1473int iwl_set_hw_params(struct iwl_priv *priv) 1551int iwl_set_hw_params(struct iwl_priv *priv)
1474{ 1552{
1475 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 1553 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
1476 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 1554 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
1477 if (priv->cfg->mod_params->amsdu_size_8K) 1555 if (priv->cfg->mod_params->amsdu_size_8K)
1478 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K; 1556 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
1479 else 1557 else
1480 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K; 1558 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
1481 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
1482 1559
1483 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; 1560 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
1484 1561
@@ -1507,7 +1584,6 @@ int iwl_init_drv(struct iwl_priv *priv)
1507 /* Clear the driver's (not device's) station table */ 1584 /* Clear the driver's (not device's) station table */
1508 iwl_clear_stations_table(priv); 1585 iwl_clear_stations_table(priv);
1509 1586
1510 priv->data_retry_limit = -1;
1511 priv->ieee_channels = NULL; 1587 priv->ieee_channels = NULL;
1512 priv->ieee_rates = NULL; 1588 priv->ieee_rates = NULL;
1513 priv->band = IEEE80211_BAND_2GHZ; 1589 priv->band = IEEE80211_BAND_2GHZ;
@@ -1932,9 +2008,9 @@ EXPORT_SYMBOL(iwl_isr_legacy);
1932int iwl_send_bt_config(struct iwl_priv *priv) 2008int iwl_send_bt_config(struct iwl_priv *priv)
1933{ 2009{
1934 struct iwl_bt_cmd bt_cmd = { 2010 struct iwl_bt_cmd bt_cmd = {
1935 .flags = 3, 2011 .flags = BT_COEX_MODE_4W,
1936 .lead_time = 0xAA, 2012 .lead_time = BT_LEAD_TIME_DEF,
1937 .max_kill = 1, 2013 .max_kill = BT_MAX_KILL_DEF,
1938 .kill_ack_mask = 0, 2014 .kill_ack_mask = 0,
1939 .kill_cts_mask = 0, 2015 .kill_cts_mask = 0,
1940 }; 2016 };
@@ -2094,10 +2170,7 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2094 spin_unlock_irqrestore(&priv->lock, flags); 2170 spin_unlock_irqrestore(&priv->lock, flags);
2095 priv->thermal_throttle.ct_kill_toggle = false; 2171 priv->thermal_throttle.ct_kill_toggle = false;
2096 2172
2097 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 2173 if (priv->cfg->support_ct_kill_exit) {
2098 case CSR_HW_REV_TYPE_1000:
2099 case CSR_HW_REV_TYPE_6x00:
2100 case CSR_HW_REV_TYPE_6x50:
2101 adv_cmd.critical_temperature_enter = 2174 adv_cmd.critical_temperature_enter =
2102 cpu_to_le32(priv->hw_params.ct_kill_threshold); 2175 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2103 adv_cmd.critical_temperature_exit = 2176 adv_cmd.critical_temperature_exit =
@@ -2114,8 +2187,7 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2114 "exit is %d\n", 2187 "exit is %d\n",
2115 priv->hw_params.ct_kill_threshold, 2188 priv->hw_params.ct_kill_threshold,
2116 priv->hw_params.ct_kill_exit_threshold); 2189 priv->hw_params.ct_kill_exit_threshold);
2117 break; 2190 } else {
2118 default:
2119 cmd.critical_temperature_R = 2191 cmd.critical_temperature_R =
2120 cpu_to_le32(priv->hw_params.ct_kill_threshold); 2192 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2121 2193
@@ -2128,7 +2200,6 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2128 "succeeded, " 2200 "succeeded, "
2129 "critical temperature is %d\n", 2201 "critical temperature is %d\n",
2130 priv->hw_params.ct_kill_threshold); 2202 priv->hw_params.ct_kill_threshold);
2131 break;
2132 } 2203 }
2133} 2204}
2134EXPORT_SYMBOL(iwl_rf_kill_ct_config); 2205EXPORT_SYMBOL(iwl_rf_kill_ct_config);
@@ -2160,7 +2231,7 @@ void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
2160 struct iwl_rx_mem_buffer *rxb) 2231 struct iwl_rx_mem_buffer *rxb)
2161{ 2232{
2162#ifdef CONFIG_IWLWIFI_DEBUG 2233#ifdef CONFIG_IWLWIFI_DEBUG
2163 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 2234 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2164 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); 2235 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
2165 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", 2236 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
2166 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 2237 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
@@ -2171,7 +2242,7 @@ EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
2171void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 2242void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
2172 struct iwl_rx_mem_buffer *rxb) 2243 struct iwl_rx_mem_buffer *rxb)
2173{ 2244{
2174 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 2245 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2175 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 2246 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
2176 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " 2247 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
2177 "notification for %s:\n", len, 2248 "notification for %s:\n", len,
@@ -2183,7 +2254,7 @@ EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
2183void iwl_rx_reply_error(struct iwl_priv *priv, 2254void iwl_rx_reply_error(struct iwl_priv *priv,
2184 struct iwl_rx_mem_buffer *rxb) 2255 struct iwl_rx_mem_buffer *rxb)
2185{ 2256{
2186 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 2257 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2187 2258
2188 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " 2259 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
2189 "seq 0x%04X ser 0x%08X\n", 2260 "seq 0x%04X ser 0x%08X\n",
@@ -2648,6 +2719,14 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2648 goto set_ch_out; 2719 goto set_ch_out;
2649 } 2720 }
2650 2721
2722 if (iwl_is_associated(priv) &&
2723 (le16_to_cpu(priv->active_rxon.channel) != ch) &&
2724 priv->cfg->ops->lib->set_channel_switch) {
2725 ret = priv->cfg->ops->lib->set_channel_switch(priv,
2726 ch);
2727 goto out;
2728 }
2729
2651 spin_lock_irqsave(&priv->lock, flags); 2730 spin_lock_irqsave(&priv->lock, flags);
2652 2731
2653 /* Configure HT40 channels */ 2732 /* Configure HT40 channels */
@@ -2826,6 +2905,27 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2826} 2905}
2827EXPORT_SYMBOL(iwl_mac_reset_tsf); 2906EXPORT_SYMBOL(iwl_mac_reset_tsf);
2828 2907
2908int iwl_alloc_txq_mem(struct iwl_priv *priv)
2909{
2910 if (!priv->txq)
2911 priv->txq = kzalloc(
2912 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
2913 GFP_KERNEL);
2914 if (!priv->txq) {
2915 IWL_ERR(priv, "Not enough memory for txq \n");
2916 return -ENOMEM;
2917 }
2918 return 0;
2919}
2920EXPORT_SYMBOL(iwl_alloc_txq_mem);
2921
2922void iwl_free_txq_mem(struct iwl_priv *priv)
2923{
2924 kfree(priv->txq);
2925 priv->txq = NULL;
2926}
2927EXPORT_SYMBOL(iwl_free_txq_mem);
2928
2829#ifdef CONFIG_IWLWIFI_DEBUGFS 2929#ifdef CONFIG_IWLWIFI_DEBUGFS
2830 2930
2831#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) 2931#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 6688b6944200..b875dcfca2d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -109,7 +109,6 @@ struct iwl_hcmd_utils_ops {
109 109
110struct iwl_apm_ops { 110struct iwl_apm_ops {
111 int (*init)(struct iwl_priv *priv); 111 int (*init)(struct iwl_priv *priv);
112 int (*reset)(struct iwl_priv *priv);
113 void (*stop)(struct iwl_priv *priv); 112 void (*stop)(struct iwl_priv *priv);
114 void (*config)(struct iwl_priv *priv); 113 void (*config)(struct iwl_priv *priv);
115 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src); 114 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
@@ -170,6 +169,7 @@ struct iwl_lib_ops {
170 int (*load_ucode)(struct iwl_priv *priv); 169 int (*load_ucode)(struct iwl_priv *priv);
171 void (*dump_nic_event_log)(struct iwl_priv *priv); 170 void (*dump_nic_event_log)(struct iwl_priv *priv);
172 void (*dump_nic_error_log)(struct iwl_priv *priv); 171 void (*dump_nic_error_log)(struct iwl_priv *priv);
172 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
173 /* power management */ 173 /* power management */
174 struct iwl_apm_ops apm_ops; 174 struct iwl_apm_ops apm_ops;
175 175
@@ -205,7 +205,6 @@ struct iwl_mod_params {
205 int sw_crypto; /* def: 0 = using hardware encryption */ 205 int sw_crypto; /* def: 0 = using hardware encryption */
206 int disable_hw_scan; /* def: 0 = use h/w scan */ 206 int disable_hw_scan; /* def: 0 = use h/w scan */
207 int num_of_queues; /* def: HW dependent */ 207 int num_of_queues; /* def: HW dependent */
208 int num_of_ampdu_queues;/* def: HW dependent */
209 int disable_11n; /* def: 0 = 11n capabilities enabled */ 208 int disable_11n; /* def: 0 = 11n capabilities enabled */
210 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 209 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
211 int antenna; /* def: 0 = both antennas (use diversity) */ 210 int antenna; /* def: 0 = both antennas (use diversity) */
@@ -227,6 +226,8 @@ struct iwl_mod_params {
227 * The detail algorithm is described in iwl-led.c 226 * The detail algorithm is described in iwl-led.c
228 * @use_rts_for_ht: use rts/cts protection for HT traffic 227 * @use_rts_for_ht: use rts/cts protection for HT traffic
229 * @chain_noise_num_beacons: number of beacons used to compute chain noise 228 * @chain_noise_num_beacons: number of beacons used to compute chain noise
229 * @adv_thermal_throttle: support advance thermal throttle
230 * @support_ct_kill_exit: support ct kill exit condition
230 * 231 *
231 * We enable the driver to be backward compatible wrt API version. The 232 * We enable the driver to be backward compatible wrt API version. The
232 * driver specifies which APIs it supports (with @ucode_api_max being the 233 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -258,11 +259,18 @@ struct iwl_cfg {
258 int eeprom_size; 259 int eeprom_size;
259 u16 eeprom_ver; 260 u16 eeprom_ver;
260 u16 eeprom_calib_ver; 261 u16 eeprom_calib_ver;
262 int num_of_queues; /* def: HW dependent */
263 int num_of_ampdu_queues;/* def: HW dependent */
261 const struct iwl_ops *ops; 264 const struct iwl_ops *ops;
262 const struct iwl_mod_params *mod_params; 265 const struct iwl_mod_params *mod_params;
263 u8 valid_tx_ant; 266 u8 valid_tx_ant;
264 u8 valid_rx_ant; 267 u8 valid_rx_ant;
265 bool need_pll_cfg; 268
269 /* for iwl_apm_init() */
270 u32 pll_cfg_val;
271 bool set_l0s;
272 bool use_bsm;
273
266 bool use_isr_legacy; 274 bool use_isr_legacy;
267 enum iwl_pa_type pa_type; 275 enum iwl_pa_type pa_type;
268 const u16 max_ll_items; 276 const u16 max_ll_items;
@@ -273,6 +281,8 @@ struct iwl_cfg {
273 bool use_rts_for_ht; 281 bool use_rts_for_ht;
274 int chain_noise_num_beacons; 282 int chain_noise_num_beacons;
275 const bool supports_idle; 283 const bool supports_idle;
284 bool adv_thermal_throttle;
285 bool support_ct_kill_exit;
276}; 286};
277 287
278/*************************** 288/***************************
@@ -305,7 +315,6 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
305 unsigned int changed_flags, 315 unsigned int changed_flags,
306 unsigned int *total_flags, u64 multicast); 316 unsigned int *total_flags, u64 multicast);
307int iwl_hw_nic_init(struct iwl_priv *priv); 317int iwl_hw_nic_init(struct iwl_priv *priv);
308int iwl_setup_mac(struct iwl_priv *priv);
309int iwl_set_hw_params(struct iwl_priv *priv); 318int iwl_set_hw_params(struct iwl_priv *priv);
310int iwl_init_drv(struct iwl_priv *priv); 319int iwl_init_drv(struct iwl_priv *priv);
311void iwl_uninit_drv(struct iwl_priv *priv); 320void iwl_uninit_drv(struct iwl_priv *priv);
@@ -327,6 +336,10 @@ void iwl_config_ap(struct iwl_priv *priv);
327int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, 336int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
328 struct ieee80211_tx_queue_stats *stats); 337 struct ieee80211_tx_queue_stats *stats);
329void iwl_mac_reset_tsf(struct ieee80211_hw *hw); 338void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
339int iwl_alloc_txq_mem(struct iwl_priv *priv);
340void iwl_free_txq_mem(struct iwl_priv *priv);
341void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
342 __le32 *tx_flags);
330#ifdef CONFIG_IWLWIFI_DEBUGFS 343#ifdef CONFIG_IWLWIFI_DEBUGFS
331int iwl_alloc_traffic_mem(struct iwl_priv *priv); 344int iwl_alloc_traffic_mem(struct iwl_priv *priv);
332void iwl_free_traffic_mem(struct iwl_priv *priv); 345void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -527,7 +540,7 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
527 const void *data, 540 const void *data,
528 void (*callback)(struct iwl_priv *priv, 541 void (*callback)(struct iwl_priv *priv,
529 struct iwl_device_cmd *cmd, 542 struct iwl_device_cmd *cmd,
530 struct sk_buff *skb)); 543 struct iwl_rx_packet *pkt));
531 544
532int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 545int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
533 546
@@ -660,6 +673,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
660 struct iwl_rx_mem_buffer *rxb); 673 struct iwl_rx_mem_buffer *rxb);
661void iwl_apm_stop(struct iwl_priv *priv); 674void iwl_apm_stop(struct iwl_priv *priv);
662int iwl_apm_stop_master(struct iwl_priv *priv); 675int iwl_apm_stop_master(struct iwl_priv *priv);
676int iwl_apm_init(struct iwl_priv *priv);
663 677
664void iwl_setup_rxon_timing(struct iwl_priv *priv); 678void iwl_setup_rxon_timing(struct iwl_priv *priv);
665static inline int iwl_send_rxon_assoc(struct iwl_priv *priv) 679static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 8f183e0fa512..b6ed5a3147a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -109,8 +109,9 @@
109 * Bit fields: 109 * Bit fields:
110 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step 110 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
111 */ 111 */
112#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) 112#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
113#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) 113#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
114#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
114 115
115/* Bits for CSR_HW_IF_CONFIG_REG */ 116/* Bits for CSR_HW_IF_CONFIG_REG */
116#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) 117#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
@@ -195,6 +196,7 @@
195#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) 196#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
196#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) 197#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
197#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) 198#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
199#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
198 200
199/* GP (general purpose) CONTROL */ 201/* GP (general purpose) CONTROL */
200#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) 202#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
@@ -235,6 +237,11 @@
235#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */ 237#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */
236#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */ 238#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */
237#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */ 239#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */
240#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
241#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
242#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
243#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
244#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
238 245
239/* EEPROM signature */ 246/* EEPROM signature */
240#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000) 247#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index b9ca475cc61c..96c92eab692a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -106,6 +106,7 @@ struct iwl_debugfs {
106 struct dentry *file_sensitivity; 106 struct dentry *file_sensitivity;
107 struct dentry *file_chain_noise; 107 struct dentry *file_chain_noise;
108 struct dentry *file_tx_power; 108 struct dentry *file_tx_power;
109 struct dentry *file_power_save_status;
109 } dbgfs_debug_files; 110 } dbgfs_debug_files;
110 u32 sram_offset; 111 u32 sram_offset;
111 u32 sram_len; 112 u32 sram_len;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 1794b9c4e6ac..8784911fd56e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -801,15 +801,20 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
801 * valid here. However, let's not confuse them and present 801 * valid here. However, let's not confuse them and present
802 * IWL_POWER_INDEX_1 as "1", not "0". 802 * IWL_POWER_INDEX_1 as "1", not "0".
803 */ 803 */
804 if (value > 0) 804 if (value == 0)
805 return -EINVAL;
806 else if (value > 0)
805 value -= 1; 807 value -= 1;
806 808
807 if (value != -1 && (value < 0 || value >= IWL_POWER_NUM)) 809 if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
808 return -EINVAL; 810 return -EINVAL;
809 811
812 if (!iwl_is_ready_rf(priv))
813 return -EAGAIN;
814
810 priv->power_data.debug_sleep_level_override = value; 815 priv->power_data.debug_sleep_level_override = value;
811 816
812 iwl_power_update_mode(priv, false); 817 iwl_power_update_mode(priv, true);
813 818
814 return count; 819 return count;
815} 820}
@@ -882,10 +887,14 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
882 struct iwl_rx_queue *rxq = &priv->rxq; 887 struct iwl_rx_queue *rxq = &priv->rxq;
883 char *buf; 888 char *buf;
884 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + 889 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
885 (IWL_MAX_NUM_QUEUES * 32 * 8) + 400; 890 (priv->cfg->num_of_queues * 32 * 8) + 400;
886 const u8 *ptr; 891 const u8 *ptr;
887 ssize_t ret; 892 ssize_t ret;
888 893
894 if (!priv->txq) {
895 IWL_ERR(priv, "txq not ready\n");
896 return -EAGAIN;
897 }
889 buf = kzalloc(bufsz, GFP_KERNEL); 898 buf = kzalloc(bufsz, GFP_KERNEL);
890 if (!buf) { 899 if (!buf) {
891 IWL_ERR(priv, "Can not allocate buffer\n"); 900 IWL_ERR(priv, "Can not allocate buffer\n");
@@ -977,8 +986,12 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
977 int pos = 0; 986 int pos = 0;
978 int cnt; 987 int cnt;
979 int ret; 988 int ret;
980 const size_t bufsz = sizeof(char) * 60 * IWL_MAX_NUM_QUEUES; 989 const size_t bufsz = sizeof(char) * 60 * priv->cfg->num_of_queues;
981 990
991 if (!priv->txq) {
992 IWL_ERR(priv, "txq not ready\n");
993 return -EAGAIN;
994 }
982 buf = kzalloc(bufsz, GFP_KERNEL); 995 buf = kzalloc(bufsz, GFP_KERNEL);
983 if (!buf) 996 if (!buf)
984 return -ENOMEM; 997 return -ENOMEM;
@@ -1069,10 +1082,10 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1069 sizeof(struct statistics_rx_non_phy) * 20 + 1082 sizeof(struct statistics_rx_non_phy) * 20 +
1070 sizeof(struct statistics_rx_ht_phy) * 20 + 400; 1083 sizeof(struct statistics_rx_ht_phy) * 20 + 400;
1071 ssize_t ret; 1084 ssize_t ret;
1072 struct statistics_rx_phy *ofdm; 1085 struct statistics_rx_phy *ofdm, *accum_ofdm;
1073 struct statistics_rx_phy *cck; 1086 struct statistics_rx_phy *cck, *accum_cck;
1074 struct statistics_rx_non_phy *general; 1087 struct statistics_rx_non_phy *general, *accum_general;
1075 struct statistics_rx_ht_phy *ht; 1088 struct statistics_rx_ht_phy *ht, *accum_ht;
1076 1089
1077 if (!iwl_is_alive(priv)) 1090 if (!iwl_is_alive(priv))
1078 return -EAGAIN; 1091 return -EAGAIN;
@@ -1101,155 +1114,268 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1101 cck = &priv->statistics.rx.cck; 1114 cck = &priv->statistics.rx.cck;
1102 general = &priv->statistics.rx.general; 1115 general = &priv->statistics.rx.general;
1103 ht = &priv->statistics.rx.ofdm_ht; 1116 ht = &priv->statistics.rx.ofdm_ht;
1117 accum_ofdm = &priv->accum_statistics.rx.ofdm;
1118 accum_cck = &priv->accum_statistics.rx.cck;
1119 accum_general = &priv->accum_statistics.rx.general;
1120 accum_ht = &priv->accum_statistics.rx.ofdm_ht;
1104 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1121 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1105 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n"); 1122 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n");
1106 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n", 1123 pos += scnprintf(buf + pos, bufsz - pos,
1107 le32_to_cpu(ofdm->ina_cnt)); 1124 "\t\t\tcurrent\t\t\taccumulative\n");
1108 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n", 1125 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
1109 le32_to_cpu(ofdm->fina_cnt)); 1126 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt);
1110 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n", 1127 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
1111 le32_to_cpu(ofdm->plcp_err)); 1128 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt);
1112 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n", 1129 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
1113 le32_to_cpu(ofdm->crc32_err)); 1130 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err);
1114 pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n", 1131 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
1115 le32_to_cpu(ofdm->overrun_err)); 1132 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err);
1116 pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n", 1133 pos += scnprintf(buf + pos, bufsz - pos,
1117 le32_to_cpu(ofdm->early_overrun_err)); 1134 "overrun_err:\t\t%u\t\t\t%u\n",
1118 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n", 1135 le32_to_cpu(ofdm->overrun_err),
1119 le32_to_cpu(ofdm->crc32_good)); 1136 accum_ofdm->overrun_err);
1120 pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n", 1137 pos += scnprintf(buf + pos, bufsz - pos,
1121 le32_to_cpu(ofdm->false_alarm_cnt)); 1138 "early_overrun_err:\t%u\t\t\t%u\n",
1122 pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n", 1139 le32_to_cpu(ofdm->early_overrun_err),
1123 le32_to_cpu(ofdm->fina_sync_err_cnt)); 1140 accum_ofdm->early_overrun_err);
1124 pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n", 1141 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
1125 le32_to_cpu(ofdm->sfd_timeout)); 1142 le32_to_cpu(ofdm->crc32_good),
1126 pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n", 1143 accum_ofdm->crc32_good);
1127 le32_to_cpu(ofdm->fina_timeout)); 1144 pos += scnprintf(buf + pos, bufsz - pos,
1128 pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n", 1145 "false_alarm_cnt:\t%u\t\t\t%u\n",
1129 le32_to_cpu(ofdm->unresponded_rts)); 1146 le32_to_cpu(ofdm->false_alarm_cnt),
1130 pos += scnprintf(buf + pos, bufsz - pos, 1147 accum_ofdm->false_alarm_cnt);
1131 "rxe_frame_limit_overrun: %u\n", 1148 pos += scnprintf(buf + pos, bufsz - pos,
1132 le32_to_cpu(ofdm->rxe_frame_limit_overrun)); 1149 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
1133 pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n", 1150 le32_to_cpu(ofdm->fina_sync_err_cnt),
1134 le32_to_cpu(ofdm->sent_ack_cnt)); 1151 accum_ofdm->fina_sync_err_cnt);
1135 pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n", 1152 pos += scnprintf(buf + pos, bufsz - pos,
1136 le32_to_cpu(ofdm->sent_cts_cnt)); 1153 "sfd_timeout:\t\t%u\t\t\t%u\n",
1137 pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n", 1154 le32_to_cpu(ofdm->sfd_timeout),
1138 le32_to_cpu(ofdm->sent_ba_rsp_cnt)); 1155 accum_ofdm->sfd_timeout);
1139 pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n", 1156 pos += scnprintf(buf + pos, bufsz - pos,
1140 le32_to_cpu(ofdm->dsp_self_kill)); 1157 "fina_timeout:\t\t%u\t\t\t%u\n",
1141 pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n", 1158 le32_to_cpu(ofdm->fina_timeout),
1142 le32_to_cpu(ofdm->mh_format_err)); 1159 accum_ofdm->fina_timeout);
1143 pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n", 1160 pos += scnprintf(buf + pos, bufsz - pos,
1144 le32_to_cpu(ofdm->re_acq_main_rssi_sum)); 1161 "unresponded_rts:\t%u\t\t\t%u\n",
1162 le32_to_cpu(ofdm->unresponded_rts),
1163 accum_ofdm->unresponded_rts);
1164 pos += scnprintf(buf + pos, bufsz - pos,
1165 "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
1166 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
1167 accum_ofdm->rxe_frame_limit_overrun);
1168 pos += scnprintf(buf + pos, bufsz - pos,
1169 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
1170 le32_to_cpu(ofdm->sent_ack_cnt),
1171 accum_ofdm->sent_ack_cnt);
1172 pos += scnprintf(buf + pos, bufsz - pos,
1173 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
1174 le32_to_cpu(ofdm->sent_cts_cnt),
1175 accum_ofdm->sent_cts_cnt);
1176 pos += scnprintf(buf + pos, bufsz - pos,
1177 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
1178 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
1179 accum_ofdm->sent_ba_rsp_cnt);
1180 pos += scnprintf(buf + pos, bufsz - pos,
1181 "dsp_self_kill:\t\t%u\t\t\t%u\n",
1182 le32_to_cpu(ofdm->dsp_self_kill),
1183 accum_ofdm->dsp_self_kill);
1184 pos += scnprintf(buf + pos, bufsz - pos,
1185 "mh_format_err:\t\t%u\t\t\t%u\n",
1186 le32_to_cpu(ofdm->mh_format_err),
1187 accum_ofdm->mh_format_err);
1188 pos += scnprintf(buf + pos, bufsz - pos,
1189 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
1190 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
1191 accum_ofdm->re_acq_main_rssi_sum);
1145 1192
1146 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n"); 1193 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n");
1147 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n", 1194 pos += scnprintf(buf + pos, bufsz - pos,
1148 le32_to_cpu(cck->ina_cnt)); 1195 "\t\t\tcurrent\t\t\taccumulative\n");
1149 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n", 1196 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
1150 le32_to_cpu(cck->fina_cnt)); 1197 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt);
1151 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n", 1198 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
1152 le32_to_cpu(cck->plcp_err)); 1199 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt);
1153 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n", 1200 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
1154 le32_to_cpu(cck->crc32_err)); 1201 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err);
1155 pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n", 1202 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
1156 le32_to_cpu(cck->overrun_err)); 1203 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err);
1157 pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n", 1204 pos += scnprintf(buf + pos, bufsz - pos,
1158 le32_to_cpu(cck->early_overrun_err)); 1205 "overrun_err:\t\t%u\t\t\t%u\n",
1159 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n", 1206 le32_to_cpu(cck->overrun_err),
1160 le32_to_cpu(cck->crc32_good)); 1207 accum_cck->overrun_err);
1161 pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n", 1208 pos += scnprintf(buf + pos, bufsz - pos,
1162 le32_to_cpu(cck->false_alarm_cnt)); 1209 "early_overrun_err:\t%u\t\t\t%u\n",
1163 pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n", 1210 le32_to_cpu(cck->early_overrun_err),
1164 le32_to_cpu(cck->fina_sync_err_cnt)); 1211 accum_cck->early_overrun_err);
1165 pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n", 1212 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
1166 le32_to_cpu(cck->sfd_timeout)); 1213 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good);
1167 pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n", 1214 pos += scnprintf(buf + pos, bufsz - pos,
1168 le32_to_cpu(cck->fina_timeout)); 1215 "false_alarm_cnt:\t%u\t\t\t%u\n",
1169 pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n", 1216 le32_to_cpu(cck->false_alarm_cnt),
1170 le32_to_cpu(cck->unresponded_rts)); 1217 accum_cck->false_alarm_cnt);
1171 pos += scnprintf(buf + pos, bufsz - pos, 1218 pos += scnprintf(buf + pos, bufsz - pos,
1172 "rxe_frame_limit_overrun: %u\n", 1219 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
1173 le32_to_cpu(cck->rxe_frame_limit_overrun)); 1220 le32_to_cpu(cck->fina_sync_err_cnt),
1174 pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n", 1221 accum_cck->fina_sync_err_cnt);
1175 le32_to_cpu(cck->sent_ack_cnt)); 1222 pos += scnprintf(buf + pos, bufsz - pos,
1176 pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n", 1223 "sfd_timeout:\t\t%u\t\t\t%u\n",
1177 le32_to_cpu(cck->sent_cts_cnt)); 1224 le32_to_cpu(cck->sfd_timeout),
1178 pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n", 1225 accum_cck->sfd_timeout);
1179 le32_to_cpu(cck->sent_ba_rsp_cnt)); 1226 pos += scnprintf(buf + pos, bufsz - pos,
1180 pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n", 1227 "fina_timeout:\t\t%u\t\t\t%u\n",
1181 le32_to_cpu(cck->dsp_self_kill)); 1228 le32_to_cpu(cck->fina_timeout),
1182 pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n", 1229 accum_cck->fina_timeout);
1183 le32_to_cpu(cck->mh_format_err)); 1230 pos += scnprintf(buf + pos, bufsz - pos,
1184 pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n", 1231 "unresponded_rts:\t%u\t\t\t%u\n",
1185 le32_to_cpu(cck->re_acq_main_rssi_sum)); 1232 le32_to_cpu(cck->unresponded_rts),
1233 accum_cck->unresponded_rts);
1234 pos += scnprintf(buf + pos, bufsz - pos,
1235 "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
1236 le32_to_cpu(cck->rxe_frame_limit_overrun),
1237 accum_cck->rxe_frame_limit_overrun);
1238 pos += scnprintf(buf + pos, bufsz - pos,
1239 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
1240 le32_to_cpu(cck->sent_ack_cnt),
1241 accum_cck->sent_ack_cnt);
1242 pos += scnprintf(buf + pos, bufsz - pos,
1243 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
1244 le32_to_cpu(cck->sent_cts_cnt),
1245 accum_cck->sent_cts_cnt);
1246 pos += scnprintf(buf + pos, bufsz - pos,
1247 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
1248 le32_to_cpu(cck->sent_ba_rsp_cnt),
1249 accum_cck->sent_ba_rsp_cnt);
1250 pos += scnprintf(buf + pos, bufsz - pos,
1251 "dsp_self_kill:\t\t%u\t\t\t%u\n",
1252 le32_to_cpu(cck->dsp_self_kill),
1253 accum_cck->dsp_self_kill);
1254 pos += scnprintf(buf + pos, bufsz - pos,
1255 "mh_format_err:\t\t%u\t\t\t%u\n",
1256 le32_to_cpu(cck->mh_format_err),
1257 accum_cck->mh_format_err);
1258 pos += scnprintf(buf + pos, bufsz - pos,
1259 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
1260 le32_to_cpu(cck->re_acq_main_rssi_sum),
1261 accum_cck->re_acq_main_rssi_sum);
1186 1262
1187 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n"); 1263 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n");
1188 pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts: %u\n", 1264 pos += scnprintf(buf + pos, bufsz - pos,
1189 le32_to_cpu(general->bogus_cts)); 1265 "\t\t\tcurrent\t\t\taccumulative\n");
1190 pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack: %u\n", 1266 pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n",
1191 le32_to_cpu(general->bogus_ack)); 1267 le32_to_cpu(general->bogus_cts),
1192 pos += scnprintf(buf + pos, bufsz - pos, "non_bssid_frames: %u\n", 1268 accum_general->bogus_cts);
1193 le32_to_cpu(general->non_bssid_frames)); 1269 pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n",
1194 pos += scnprintf(buf + pos, bufsz - pos, "filtered_frames: %u\n", 1270 le32_to_cpu(general->bogus_ack),
1195 le32_to_cpu(general->filtered_frames)); 1271 accum_general->bogus_ack);
1196 pos += scnprintf(buf + pos, bufsz - pos, "non_channel_beacons: %u\n", 1272 pos += scnprintf(buf + pos, bufsz - pos,
1197 le32_to_cpu(general->non_channel_beacons)); 1273 "non_bssid_frames:\t%u\t\t\t%u\n",
1198 pos += scnprintf(buf + pos, bufsz - pos, "channel_beacons: %u\n", 1274 le32_to_cpu(general->non_bssid_frames),
1199 le32_to_cpu(general->channel_beacons)); 1275 accum_general->non_bssid_frames);
1200 pos += scnprintf(buf + pos, bufsz - pos, "num_missed_bcon: %u\n", 1276 pos += scnprintf(buf + pos, bufsz - pos,
1201 le32_to_cpu(general->num_missed_bcon)); 1277 "filtered_frames:\t%u\t\t\t%u\n",
1202 pos += scnprintf(buf + pos, bufsz - pos, 1278 le32_to_cpu(general->filtered_frames),
1203 "adc_rx_saturation_time: %u\n", 1279 accum_general->filtered_frames);
1204 le32_to_cpu(general->adc_rx_saturation_time)); 1280 pos += scnprintf(buf + pos, bufsz - pos,
1205 pos += scnprintf(buf + pos, bufsz - pos, 1281 "non_channel_beacons:\t%u\t\t\t%u\n",
1206 "ina_detection_search_time: %u\n", 1282 le32_to_cpu(general->non_channel_beacons),
1207 le32_to_cpu(general->ina_detection_search_time)); 1283 accum_general->non_channel_beacons);
1208 pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_a: %u\n", 1284 pos += scnprintf(buf + pos, bufsz - pos,
1209 le32_to_cpu(general->beacon_silence_rssi_a)); 1285 "channel_beacons:\t%u\t\t\t%u\n",
1210 pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_b: %u\n", 1286 le32_to_cpu(general->channel_beacons),
1211 le32_to_cpu(general->beacon_silence_rssi_b)); 1287 accum_general->channel_beacons);
1212 pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_c: %u\n", 1288 pos += scnprintf(buf + pos, bufsz - pos,
1213 le32_to_cpu(general->beacon_silence_rssi_c)); 1289 "num_missed_bcon:\t%u\t\t\t%u\n",
1214 pos += scnprintf(buf + pos, bufsz - pos, 1290 le32_to_cpu(general->num_missed_bcon),
1215 "interference_data_flag: %u\n", 1291 accum_general->num_missed_bcon);
1216 le32_to_cpu(general->interference_data_flag)); 1292 pos += scnprintf(buf + pos, bufsz - pos,
1217 pos += scnprintf(buf + pos, bufsz - pos, "channel_load: %u\n", 1293 "adc_rx_saturation_time:\t%u\t\t\t%u\n",
1218 le32_to_cpu(general->channel_load)); 1294 le32_to_cpu(general->adc_rx_saturation_time),
1219 pos += scnprintf(buf + pos, bufsz - pos, "dsp_false_alarms: %u\n", 1295 accum_general->adc_rx_saturation_time);
1220 le32_to_cpu(general->dsp_false_alarms)); 1296 pos += scnprintf(buf + pos, bufsz - pos,
1221 pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_a: %u\n", 1297 "ina_detect_search_tm:\t%u\t\t\t%u\n",
1222 le32_to_cpu(general->beacon_rssi_a)); 1298 le32_to_cpu(general->ina_detection_search_time),
1223 pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_b: %u\n", 1299 accum_general->ina_detection_search_time);
1224 le32_to_cpu(general->beacon_rssi_b)); 1300 pos += scnprintf(buf + pos, bufsz - pos,
1225 pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_c: %u\n", 1301 "beacon_silence_rssi_a:\t%u\t\t\t%u\n",
1226 le32_to_cpu(general->beacon_rssi_c)); 1302 le32_to_cpu(general->beacon_silence_rssi_a),
1227 pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_a: %u\n", 1303 accum_general->beacon_silence_rssi_a);
1228 le32_to_cpu(general->beacon_energy_a)); 1304 pos += scnprintf(buf + pos, bufsz - pos,
1229 pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_b: %u\n", 1305 "beacon_silence_rssi_b:\t%u\t\t\t%u\n",
1230 le32_to_cpu(general->beacon_energy_b)); 1306 le32_to_cpu(general->beacon_silence_rssi_b),
1231 pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_c: %u\n", 1307 accum_general->beacon_silence_rssi_b);
1232 le32_to_cpu(general->beacon_energy_c)); 1308 pos += scnprintf(buf + pos, bufsz - pos,
1309 "beacon_silence_rssi_c:\t%u\t\t\t%u\n",
1310 le32_to_cpu(general->beacon_silence_rssi_c),
1311 accum_general->beacon_silence_rssi_c);
1312 pos += scnprintf(buf + pos, bufsz - pos,
1313 "interference_data_flag:\t%u\t\t\t%u\n",
1314 le32_to_cpu(general->interference_data_flag),
1315 accum_general->interference_data_flag);
1316 pos += scnprintf(buf + pos, bufsz - pos,
1317 "channel_load:\t\t%u\t\t\t%u\n",
1318 le32_to_cpu(general->channel_load),
1319 accum_general->channel_load);
1320 pos += scnprintf(buf + pos, bufsz - pos,
1321 "dsp_false_alarms:\t%u\t\t\t%u\n",
1322 le32_to_cpu(general->dsp_false_alarms),
1323 accum_general->dsp_false_alarms);
1324 pos += scnprintf(buf + pos, bufsz - pos,
1325 "beacon_rssi_a:\t\t%u\t\t\t%u\n",
1326 le32_to_cpu(general->beacon_rssi_a),
1327 accum_general->beacon_rssi_a);
1328 pos += scnprintf(buf + pos, bufsz - pos,
1329 "beacon_rssi_b:\t\t%u\t\t\t%u\n",
1330 le32_to_cpu(general->beacon_rssi_b),
1331 accum_general->beacon_rssi_b);
1332 pos += scnprintf(buf + pos, bufsz - pos,
1333 "beacon_rssi_c:\t\t%u\t\t\t%u\n",
1334 le32_to_cpu(general->beacon_rssi_c),
1335 accum_general->beacon_rssi_c);
1336 pos += scnprintf(buf + pos, bufsz - pos,
1337 "beacon_energy_a:\t%u\t\t\t%u\n",
1338 le32_to_cpu(general->beacon_energy_a),
1339 accum_general->beacon_energy_a);
1340 pos += scnprintf(buf + pos, bufsz - pos,
1341 "beacon_energy_b:\t%u\t\t\t%u\n",
1342 le32_to_cpu(general->beacon_energy_b),
1343 accum_general->beacon_energy_b);
1344 pos += scnprintf(buf + pos, bufsz - pos,
1345 "beacon_energy_c:\t%u\t\t\t%u\n",
1346 le32_to_cpu(general->beacon_energy_c),
1347 accum_general->beacon_energy_c);
1233 1348
1234 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n"); 1349 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
1235 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n", 1350 pos += scnprintf(buf + pos, bufsz - pos,
1236 le32_to_cpu(ht->plcp_err)); 1351 "\t\t\tcurrent\t\t\taccumulative\n");
1237 pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n", 1352 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
1238 le32_to_cpu(ht->overrun_err)); 1353 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err);
1239 pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n", 1354 pos += scnprintf(buf + pos, bufsz - pos,
1240 le32_to_cpu(ht->early_overrun_err)); 1355 "overrun_err:\t\t%u\t\t\t%u\n",
1241 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n", 1356 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err);
1242 le32_to_cpu(ht->crc32_good)); 1357 pos += scnprintf(buf + pos, bufsz - pos,
1243 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n", 1358 "early_overrun_err:\t%u\t\t\t%u\n",
1244 le32_to_cpu(ht->crc32_err)); 1359 le32_to_cpu(ht->early_overrun_err),
1245 pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n", 1360 accum_ht->early_overrun_err);
1246 le32_to_cpu(ht->mh_format_err)); 1361 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
1247 pos += scnprintf(buf + pos, bufsz - pos, "agg_crc32_good: %u\n", 1362 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good);
1248 le32_to_cpu(ht->agg_crc32_good)); 1363 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
1249 pos += scnprintf(buf + pos, bufsz - pos, "agg_mpdu_cnt: %u\n", 1364 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err);
1250 le32_to_cpu(ht->agg_mpdu_cnt)); 1365 pos += scnprintf(buf + pos, bufsz - pos,
1251 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt: %u\n", 1366 "mh_format_err:\t\t%u\t\t\t%u\n",
1252 le32_to_cpu(ht->agg_cnt)); 1367 le32_to_cpu(ht->mh_format_err),
1368 accum_ht->mh_format_err);
1369 pos += scnprintf(buf + pos, bufsz - pos,
1370 "agg_crc32_good:\t\t%u\t\t\t%u\n",
1371 le32_to_cpu(ht->agg_crc32_good),
1372 accum_ht->agg_crc32_good);
1373 pos += scnprintf(buf + pos, bufsz - pos,
1374 "agg_mpdu_cnt:\t\t%u\t\t\t%u\n",
1375 le32_to_cpu(ht->agg_mpdu_cnt),
1376 accum_ht->agg_mpdu_cnt);
1377 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
1378 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
1253 1379
1254 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1380 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1255 kfree(buf); 1381 kfree(buf);
@@ -1265,7 +1391,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1265 char *buf; 1391 char *buf;
1266 int bufsz = (sizeof(struct statistics_tx) * 24) + 250; 1392 int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
1267 ssize_t ret; 1393 ssize_t ret;
1268 struct statistics_tx *tx; 1394 struct statistics_tx *tx, *accum_tx;
1269 1395
1270 if (!iwl_is_alive(priv)) 1396 if (!iwl_is_alive(priv))
1271 return -EAGAIN; 1397 return -EAGAIN;
@@ -1291,62 +1417,107 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1291 * might not reflect the current uCode activity 1417 * might not reflect the current uCode activity
1292 */ 1418 */
1293 tx = &priv->statistics.tx; 1419 tx = &priv->statistics.tx;
1420 accum_tx = &priv->accum_statistics.tx;
1294 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1421 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1295 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n"); 1422 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n");
1296 pos += scnprintf(buf + pos, bufsz - pos, "preamble: %u\n", 1423 pos += scnprintf(buf + pos, bufsz - pos,
1297 le32_to_cpu(tx->preamble_cnt)); 1424 "\t\t\tcurrent\t\t\taccumulative\n");
1298 pos += scnprintf(buf + pos, bufsz - pos, "rx_detected_cnt: %u\n", 1425 pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n",
1299 le32_to_cpu(tx->rx_detected_cnt)); 1426 le32_to_cpu(tx->preamble_cnt),
1300 pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_defer_cnt: %u\n", 1427 accum_tx->preamble_cnt);
1301 le32_to_cpu(tx->bt_prio_defer_cnt)); 1428 pos += scnprintf(buf + pos, bufsz - pos,
1302 pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_kill_cnt: %u\n", 1429 "rx_detected_cnt:\t\t%u\t\t\t%u\n",
1303 le32_to_cpu(tx->bt_prio_kill_cnt)); 1430 le32_to_cpu(tx->rx_detected_cnt),
1304 pos += scnprintf(buf + pos, bufsz - pos, "few_bytes_cnt: %u\n", 1431 accum_tx->rx_detected_cnt);
1305 le32_to_cpu(tx->few_bytes_cnt)); 1432 pos += scnprintf(buf + pos, bufsz - pos,
1306 pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout: %u\n", 1433 "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n",
1307 le32_to_cpu(tx->cts_timeout)); 1434 le32_to_cpu(tx->bt_prio_defer_cnt),
1308 pos += scnprintf(buf + pos, bufsz - pos, "ack_timeout: %u\n", 1435 accum_tx->bt_prio_defer_cnt);
1309 le32_to_cpu(tx->ack_timeout)); 1436 pos += scnprintf(buf + pos, bufsz - pos,
1310 pos += scnprintf(buf + pos, bufsz - pos, "expected_ack_cnt: %u\n", 1437 "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n",
1311 le32_to_cpu(tx->expected_ack_cnt)); 1438 le32_to_cpu(tx->bt_prio_kill_cnt),
1312 pos += scnprintf(buf + pos, bufsz - pos, "actual_ack_cnt: %u\n", 1439 accum_tx->bt_prio_kill_cnt);
1313 le32_to_cpu(tx->actual_ack_cnt)); 1440 pos += scnprintf(buf + pos, bufsz - pos,
1314 pos += scnprintf(buf + pos, bufsz - pos, "dump_msdu_cnt: %u\n", 1441 "few_bytes_cnt:\t\t\t%u\t\t\t%u\n",
1315 le32_to_cpu(tx->dump_msdu_cnt)); 1442 le32_to_cpu(tx->few_bytes_cnt),
1316 pos += scnprintf(buf + pos, bufsz - pos, 1443 accum_tx->few_bytes_cnt);
1317 "burst_abort_next_frame_mismatch_cnt: %u\n", 1444 pos += scnprintf(buf + pos, bufsz - pos,
1318 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt)); 1445 "cts_timeout:\t\t\t%u\t\t\t%u\n",
1319 pos += scnprintf(buf + pos, bufsz - pos, 1446 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout);
1320 "burst_abort_missing_next_frame_cnt: %u\n", 1447 pos += scnprintf(buf + pos, bufsz - pos,
1321 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt)); 1448 "ack_timeout:\t\t\t%u\t\t\t%u\n",
1322 pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout_collision: %u\n", 1449 le32_to_cpu(tx->ack_timeout),
1323 le32_to_cpu(tx->cts_timeout_collision)); 1450 accum_tx->ack_timeout);
1324 pos += scnprintf(buf + pos, bufsz - pos, 1451 pos += scnprintf(buf + pos, bufsz - pos,
1325 "ack_or_ba_timeout_collision: %u\n", 1452 "expected_ack_cnt:\t\t%u\t\t\t%u\n",
1326 le32_to_cpu(tx->ack_or_ba_timeout_collision)); 1453 le32_to_cpu(tx->expected_ack_cnt),
1327 pos += scnprintf(buf + pos, bufsz - pos, "agg ba_timeout: %u\n", 1454 accum_tx->expected_ack_cnt);
1328 le32_to_cpu(tx->agg.ba_timeout)); 1455 pos += scnprintf(buf + pos, bufsz - pos,
1329 pos += scnprintf(buf + pos, bufsz - pos, 1456 "actual_ack_cnt:\t\t\t%u\t\t\t%u\n",
1330 "agg ba_reschedule_frames: %u\n", 1457 le32_to_cpu(tx->actual_ack_cnt),
1331 le32_to_cpu(tx->agg.ba_reschedule_frames)); 1458 accum_tx->actual_ack_cnt);
1332 pos += scnprintf(buf + pos, bufsz - pos, 1459 pos += scnprintf(buf + pos, bufsz - pos,
1333 "agg scd_query_agg_frame_cnt: %u\n", 1460 "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n",
1334 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt)); 1461 le32_to_cpu(tx->dump_msdu_cnt),
1335 pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_no_agg: %u\n", 1462 accum_tx->dump_msdu_cnt);
1336 le32_to_cpu(tx->agg.scd_query_no_agg)); 1463 pos += scnprintf(buf + pos, bufsz - pos,
1337 pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_agg: %u\n", 1464 "abort_nxt_frame_mismatch:"
1338 le32_to_cpu(tx->agg.scd_query_agg)); 1465 "\t%u\t\t\t%u\n",
1339 pos += scnprintf(buf + pos, bufsz - pos, 1466 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
1340 "agg scd_query_mismatch: %u\n", 1467 accum_tx->burst_abort_next_frame_mismatch_cnt);
1341 le32_to_cpu(tx->agg.scd_query_mismatch)); 1468 pos += scnprintf(buf + pos, bufsz - pos,
1342 pos += scnprintf(buf + pos, bufsz - pos, "agg frame_not_ready: %u\n", 1469 "abort_missing_nxt_frame:"
1343 le32_to_cpu(tx->agg.frame_not_ready)); 1470 "\t%u\t\t\t%u\n",
1344 pos += scnprintf(buf + pos, bufsz - pos, "agg underrun: %u\n", 1471 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
1345 le32_to_cpu(tx->agg.underrun)); 1472 accum_tx->burst_abort_missing_next_frame_cnt);
1346 pos += scnprintf(buf + pos, bufsz - pos, "agg bt_prio_kill: %u\n", 1473 pos += scnprintf(buf + pos, bufsz - pos,
1347 le32_to_cpu(tx->agg.bt_prio_kill)); 1474 "cts_timeout_collision:\t\t%u\t\t\t%u\n",
1348 pos += scnprintf(buf + pos, bufsz - pos, "agg rx_ba_rsp_cnt: %u\n", 1475 le32_to_cpu(tx->cts_timeout_collision),
1349 le32_to_cpu(tx->agg.rx_ba_rsp_cnt)); 1476 accum_tx->cts_timeout_collision);
1477 pos += scnprintf(buf + pos, bufsz - pos,
1478 "ack_ba_timeout_collision:\t%u\t\t\t%u\n",
1479 le32_to_cpu(tx->ack_or_ba_timeout_collision),
1480 accum_tx->ack_or_ba_timeout_collision);
1481 pos += scnprintf(buf + pos, bufsz - pos,
1482 "agg ba_timeout:\t\t\t%u\t\t\t%u\n",
1483 le32_to_cpu(tx->agg.ba_timeout),
1484 accum_tx->agg.ba_timeout);
1485 pos += scnprintf(buf + pos, bufsz - pos,
1486 "agg ba_resched_frames:\t\t%u\t\t\t%u\n",
1487 le32_to_cpu(tx->agg.ba_reschedule_frames),
1488 accum_tx->agg.ba_reschedule_frames);
1489 pos += scnprintf(buf + pos, bufsz - pos,
1490 "agg scd_query_agg_frame:\t%u\t\t\t%u\n",
1491 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
1492 accum_tx->agg.scd_query_agg_frame_cnt);
1493 pos += scnprintf(buf + pos, bufsz - pos,
1494 "agg scd_query_no_agg:\t\t%u\t\t\t%u\n",
1495 le32_to_cpu(tx->agg.scd_query_no_agg),
1496 accum_tx->agg.scd_query_no_agg);
1497 pos += scnprintf(buf + pos, bufsz - pos,
1498 "agg scd_query_agg:\t\t%u\t\t\t%u\n",
1499 le32_to_cpu(tx->agg.scd_query_agg),
1500 accum_tx->agg.scd_query_agg);
1501 pos += scnprintf(buf + pos, bufsz - pos,
1502 "agg scd_query_mismatch:\t\t%u\t\t\t%u\n",
1503 le32_to_cpu(tx->agg.scd_query_mismatch),
1504 accum_tx->agg.scd_query_mismatch);
1505 pos += scnprintf(buf + pos, bufsz - pos,
1506 "agg frame_not_ready:\t\t%u\t\t\t%u\n",
1507 le32_to_cpu(tx->agg.frame_not_ready),
1508 accum_tx->agg.frame_not_ready);
1509 pos += scnprintf(buf + pos, bufsz - pos,
1510 "agg underrun:\t\t\t%u\t\t\t%u\n",
1511 le32_to_cpu(tx->agg.underrun),
1512 accum_tx->agg.underrun);
1513 pos += scnprintf(buf + pos, bufsz - pos,
1514 "agg bt_prio_kill:\t\t%u\t\t\t%u\n",
1515 le32_to_cpu(tx->agg.bt_prio_kill),
1516 accum_tx->agg.bt_prio_kill);
1517 pos += scnprintf(buf + pos, bufsz - pos,
1518 "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n",
1519 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
1520 accum_tx->agg.rx_ba_rsp_cnt);
1350 1521
1351 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1522 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1352 kfree(buf); 1523 kfree(buf);
@@ -1362,9 +1533,9 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1362 char *buf; 1533 char *buf;
1363 int bufsz = sizeof(struct statistics_general) * 4 + 250; 1534 int bufsz = sizeof(struct statistics_general) * 4 + 250;
1364 ssize_t ret; 1535 ssize_t ret;
1365 struct statistics_general *general; 1536 struct statistics_general *general, *accum_general;
1366 struct statistics_dbg *dbg; 1537 struct statistics_dbg *dbg, *accum_dbg;
1367 struct statistics_div *div; 1538 struct statistics_div *div, *accum_div;
1368 1539
1369 if (!iwl_is_alive(priv)) 1540 if (!iwl_is_alive(priv))
1370 return -EAGAIN; 1541 return -EAGAIN;
@@ -1392,34 +1563,53 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1392 general = &priv->statistics.general; 1563 general = &priv->statistics.general;
1393 dbg = &priv->statistics.general.dbg; 1564 dbg = &priv->statistics.general.dbg;
1394 div = &priv->statistics.general.div; 1565 div = &priv->statistics.general.div;
1566 accum_general = &priv->accum_statistics.general;
1567 accum_dbg = &priv->accum_statistics.general.dbg;
1568 accum_div = &priv->accum_statistics.general.div;
1395 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1569 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1396 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n"); 1570 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n");
1397 pos += scnprintf(buf + pos, bufsz - pos, "temperature: %u\n", 1571 pos += scnprintf(buf + pos, bufsz - pos,
1572 "\t\t\tcurrent\t\t\taccumulative\n");
1573 pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n",
1398 le32_to_cpu(general->temperature)); 1574 le32_to_cpu(general->temperature));
1399 pos += scnprintf(buf + pos, bufsz - pos, "temperature_m: %u\n", 1575 pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n",
1400 le32_to_cpu(general->temperature_m)); 1576 le32_to_cpu(general->temperature_m));
1401 pos += scnprintf(buf + pos, bufsz - pos, "burst_check: %u\n", 1577 pos += scnprintf(buf + pos, bufsz - pos,
1402 le32_to_cpu(dbg->burst_check)); 1578 "burst_check:\t\t\t%u\t\t\t%u\n",
1403 pos += scnprintf(buf + pos, bufsz - pos, "burst_count: %u\n", 1579 le32_to_cpu(dbg->burst_check),
1404 le32_to_cpu(dbg->burst_count)); 1580 accum_dbg->burst_check);
1405 pos += scnprintf(buf + pos, bufsz - pos, "sleep_time: %u\n", 1581 pos += scnprintf(buf + pos, bufsz - pos,
1406 le32_to_cpu(general->sleep_time)); 1582 "burst_count:\t\t\t%u\t\t\t%u\n",
1407 pos += scnprintf(buf + pos, bufsz - pos, "slots_out: %u\n", 1583 le32_to_cpu(dbg->burst_count),
1408 le32_to_cpu(general->slots_out)); 1584 accum_dbg->burst_count);
1409 pos += scnprintf(buf + pos, bufsz - pos, "slots_idle: %u\n", 1585 pos += scnprintf(buf + pos, bufsz - pos,
1410 le32_to_cpu(general->slots_idle)); 1586 "sleep_time:\t\t\t%u\t\t\t%u\n",
1411 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp: %u\n", 1587 le32_to_cpu(general->sleep_time),
1588 accum_general->sleep_time);
1589 pos += scnprintf(buf + pos, bufsz - pos,
1590 "slots_out:\t\t\t%u\t\t\t%u\n",
1591 le32_to_cpu(general->slots_out),
1592 accum_general->slots_out);
1593 pos += scnprintf(buf + pos, bufsz - pos,
1594 "slots_idle:\t\t\t%u\t\t\t%u\n",
1595 le32_to_cpu(general->slots_idle),
1596 accum_general->slots_idle);
1597 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
1412 le32_to_cpu(general->ttl_timestamp)); 1598 le32_to_cpu(general->ttl_timestamp));
1413 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a: %u\n", 1599 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n",
1414 le32_to_cpu(div->tx_on_a)); 1600 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a);
1415 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b: %u\n", 1601 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n",
1416 le32_to_cpu(div->tx_on_b)); 1602 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b);
1417 pos += scnprintf(buf + pos, bufsz - pos, "exec_time: %u\n", 1603 pos += scnprintf(buf + pos, bufsz - pos,
1418 le32_to_cpu(div->exec_time)); 1604 "exec_time:\t\t\t%u\t\t\t%u\n",
1419 pos += scnprintf(buf + pos, bufsz - pos, "probe_time: %u\n", 1605 le32_to_cpu(div->exec_time), accum_div->exec_time);
1420 le32_to_cpu(div->probe_time)); 1606 pos += scnprintf(buf + pos, bufsz - pos,
1421 pos += scnprintf(buf + pos, bufsz - pos, "rx_enable_counter: %u\n", 1607 "probe_time:\t\t\t%u\t\t\t%u\n",
1422 le32_to_cpu(general->rx_enable_counter)); 1608 le32_to_cpu(div->probe_time), accum_div->probe_time);
1609 pos += scnprintf(buf + pos, bufsz - pos,
1610 "rx_enable_counter:\t\t%u\t\t\t%u\n",
1611 le32_to_cpu(general->rx_enable_counter),
1612 accum_general->rx_enable_counter);
1423 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1613 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1424 kfree(buf); 1614 kfree(buf);
1425 return ret; 1615 return ret;
@@ -1615,6 +1805,29 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
1615 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1805 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1616} 1806}
1617 1807
1808static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
1809 char __user *user_buf,
1810 size_t count, loff_t *ppos)
1811{
1812 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1813 char buf[60];
1814 int pos = 0;
1815 const size_t bufsz = sizeof(buf);
1816 u32 pwrsave_status;
1817
1818 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1819 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1820
1821 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1822 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1823 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1824 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1825 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1826 "error");
1827
1828 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1829}
1830
1618DEBUGFS_READ_WRITE_FILE_OPS(rx_statistics); 1831DEBUGFS_READ_WRITE_FILE_OPS(rx_statistics);
1619DEBUGFS_READ_WRITE_FILE_OPS(tx_statistics); 1832DEBUGFS_READ_WRITE_FILE_OPS(tx_statistics);
1620DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 1833DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1626,6 +1839,7 @@ DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1626DEBUGFS_READ_FILE_OPS(sensitivity); 1839DEBUGFS_READ_FILE_OPS(sensitivity);
1627DEBUGFS_READ_FILE_OPS(chain_noise); 1840DEBUGFS_READ_FILE_OPS(chain_noise);
1628DEBUGFS_READ_FILE_OPS(tx_power); 1841DEBUGFS_READ_FILE_OPS(tx_power);
1842DEBUGFS_READ_FILE_OPS(power_save_status);
1629 1843
1630/* 1844/*
1631 * Create the debugfs files and directories 1845 * Create the debugfs files and directories
@@ -1673,6 +1887,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1673 DEBUGFS_ADD_FILE(rx_queue, debug); 1887 DEBUGFS_ADD_FILE(rx_queue, debug);
1674 DEBUGFS_ADD_FILE(tx_queue, debug); 1888 DEBUGFS_ADD_FILE(tx_queue, debug);
1675 DEBUGFS_ADD_FILE(tx_power, debug); 1889 DEBUGFS_ADD_FILE(tx_power, debug);
1890 DEBUGFS_ADD_FILE(power_save_status, debug);
1676 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 1891 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1677 DEBUGFS_ADD_FILE(ucode_rx_stats, debug); 1892 DEBUGFS_ADD_FILE(ucode_rx_stats, debug);
1678 DEBUGFS_ADD_FILE(ucode_tx_stats, debug); 1893 DEBUGFS_ADD_FILE(ucode_tx_stats, debug);
@@ -1725,6 +1940,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1725 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue); 1940 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
1726 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue); 1941 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
1727 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power); 1942 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
1943 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
1728 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 1944 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1729 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 1945 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1730 file_ucode_rx_stats); 1946 file_ucode_rx_stats);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 72946c144be7..e7ce67387662 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -85,8 +85,6 @@ extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
85 __le32 *tx_flags); 85 __le32 *tx_flags);
86extern int iwl5000_calc_rssi(struct iwl_priv *priv, 86extern int iwl5000_calc_rssi(struct iwl_priv *priv,
87 struct iwl_rx_phy_res *rx_resp); 87 struct iwl_rx_phy_res *rx_resp);
88extern int iwl5000_apm_init(struct iwl_priv *priv);
89extern int iwl5000_apm_reset(struct iwl_priv *priv);
90extern void iwl5000_nic_config(struct iwl_priv *priv); 88extern void iwl5000_nic_config(struct iwl_priv *priv);
91extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv); 89extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
92extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, 90extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -147,12 +145,13 @@ extern void iwl5000_temperature(struct iwl_priv *priv);
147#define DEFAULT_LONG_RETRY_LIMIT 4U 145#define DEFAULT_LONG_RETRY_LIMIT 4U
148 146
149struct iwl_rx_mem_buffer { 147struct iwl_rx_mem_buffer {
150 dma_addr_t real_dma_addr; 148 dma_addr_t page_dma;
151 dma_addr_t aligned_dma_addr; 149 struct page *page;
152 struct sk_buff *skb;
153 struct list_head list; 150 struct list_head list;
154}; 151};
155 152
153#define rxb_addr(r) page_address(r->page)
154
156/* defined below */ 155/* defined below */
157struct iwl_device_cmd; 156struct iwl_device_cmd;
158 157
@@ -168,7 +167,7 @@ struct iwl_cmd_meta {
168 */ 167 */
169 void (*callback)(struct iwl_priv *priv, 168 void (*callback)(struct iwl_priv *priv,
170 struct iwl_device_cmd *cmd, 169 struct iwl_device_cmd *cmd,
171 struct sk_buff *skb); 170 struct iwl_rx_packet *pkt);
172 171
173 /* The CMD_SIZE_HUGE flag bit indicates that the command 172 /* The CMD_SIZE_HUGE flag bit indicates that the command
174 * structure is stored at the end of the shared queue memory. */ 173 * structure is stored at the end of the shared queue memory. */
@@ -324,6 +323,12 @@ struct iwl_channel_info {
324 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ 323 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
325#define IWL_MIN_NUM_QUEUES 10 324#define IWL_MIN_NUM_QUEUES 10
326 325
326/*
327 * uCode queue management definitions ...
328 * Queue #4 is the command queue for 3945/4965/5x00/1000/6x00.
329 */
330#define IWL_CMD_QUEUE_NUM 4
331
327/* Power management (not Tx power) structures */ 332/* Power management (not Tx power) structures */
328 333
329enum iwl_pwr_src { 334enum iwl_pwr_src {
@@ -359,7 +364,14 @@ enum {
359 CMD_WANT_SKB = (1 << 2), 364 CMD_WANT_SKB = (1 << 2),
360}; 365};
361 366
362#define IWL_CMD_MAX_PAYLOAD 320 367#define DEF_CMD_PAYLOAD_SIZE 320
368
369/*
370 * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
371 * SNAP header and alignment. It should also be big enough for 802.11
372 * control frames.
373 */
374#define IWL_LINK_HDR_MAX 64
363 375
364/** 376/**
365 * struct iwl_device_cmd 377 * struct iwl_device_cmd
@@ -376,7 +388,8 @@ struct iwl_device_cmd {
376 u16 val16; 388 u16 val16;
377 u32 val32; 389 u32 val32;
378 struct iwl_tx_cmd tx; 390 struct iwl_tx_cmd tx;
379 u8 payload[IWL_CMD_MAX_PAYLOAD]; 391 struct iwl6000_channel_switch_cmd chswitch;
392 u8 payload[DEF_CMD_PAYLOAD_SIZE];
380 } __attribute__ ((packed)) cmd; 393 } __attribute__ ((packed)) cmd;
381} __attribute__ ((packed)); 394} __attribute__ ((packed));
382 395
@@ -385,21 +398,15 @@ struct iwl_device_cmd {
385 398
386struct iwl_host_cmd { 399struct iwl_host_cmd {
387 const void *data; 400 const void *data;
388 struct sk_buff *reply_skb; 401 unsigned long reply_page;
389 void (*callback)(struct iwl_priv *priv, 402 void (*callback)(struct iwl_priv *priv,
390 struct iwl_device_cmd *cmd, 403 struct iwl_device_cmd *cmd,
391 struct sk_buff *skb); 404 struct iwl_rx_packet *pkt);
392 u32 flags; 405 u32 flags;
393 u16 len; 406 u16 len;
394 u8 id; 407 u8 id;
395}; 408};
396 409
397/*
398 * RX related structures and functions
399 */
400#define RX_FREE_BUFFERS 64
401#define RX_LOW_WATERMARK 8
402
403#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 410#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
404#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 411#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
405#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 412#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
@@ -563,6 +570,19 @@ struct iwl_station_entry {
563 struct iwl_hw_key keyinfo; 570 struct iwl_hw_key keyinfo;
564}; 571};
565 572
573/*
574 * iwl_station_priv: Driver's private station information
575 *
576 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
577 * in the structure for use by driver. This structure is places in that
578 * space.
579 *
580 * At the moment use it for the station's rate scaling information.
581 */
582struct iwl_station_priv {
583 struct iwl_lq_sta lq_sta;
584};
585
566/* one for each uCode image (inst/data, boot/init/runtime) */ 586/* one for each uCode image (inst/data, boot/init/runtime) */
567struct fw_desc { 587struct fw_desc {
568 void *v_addr; /* access by driver */ 588 void *v_addr; /* access by driver */
@@ -624,6 +644,10 @@ struct iwl_sensitivity_ranges {
624 u16 auto_corr_max_cck_mrc; 644 u16 auto_corr_max_cck_mrc;
625 u16 auto_corr_min_cck; 645 u16 auto_corr_min_cck;
626 u16 auto_corr_min_cck_mrc; 646 u16 auto_corr_min_cck_mrc;
647
648 u16 barker_corr_th_min;
649 u16 barker_corr_th_min_mrc;
650 u16 nrg_th_cca;
627}; 651};
628 652
629 653
@@ -641,7 +665,7 @@ struct iwl_sensitivity_ranges {
641 * @valid_tx/rx_ant: usable antennas 665 * @valid_tx/rx_ant: usable antennas
642 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 666 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
643 * @max_rxq_log: Log-base-2 of max_rxq_size 667 * @max_rxq_log: Log-base-2 of max_rxq_size
644 * @rx_buf_size: Rx buffer size 668 * @rx_page_order: Rx buffer page order
645 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR 669 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
646 * @max_stations: 670 * @max_stations:
647 * @bcast_sta_id: 671 * @bcast_sta_id:
@@ -664,9 +688,8 @@ struct iwl_hw_params {
664 u8 valid_rx_ant; 688 u8 valid_rx_ant;
665 u16 max_rxq_size; 689 u16 max_rxq_size;
666 u16 max_rxq_log; 690 u16 max_rxq_log;
667 u32 rx_buf_size; 691 u32 rx_page_order;
668 u32 rx_wrt_ptr_reg; 692 u32 rx_wrt_ptr_reg;
669 u32 max_pkt_size;
670 u8 max_stations; 693 u8 max_stations;
671 u8 bcast_sta_id; 694 u8 bcast_sta_id;
672 u8 ht40_channel; 695 u8 ht40_channel;
@@ -713,7 +736,11 @@ static inline int iwl_queue_used(const struct iwl_queue *q, int i)
713 736
714static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) 737static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
715{ 738{
716 /* This is for scan command, the big buffer at end of command array */ 739 /*
740 * This is for init calibration result and scan command which
741 * required buffer > TFD_MAX_PAYLOAD_SIZE,
742 * the big buffer at end of command array
743 */
717 if (is_huge) 744 if (is_huge)
718 return q->n_window; /* must be power of 2 */ 745 return q->n_window; /* must be power of 2 */
719 746
@@ -845,6 +872,10 @@ struct iwl_sensitivity_data {
845 s32 nrg_auto_corr_silence_diff; 872 s32 nrg_auto_corr_silence_diff;
846 u32 num_in_cck_no_fa; 873 u32 num_in_cck_no_fa;
847 u32 nrg_th_ofdm; 874 u32 nrg_th_ofdm;
875
876 u16 barker_corr_th_min;
877 u16 barker_corr_th_min_mrc;
878 u16 nrg_th_cca;
848}; 879};
849 880
850/* Chain noise (differential Rx gain) calib data */ 881/* Chain noise (differential Rx gain) calib data */
@@ -961,8 +992,6 @@ struct traffic_stats {
961}; 992};
962#endif 993#endif
963 994
964#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
965
966struct iwl_priv { 995struct iwl_priv {
967 996
968 /* ieee device used by generic ieee processing code */ 997 /* ieee device used by generic ieee processing code */
@@ -976,7 +1005,7 @@ struct iwl_priv {
976 int frames_count; 1005 int frames_count;
977 1006
978 enum ieee80211_band band; 1007 enum ieee80211_band band;
979 int alloc_rxb_skb; 1008 int alloc_rxb_page;
980 1009
981 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 1010 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
982 struct iwl_rx_mem_buffer *rxb); 1011 struct iwl_rx_mem_buffer *rxb);
@@ -1081,7 +1110,6 @@ struct iwl_priv {
1081 u8 last_phy_res[100]; 1110 u8 last_phy_res[100];
1082 1111
1083 /* Rate scaling data */ 1112 /* Rate scaling data */
1084 s8 data_retry_limit;
1085 u8 retry_rate; 1113 u8 retry_rate;
1086 1114
1087 wait_queue_head_t wait_command_queue; 1115 wait_queue_head_t wait_command_queue;
@@ -1090,7 +1118,7 @@ struct iwl_priv {
1090 1118
1091 /* Rx and Tx DMA processing queues */ 1119 /* Rx and Tx DMA processing queues */
1092 struct iwl_rx_queue rxq; 1120 struct iwl_rx_queue rxq;
1093 struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES]; 1121 struct iwl_tx_queue *txq;
1094 unsigned long txq_ctx_active_msk; 1122 unsigned long txq_ctx_active_msk;
1095 struct iwl_dma_ptr kw; /* keep warm address */ 1123 struct iwl_dma_ptr kw; /* keep warm address */
1096 struct iwl_dma_ptr scd_bc_tbls; 1124 struct iwl_dma_ptr scd_bc_tbls;
@@ -1113,7 +1141,9 @@ struct iwl_priv {
1113 struct iwl_tt_mgmt thermal_throttle; 1141 struct iwl_tt_mgmt thermal_throttle;
1114 1142
1115 struct iwl_notif_statistics statistics; 1143 struct iwl_notif_statistics statistics;
1116 unsigned long last_statistics_time; 1144#ifdef CONFIG_IWLWIFI_DEBUG
1145 struct iwl_notif_statistics accum_statistics;
1146#endif
1117 1147
1118 /* context information */ 1148 /* context information */
1119 u16 rates_mask; 1149 u16 rates_mask;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 2e8c40576d22..9429cb1c69bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -358,6 +358,14 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
358 udelay(5); 358 udelay(5);
359 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, 359 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
360 APMG_PS_CTRL_VAL_RESET_REQ); 360 APMG_PS_CTRL_VAL_RESET_REQ);
361
362 /*
363 * CSR auto clock gate disable bit -
364 * this is only applicable for HW with OTP shadow RAM
365 */
366 if (priv->cfg->shadow_ram_support)
367 iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
368 CSR_RESET_LINK_PWR_MGMT_DISABLED);
361 } 369 }
362 return ret; 370 return ret;
363} 371}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index b363c96fd6c6..5ba5a4e9e49a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -261,9 +261,12 @@ struct iwl_eeprom_enhanced_txpwr {
261/* 1000 Specific */ 261/* 1000 Specific */
262#define EEPROM_1000_EEPROM_VERSION (0x15C) 262#define EEPROM_1000_EEPROM_VERSION (0x15C)
263 263
264/* 60x0 Specific */ 264/* 6x00 Specific */
265#define EEPROM_6000_EEPROM_VERSION (0x434) 265#define EEPROM_6000_EEPROM_VERSION (0x434)
266 266
267/* 6x50 Specific */
268#define EEPROM_6050_EEPROM_VERSION (0x532)
269
267/* OTP */ 270/* OTP */
268/* lower blocks contain EEPROM image and calibration data */ 271/* lower blocks contain EEPROM image and calibration data */
269#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ 272#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 532c8d6cd8da..f2a60dc4109f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -92,6 +92,8 @@ const char *get_cmd_string(u8 cmd)
92 IWL_CMD(CALIBRATION_RES_NOTIFICATION); 92 IWL_CMD(CALIBRATION_RES_NOTIFICATION);
93 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION); 93 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
94 IWL_CMD(REPLY_TX_POWER_DBM_CMD); 94 IWL_CMD(REPLY_TX_POWER_DBM_CMD);
95 IWL_CMD(TEMPERATURE_NOTIFICATION);
96 IWL_CMD(TX_ANT_CONFIGURATION_CMD);
95 default: 97 default:
96 return "UNKNOWN"; 98 return "UNKNOWN";
97 99
@@ -103,17 +105,8 @@ EXPORT_SYMBOL(get_cmd_string);
103 105
104static void iwl_generic_cmd_callback(struct iwl_priv *priv, 106static void iwl_generic_cmd_callback(struct iwl_priv *priv,
105 struct iwl_device_cmd *cmd, 107 struct iwl_device_cmd *cmd,
106 struct sk_buff *skb) 108 struct iwl_rx_packet *pkt)
107{ 109{
108 struct iwl_rx_packet *pkt = NULL;
109
110 if (!skb) {
111 IWL_ERR(priv, "Error: Response NULL in %s.\n",
112 get_cmd_string(cmd->hdr.cmd));
113 return;
114 }
115
116 pkt = (struct iwl_rx_packet *)skb->data;
117 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 110 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
118 IWL_ERR(priv, "Bad return from %s (0x%08X)\n", 111 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
119 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 112 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
@@ -215,7 +208,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
215 ret = -EIO; 208 ret = -EIO;
216 goto fail; 209 goto fail;
217 } 210 }
218 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_skb) { 211 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
219 IWL_ERR(priv, "Error: Response NULL in '%s'\n", 212 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
220 get_cmd_string(cmd->id)); 213 get_cmd_string(cmd->id));
221 ret = -EIO; 214 ret = -EIO;
@@ -237,9 +230,9 @@ cancel:
237 ~CMD_WANT_SKB; 230 ~CMD_WANT_SKB;
238 } 231 }
239fail: 232fail:
240 if (cmd->reply_skb) { 233 if (cmd->reply_page) {
241 dev_kfree_skb_any(cmd->reply_skb); 234 free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
242 cmd->reply_skb = NULL; 235 cmd->reply_page = 0;
243 } 236 }
244out: 237out:
245 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status); 238 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
@@ -272,7 +265,7 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
272 u8 id, u16 len, const void *data, 265 u8 id, u16 len, const void *data,
273 void (*callback)(struct iwl_priv *priv, 266 void (*callback)(struct iwl_priv *priv,
274 struct iwl_device_cmd *cmd, 267 struct iwl_device_cmd *cmd,
275 struct sk_buff *skb)) 268 struct iwl_rx_packet *pkt))
276{ 269{
277 struct iwl_host_cmd cmd = { 270 struct iwl_host_cmd cmd = {
278 .id = id, 271 .id = id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 9c6b14952061..9bce2c1625e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(no_sleep_autoadjust,
66 66
67struct iwl_power_vec_entry { 67struct iwl_power_vec_entry {
68 struct iwl_powertable_cmd cmd; 68 struct iwl_powertable_cmd cmd;
69 u8 no_dtim; 69 u8 no_dtim; /* number of skip dtim */
70}; 70};
71 71
72#define IWL_DTIM_RANGE_0_MAX 2 72#define IWL_DTIM_RANGE_0_MAX 2
@@ -83,8 +83,9 @@ struct iwl_power_vec_entry {
83 cpu_to_le32(X4)} 83 cpu_to_le32(X4)}
84/* default power management (not Tx power) table values */ 84/* default power management (not Tx power) table values */
85/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */ 85/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
86/* DTIM 0 - 2 */
86static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = { 87static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
87 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 88 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
88 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 89 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
89 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0}, 90 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
90 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1}, 91 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
@@ -93,15 +94,17 @@ static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
93 94
94 95
95/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ 96/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
97/* DTIM 3 - 10 */
96static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = { 98static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
97 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, 99 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
98 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, 100 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
99 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0}, 101 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
100 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1}, 102 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
101 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2} 103 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
102}; 104};
103 105
104/* for DTIM period > IWL_DTIM_RANGE_1_MAX */ 106/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
107/* DTIM 11 - */
105static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = { 108static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
106 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, 109 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
107 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, 110 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
@@ -115,13 +118,15 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
115 enum iwl_power_level lvl, int period) 118 enum iwl_power_level lvl, int period)
116{ 119{
117 const struct iwl_power_vec_entry *table; 120 const struct iwl_power_vec_entry *table;
118 int max_sleep, i; 121 int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
119 bool skip; 122 int i;
123 u8 skip;
124 u32 slp_itrvl;
120 125
121 table = range_2; 126 table = range_2;
122 if (period < IWL_DTIM_RANGE_1_MAX) 127 if (period <= IWL_DTIM_RANGE_1_MAX)
123 table = range_1; 128 table = range_1;
124 if (period < IWL_DTIM_RANGE_0_MAX) 129 if (period <= IWL_DTIM_RANGE_0_MAX)
125 table = range_0; 130 table = range_0;
126 131
127 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM); 132 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);
@@ -129,34 +134,60 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
129 *cmd = table[lvl].cmd; 134 *cmd = table[lvl].cmd;
130 135
131 if (period == 0) { 136 if (period == 0) {
132 skip = false; 137 skip = 0;
133 period = 1; 138 period = 1;
139 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
140 max_sleep[i] = 1;
141
134 } else { 142 } else {
135 skip = !!table[lvl].no_dtim; 143 skip = table[lvl].no_dtim;
144 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
145 max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
146 max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
136 } 147 }
137 148
138 if (skip) { 149 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
139 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; 150 /* figure out the listen interval based on dtim period and skip */
140 max_sleep = le32_to_cpu(slp_itrvl); 151 if (slp_itrvl == 0xFF)
141 if (max_sleep == 0xFF) 152 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
142 max_sleep = period * (skip + 1); 153 cpu_to_le32(period * (skip + 1));
143 else if (max_sleep > period) 154
144 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; 155 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
156 if (slp_itrvl > period)
157 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
158 cpu_to_le32((slp_itrvl / period) * period);
159
160 if (skip)
145 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; 161 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
146 } else { 162 else
147 max_sleep = period;
148 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; 163 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
149 }
150 164
151 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) 165 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
152 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) 166 if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
153 cmd->sleep_interval[i] = cpu_to_le32(max_sleep); 167 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
168 cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);
169
170 /* enforce max sleep interval */
171 for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
172 if (le32_to_cpu(cmd->sleep_interval[i]) >
173 (max_sleep[i] * period))
174 cmd->sleep_interval[i] =
175 cpu_to_le32(max_sleep[i] * period);
176 if (i != (IWL_POWER_VEC_SIZE - 1)) {
177 if (le32_to_cpu(cmd->sleep_interval[i]) >
178 le32_to_cpu(cmd->sleep_interval[i+1]))
179 cmd->sleep_interval[i] =
180 cmd->sleep_interval[i+1];
181 }
182 }
154 183
155 if (priv->power_data.pci_pm) 184 if (priv->power_data.pci_pm)
156 cmd->flags |= IWL_POWER_PCI_PM_MSK; 185 cmd->flags |= IWL_POWER_PCI_PM_MSK;
157 else 186 else
158 cmd->flags &= ~IWL_POWER_PCI_PM_MSK; 187 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
159 188
189 IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
190 skip, period);
160 IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1); 191 IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
161} 192}
162 193
@@ -862,9 +893,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
862 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); 893 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
863 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); 894 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
864 895
865 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 896 if (priv->cfg->adv_thermal_throttle) {
866 case CSR_HW_REV_TYPE_6x00:
867 case CSR_HW_REV_TYPE_6x50:
868 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n"); 897 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
869 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) * 898 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
870 IWL_TI_STATE_MAX, GFP_KERNEL); 899 IWL_TI_STATE_MAX, GFP_KERNEL);
@@ -897,11 +926,9 @@ void iwl_tt_initialize(struct iwl_priv *priv)
897 &restriction_range[0], size); 926 &restriction_range[0], size);
898 priv->thermal_throttle.advanced_tt = true; 927 priv->thermal_throttle.advanced_tt = true;
899 } 928 }
900 break; 929 } else {
901 default:
902 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n"); 930 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
903 priv->thermal_throttle.advanced_tt = false; 931 priv->thermal_throttle.advanced_tt = false;
904 break;
905 } 932 }
906} 933}
907EXPORT_SYMBOL(iwl_tt_initialize); 934EXPORT_SYMBOL(iwl_tt_initialize);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 493626bcd3ec..e5339c9ad13e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -200,7 +200,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
200 list_del(element); 200 list_del(element);
201 201
202 /* Point to Rx buffer via next RBD in circular buffer */ 202 /* Point to Rx buffer via next RBD in circular buffer */
203 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr); 203 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
204 rxq->queue[rxq->write] = rxb; 204 rxq->queue[rxq->write] = rxb;
205 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 205 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
206 rxq->free_count--; 206 rxq->free_count--;
@@ -239,8 +239,9 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
239 struct iwl_rx_queue *rxq = &priv->rxq; 239 struct iwl_rx_queue *rxq = &priv->rxq;
240 struct list_head *element; 240 struct list_head *element;
241 struct iwl_rx_mem_buffer *rxb; 241 struct iwl_rx_mem_buffer *rxb;
242 struct sk_buff *skb; 242 struct page *page;
243 unsigned long flags; 243 unsigned long flags;
244 gfp_t gfp_mask = priority;
244 245
245 while (1) { 246 while (1) {
246 spin_lock_irqsave(&rxq->lock, flags); 247 spin_lock_irqsave(&rxq->lock, flags);
@@ -251,30 +252,35 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
251 spin_unlock_irqrestore(&rxq->lock, flags); 252 spin_unlock_irqrestore(&rxq->lock, flags);
252 253
253 if (rxq->free_count > RX_LOW_WATERMARK) 254 if (rxq->free_count > RX_LOW_WATERMARK)
254 priority |= __GFP_NOWARN; 255 gfp_mask |= __GFP_NOWARN;
255 /* Alloc a new receive buffer */ 256
256 skb = alloc_skb(priv->hw_params.rx_buf_size + 256, 257 if (priv->hw_params.rx_page_order > 0)
257 priority); 258 gfp_mask |= __GFP_COMP;
258 259
259 if (!skb) { 260 /* Alloc a new receive buffer */
261 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
262 if (!page) {
260 if (net_ratelimit()) 263 if (net_ratelimit())
261 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); 264 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
265 "order: %d\n",
266 priv->hw_params.rx_page_order);
267
262 if ((rxq->free_count <= RX_LOW_WATERMARK) && 268 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
263 net_ratelimit()) 269 net_ratelimit())
264 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", 270 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
265 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", 271 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
266 rxq->free_count); 272 rxq->free_count);
267 /* We don't reschedule replenish work here -- we will 273 /* We don't reschedule replenish work here -- we will
268 * call the restock method and if it still needs 274 * call the restock method and if it still needs
269 * more buffers it will schedule replenish */ 275 * more buffers it will schedule replenish */
270 break; 276 return;
271 } 277 }
272 278
273 spin_lock_irqsave(&rxq->lock, flags); 279 spin_lock_irqsave(&rxq->lock, flags);
274 280
275 if (list_empty(&rxq->rx_used)) { 281 if (list_empty(&rxq->rx_used)) {
276 spin_unlock_irqrestore(&rxq->lock, flags); 282 spin_unlock_irqrestore(&rxq->lock, flags);
277 dev_kfree_skb_any(skb); 283 __free_pages(page, priv->hw_params.rx_page_order);
278 return; 284 return;
279 } 285 }
280 element = rxq->rx_used.next; 286 element = rxq->rx_used.next;
@@ -283,24 +289,21 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
283 289
284 spin_unlock_irqrestore(&rxq->lock, flags); 290 spin_unlock_irqrestore(&rxq->lock, flags);
285 291
286 rxb->skb = skb; 292 rxb->page = page;
287 /* Get physical address of RB/SKB */ 293 /* Get physical address of the RB */
288 rxb->real_dma_addr = pci_map_single( 294 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
289 priv->pci_dev, 295 PAGE_SIZE << priv->hw_params.rx_page_order,
290 rxb->skb->data, 296 PCI_DMA_FROMDEVICE);
291 priv->hw_params.rx_buf_size + 256,
292 PCI_DMA_FROMDEVICE);
293 /* dma address must be no more than 36 bits */ 297 /* dma address must be no more than 36 bits */
294 BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36)); 298 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
295 /* and also 256 byte aligned! */ 299 /* and also 256 byte aligned! */
296 rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256); 300 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
297 skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
298 301
299 spin_lock_irqsave(&rxq->lock, flags); 302 spin_lock_irqsave(&rxq->lock, flags);
300 303
301 list_add_tail(&rxb->list, &rxq->rx_free); 304 list_add_tail(&rxb->list, &rxq->rx_free);
302 rxq->free_count++; 305 rxq->free_count++;
303 priv->alloc_rxb_skb++; 306 priv->alloc_rxb_page++;
304 307
305 spin_unlock_irqrestore(&rxq->lock, flags); 308 spin_unlock_irqrestore(&rxq->lock, flags);
306 } 309 }
@@ -336,12 +339,14 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
336{ 339{
337 int i; 340 int i;
338 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 341 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
339 if (rxq->pool[i].skb != NULL) { 342 if (rxq->pool[i].page != NULL) {
340 pci_unmap_single(priv->pci_dev, 343 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
341 rxq->pool[i].real_dma_addr, 344 PAGE_SIZE << priv->hw_params.rx_page_order,
342 priv->hw_params.rx_buf_size + 256, 345 PCI_DMA_FROMDEVICE);
343 PCI_DMA_FROMDEVICE); 346 __free_pages(rxq->pool[i].page,
344 dev_kfree_skb(rxq->pool[i].skb); 347 priv->hw_params.rx_page_order);
348 rxq->pool[i].page = NULL;
349 priv->alloc_rxb_page--;
345 } 350 }
346 } 351 }
347 352
@@ -405,14 +410,14 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
405 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 410 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
406 /* In the reset function, these buffers may have been allocated 411 /* In the reset function, these buffers may have been allocated
407 * to an SKB, so we need to unmap and free potential storage */ 412 * to an SKB, so we need to unmap and free potential storage */
408 if (rxq->pool[i].skb != NULL) { 413 if (rxq->pool[i].page != NULL) {
409 pci_unmap_single(priv->pci_dev, 414 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
410 rxq->pool[i].real_dma_addr, 415 PAGE_SIZE << priv->hw_params.rx_page_order,
411 priv->hw_params.rx_buf_size + 256, 416 PCI_DMA_FROMDEVICE);
412 PCI_DMA_FROMDEVICE); 417 priv->alloc_rxb_page--;
413 priv->alloc_rxb_skb--; 418 __free_pages(rxq->pool[i].page,
414 dev_kfree_skb(rxq->pool[i].skb); 419 priv->hw_params.rx_page_order);
415 rxq->pool[i].skb = NULL; 420 rxq->pool[i].page = NULL;
416 } 421 }
417 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 422 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
418 } 423 }
@@ -491,7 +496,7 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
491 struct iwl_rx_mem_buffer *rxb) 496 struct iwl_rx_mem_buffer *rxb)
492 497
493{ 498{
494 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 499 struct iwl_rx_packet *pkt = rxb_addr(rxb);
495 struct iwl_missed_beacon_notif *missed_beacon; 500 struct iwl_missed_beacon_notif *missed_beacon;
496 501
497 missed_beacon = &pkt->u.missed_beacon; 502 missed_beacon = &pkt->u.missed_beacon;
@@ -548,13 +553,51 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
548 priv->last_rx_noise); 553 priv->last_rx_noise);
549} 554}
550 555
556#ifdef CONFIG_IWLWIFI_DEBUG
557/*
558 * based on the assumption of all statistics counter are in DWORD
559 * FIXME: This function is for debugging, do not deal with
560 * the case of counters roll-over.
561 */
562static void iwl_accumulative_statistics(struct iwl_priv *priv,
563 __le32 *stats)
564{
565 int i;
566 __le32 *prev_stats;
567 u32 *accum_stats;
568
569 prev_stats = (__le32 *)&priv->statistics;
570 accum_stats = (u32 *)&priv->accum_statistics;
571
572 for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
573 i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
574 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
575 *accum_stats += (le32_to_cpu(*stats) -
576 le32_to_cpu(*prev_stats));
577
578 /* reset accumulative statistics for "no-counter" type statistics */
579 priv->accum_statistics.general.temperature =
580 priv->statistics.general.temperature;
581 priv->accum_statistics.general.temperature_m =
582 priv->statistics.general.temperature_m;
583 priv->accum_statistics.general.ttl_timestamp =
584 priv->statistics.general.ttl_timestamp;
585 priv->accum_statistics.tx.tx_power.ant_a =
586 priv->statistics.tx.tx_power.ant_a;
587 priv->accum_statistics.tx.tx_power.ant_b =
588 priv->statistics.tx.tx_power.ant_b;
589 priv->accum_statistics.tx.tx_power.ant_c =
590 priv->statistics.tx.tx_power.ant_c;
591}
592#endif
593
551#define REG_RECALIB_PERIOD (60) 594#define REG_RECALIB_PERIOD (60)
552 595
553void iwl_rx_statistics(struct iwl_priv *priv, 596void iwl_rx_statistics(struct iwl_priv *priv,
554 struct iwl_rx_mem_buffer *rxb) 597 struct iwl_rx_mem_buffer *rxb)
555{ 598{
556 int change; 599 int change;
557 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 600 struct iwl_rx_packet *pkt = rxb_addr(rxb);
558 601
559 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 602 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
560 (int)sizeof(priv->statistics), 603 (int)sizeof(priv->statistics),
@@ -566,6 +609,9 @@ void iwl_rx_statistics(struct iwl_priv *priv,
566 STATISTICS_REPLY_FLG_HT40_MODE_MSK) != 609 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
567 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); 610 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
568 611
612#ifdef CONFIG_IWLWIFI_DEBUG
613 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
614#endif
569 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); 615 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
570 616
571 set_bit(STATUS_STATISTICS, &priv->status); 617 set_bit(STATUS_STATISTICS, &priv->status);
@@ -582,9 +628,6 @@ void iwl_rx_statistics(struct iwl_priv *priv,
582 iwl_rx_calc_noise(priv); 628 iwl_rx_calc_noise(priv);
583 queue_work(priv->workqueue, &priv->run_time_calib_work); 629 queue_work(priv->workqueue, &priv->run_time_calib_work);
584 } 630 }
585
586 iwl_leds_background(priv);
587
588 if (priv->cfg->ops->lib->temp_ops.temperature && change) 631 if (priv->cfg->ops->lib->temp_ops.temperature && change)
589 priv->cfg->ops->lib->temp_ops.temperature(priv); 632 priv->cfg->ops->lib->temp_ops.temperature(priv);
590} 633}
@@ -878,6 +921,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
878 struct iwl_rx_mem_buffer *rxb, 921 struct iwl_rx_mem_buffer *rxb,
879 struct ieee80211_rx_status *stats) 922 struct ieee80211_rx_status *stats)
880{ 923{
924 struct sk_buff *skb;
925 int ret = 0;
926 __le16 fc = hdr->frame_control;
927
881 /* We only process data packets if the interface is open */ 928 /* We only process data packets if the interface is open */
882 if (unlikely(!priv->is_open)) { 929 if (unlikely(!priv->is_open)) {
883 IWL_DEBUG_DROP_LIMIT(priv, 930 IWL_DEBUG_DROP_LIMIT(priv,
@@ -890,15 +937,43 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
890 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 937 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
891 return; 938 return;
892 939
893 /* Resize SKB from mac header to end of packet */ 940 skb = alloc_skb(IWL_LINK_HDR_MAX, GFP_ATOMIC);
894 skb_reserve(rxb->skb, (void *)hdr - (void *)rxb->skb->data); 941 if (!skb) {
895 skb_put(rxb->skb, len); 942 IWL_ERR(priv, "alloc_skb failed\n");
943 return;
944 }
945
946 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
947
948 /* mac80211 currently doesn't support paged SKB. Convert it to
949 * linear SKB for management frame and data frame requires
950 * software decryption or software defragementation. */
951 if (ieee80211_is_mgmt(fc) ||
952 ieee80211_has_protected(fc) ||
953 ieee80211_has_morefrags(fc) ||
954 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
955 ret = skb_linearize(skb);
956 else
957 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
958 0 : -ENOMEM;
959
960 if (ret) {
961 kfree_skb(skb);
962 goto out;
963 }
964
965 /*
966 * XXX: We cannot touch the page and its virtual memory (hdr) after
967 * here. It might have already been freed by the above skb change.
968 */
969
970 iwl_update_stats(priv, false, fc, len);
971 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
896 972
897 iwl_update_stats(priv, false, hdr->frame_control, len); 973 ieee80211_rx(priv->hw, skb);
898 memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats)); 974 out:
899 ieee80211_rx_irqsafe(priv->hw, rxb->skb); 975 priv->alloc_rxb_page--;
900 priv->alloc_rxb_skb--; 976 rxb->page = NULL;
901 rxb->skb = NULL;
902} 977}
903 978
904/* This is necessary only for a number of statistics, see the caller. */ 979/* This is necessary only for a number of statistics, see the caller. */
@@ -926,7 +1001,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
926{ 1001{
927 struct ieee80211_hdr *header; 1002 struct ieee80211_hdr *header;
928 struct ieee80211_rx_status rx_status; 1003 struct ieee80211_rx_status rx_status;
929 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1004 struct iwl_rx_packet *pkt = rxb_addr(rxb);
930 struct iwl_rx_phy_res *phy_res; 1005 struct iwl_rx_phy_res *phy_res;
931 __le32 rx_pkt_status; 1006 __le32 rx_pkt_status;
932 struct iwl4965_rx_mpdu_res_start *amsdu; 1007 struct iwl4965_rx_mpdu_res_start *amsdu;
@@ -1087,7 +1162,7 @@ EXPORT_SYMBOL(iwl_rx_reply_rx);
1087void iwl_rx_reply_rx_phy(struct iwl_priv *priv, 1162void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
1088 struct iwl_rx_mem_buffer *rxb) 1163 struct iwl_rx_mem_buffer *rxb)
1089{ 1164{
1090 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1165 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1091 priv->last_phy_res[0] = 1; 1166 priv->last_phy_res[0] = 1;
1092 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), 1167 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1093 sizeof(struct iwl_rx_phy_res)); 1168 sizeof(struct iwl_rx_phy_res));
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 41f9a0621250..4fca65a2fe9c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(iwl_scan_cancel_timeout);
111static int iwl_send_scan_abort(struct iwl_priv *priv) 111static int iwl_send_scan_abort(struct iwl_priv *priv)
112{ 112{
113 int ret = 0; 113 int ret = 0;
114 struct iwl_rx_packet *res; 114 struct iwl_rx_packet *pkt;
115 struct iwl_host_cmd cmd = { 115 struct iwl_host_cmd cmd = {
116 .id = REPLY_SCAN_ABORT_CMD, 116 .id = REPLY_SCAN_ABORT_CMD,
117 .flags = CMD_WANT_SKB, 117 .flags = CMD_WANT_SKB,
@@ -131,21 +131,21 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
131 return ret; 131 return ret;
132 } 132 }
133 133
134 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 134 pkt = (struct iwl_rx_packet *)cmd.reply_page;
135 if (res->u.status != CAN_ABORT_STATUS) { 135 if (pkt->u.status != CAN_ABORT_STATUS) {
136 /* The scan abort will return 1 for success or 136 /* The scan abort will return 1 for success or
137 * 2 for "failure". A failure condition can be 137 * 2 for "failure". A failure condition can be
138 * due to simply not being in an active scan which 138 * due to simply not being in an active scan which
139 * can occur if we send the scan abort before we 139 * can occur if we send the scan abort before we
140 * the microcode has notified us that a scan is 140 * the microcode has notified us that a scan is
141 * completed. */ 141 * completed. */
142 IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", res->u.status); 142 IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", pkt->u.status);
143 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 143 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
144 clear_bit(STATUS_SCAN_HW, &priv->status); 144 clear_bit(STATUS_SCAN_HW, &priv->status);
145 } 145 }
146 146
147 priv->alloc_rxb_skb--; 147 priv->alloc_rxb_page--;
148 dev_kfree_skb_any(cmd.reply_skb); 148 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
149 149
150 return ret; 150 return ret;
151} 151}
@@ -155,7 +155,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
155 struct iwl_rx_mem_buffer *rxb) 155 struct iwl_rx_mem_buffer *rxb)
156{ 156{
157#ifdef CONFIG_IWLWIFI_DEBUG 157#ifdef CONFIG_IWLWIFI_DEBUG
158 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 158 struct iwl_rx_packet *pkt = rxb_addr(rxb);
159 struct iwl_scanreq_notification *notif = 159 struct iwl_scanreq_notification *notif =
160 (struct iwl_scanreq_notification *)pkt->u.raw; 160 (struct iwl_scanreq_notification *)pkt->u.raw;
161 161
@@ -167,7 +167,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
167static void iwl_rx_scan_start_notif(struct iwl_priv *priv, 167static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
168 struct iwl_rx_mem_buffer *rxb) 168 struct iwl_rx_mem_buffer *rxb)
169{ 169{
170 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 170 struct iwl_rx_packet *pkt = rxb_addr(rxb);
171 struct iwl_scanstart_notification *notif = 171 struct iwl_scanstart_notification *notif =
172 (struct iwl_scanstart_notification *)pkt->u.raw; 172 (struct iwl_scanstart_notification *)pkt->u.raw;
173 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); 173 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
@@ -186,7 +186,7 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
186 struct iwl_rx_mem_buffer *rxb) 186 struct iwl_rx_mem_buffer *rxb)
187{ 187{
188#ifdef CONFIG_IWLWIFI_DEBUG 188#ifdef CONFIG_IWLWIFI_DEBUG
189 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 189 struct iwl_rx_packet *pkt = rxb_addr(rxb);
190 struct iwl_scanresults_notification *notif = 190 struct iwl_scanresults_notification *notif =
191 (struct iwl_scanresults_notification *)pkt->u.raw; 191 (struct iwl_scanresults_notification *)pkt->u.raw;
192 192
@@ -213,7 +213,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
213 struct iwl_rx_mem_buffer *rxb) 213 struct iwl_rx_mem_buffer *rxb)
214{ 214{
215#ifdef CONFIG_IWLWIFI_DEBUG 215#ifdef CONFIG_IWLWIFI_DEBUG
216 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
217 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 217 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
218 218
219 IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 219 IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
index 022bcf115731..1ea5cd345fe8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
@@ -177,7 +177,7 @@ static int iwl_get_measurement(struct iwl_priv *priv,
177static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 177static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb) 178 struct iwl_rx_mem_buffer *rxb)
179{ 179{
180 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 180 struct iwl_rx_packet *pkt = rxb_addr(rxb);
181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); 181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
182 182
183 if (!report->state) { 183 if (!report->state) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index c6633fec8216..dc74c16d36a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -99,32 +99,25 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
99 99
100static void iwl_add_sta_callback(struct iwl_priv *priv, 100static void iwl_add_sta_callback(struct iwl_priv *priv,
101 struct iwl_device_cmd *cmd, 101 struct iwl_device_cmd *cmd,
102 struct sk_buff *skb) 102 struct iwl_rx_packet *pkt)
103{ 103{
104 struct iwl_rx_packet *res = NULL;
105 struct iwl_addsta_cmd *addsta = 104 struct iwl_addsta_cmd *addsta =
106 (struct iwl_addsta_cmd *)cmd->cmd.payload; 105 (struct iwl_addsta_cmd *)cmd->cmd.payload;
107 u8 sta_id = addsta->sta.sta_id; 106 u8 sta_id = addsta->sta.sta_id;
108 107
109 if (!skb) { 108 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
110 IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
111 return;
112 }
113
114 res = (struct iwl_rx_packet *)skb->data;
115 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
116 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 109 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
117 res->hdr.flags); 110 pkt->hdr.flags);
118 return; 111 return;
119 } 112 }
120 113
121 switch (res->u.add_sta.status) { 114 switch (pkt->u.add_sta.status) {
122 case ADD_STA_SUCCESS_MSK: 115 case ADD_STA_SUCCESS_MSK:
123 iwl_sta_ucode_activate(priv, sta_id); 116 iwl_sta_ucode_activate(priv, sta_id);
124 /* fall through */ 117 /* fall through */
125 default: 118 default:
126 IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", 119 IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
127 res->u.add_sta.status); 120 pkt->u.add_sta.status);
128 break; 121 break;
129 } 122 }
130} 123}
@@ -132,7 +125,7 @@ static void iwl_add_sta_callback(struct iwl_priv *priv,
132int iwl_send_add_sta(struct iwl_priv *priv, 125int iwl_send_add_sta(struct iwl_priv *priv,
133 struct iwl_addsta_cmd *sta, u8 flags) 126 struct iwl_addsta_cmd *sta, u8 flags)
134{ 127{
135 struct iwl_rx_packet *res = NULL; 128 struct iwl_rx_packet *pkt = NULL;
136 int ret = 0; 129 int ret = 0;
137 u8 data[sizeof(*sta)]; 130 u8 data[sizeof(*sta)];
138 struct iwl_host_cmd cmd = { 131 struct iwl_host_cmd cmd = {
@@ -152,15 +145,15 @@ int iwl_send_add_sta(struct iwl_priv *priv,
152 if (ret || (flags & CMD_ASYNC)) 145 if (ret || (flags & CMD_ASYNC))
153 return ret; 146 return ret;
154 147
155 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 148 pkt = (struct iwl_rx_packet *)cmd.reply_page;
156 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 149 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
157 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 150 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
158 res->hdr.flags); 151 pkt->hdr.flags);
159 ret = -EIO; 152 ret = -EIO;
160 } 153 }
161 154
162 if (ret == 0) { 155 if (ret == 0) {
163 switch (res->u.add_sta.status) { 156 switch (pkt->u.add_sta.status) {
164 case ADD_STA_SUCCESS_MSK: 157 case ADD_STA_SUCCESS_MSK:
165 iwl_sta_ucode_activate(priv, sta->sta.sta_id); 158 iwl_sta_ucode_activate(priv, sta->sta.sta_id);
166 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); 159 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
@@ -172,8 +165,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
172 } 165 }
173 } 166 }
174 167
175 priv->alloc_rxb_skb--; 168 priv->alloc_rxb_page--;
176 dev_kfree_skb_any(cmd.reply_skb); 169 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
177 170
178 return ret; 171 return ret;
179} 172}
@@ -324,26 +317,19 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
324 317
325static void iwl_remove_sta_callback(struct iwl_priv *priv, 318static void iwl_remove_sta_callback(struct iwl_priv *priv,
326 struct iwl_device_cmd *cmd, 319 struct iwl_device_cmd *cmd,
327 struct sk_buff *skb) 320 struct iwl_rx_packet *pkt)
328{ 321{
329 struct iwl_rx_packet *res = NULL;
330 struct iwl_rem_sta_cmd *rm_sta = 322 struct iwl_rem_sta_cmd *rm_sta =
331 (struct iwl_rem_sta_cmd *)cmd->cmd.payload; 323 (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
332 const char *addr = rm_sta->addr; 324 const char *addr = rm_sta->addr;
333 325
334 if (!skb) { 326 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
335 IWL_ERR(priv, "Error: Response NULL in REPLY_REMOVE_STA.\n");
336 return;
337 }
338
339 res = (struct iwl_rx_packet *)skb->data;
340 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
341 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 327 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
342 res->hdr.flags); 328 pkt->hdr.flags);
343 return; 329 return;
344 } 330 }
345 331
346 switch (res->u.rem_sta.status) { 332 switch (pkt->u.rem_sta.status) {
347 case REM_STA_SUCCESS_MSK: 333 case REM_STA_SUCCESS_MSK:
348 iwl_sta_ucode_deactivate(priv, addr); 334 iwl_sta_ucode_deactivate(priv, addr);
349 break; 335 break;
@@ -356,7 +342,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv,
356static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr, 342static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
357 u8 flags) 343 u8 flags)
358{ 344{
359 struct iwl_rx_packet *res = NULL; 345 struct iwl_rx_packet *pkt;
360 int ret; 346 int ret;
361 347
362 struct iwl_rem_sta_cmd rm_sta_cmd; 348 struct iwl_rem_sta_cmd rm_sta_cmd;
@@ -381,15 +367,15 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
381 if (ret || (flags & CMD_ASYNC)) 367 if (ret || (flags & CMD_ASYNC))
382 return ret; 368 return ret;
383 369
384 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 370 pkt = (struct iwl_rx_packet *)cmd.reply_page;
385 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 371 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
386 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 372 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
387 res->hdr.flags); 373 pkt->hdr.flags);
388 ret = -EIO; 374 ret = -EIO;
389 } 375 }
390 376
391 if (!ret) { 377 if (!ret) {
392 switch (res->u.rem_sta.status) { 378 switch (pkt->u.rem_sta.status) {
393 case REM_STA_SUCCESS_MSK: 379 case REM_STA_SUCCESS_MSK:
394 iwl_sta_ucode_deactivate(priv, addr); 380 iwl_sta_ucode_deactivate(priv, addr);
395 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 381 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
@@ -401,8 +387,8 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
401 } 387 }
402 } 388 }
403 389
404 priv->alloc_rxb_skb--; 390 priv->alloc_rxb_page--;
405 dev_kfree_skb_any(cmd.reply_skb); 391 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
406 392
407 return ret; 393 return ret;
408} 394}
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index c832ba085dba..8ae4c9b614e7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -131,7 +131,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
131 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 131 struct iwl_tx_queue *txq = &priv->txq[txq_id];
132 struct iwl_queue *q = &txq->q; 132 struct iwl_queue *q = &txq->q;
133 struct pci_dev *dev = priv->pci_dev; 133 struct pci_dev *dev = priv->pci_dev;
134 int i, len; 134 int i;
135 135
136 if (q->n_bd == 0) 136 if (q->n_bd == 0)
137 return; 137 return;
@@ -141,8 +141,6 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
141 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) 141 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
142 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 142 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
143 143
144 len = sizeof(struct iwl_device_cmd) * q->n_window;
145
146 /* De-alloc array of command/tx buffers */ 144 /* De-alloc array of command/tx buffers */
147 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 145 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
148 kfree(txq->cmd[i]); 146 kfree(txq->cmd[i]);
@@ -180,14 +178,11 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
180 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 178 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
181 struct iwl_queue *q = &txq->q; 179 struct iwl_queue *q = &txq->q;
182 struct pci_dev *dev = priv->pci_dev; 180 struct pci_dev *dev = priv->pci_dev;
183 int i, len; 181 int i;
184 182
185 if (q->n_bd == 0) 183 if (q->n_bd == 0)
186 return; 184 return;
187 185
188 len = sizeof(struct iwl_device_cmd) * q->n_window;
189 len += IWL_MAX_SCAN_SIZE;
190
191 /* De-alloc array of command/tx buffers */ 186 /* De-alloc array of command/tx buffers */
192 for (i = 0; i <= TFD_CMD_SLOTS; i++) 187 for (i = 0; i <= TFD_CMD_SLOTS; i++)
193 kfree(txq->cmd[i]); 188 kfree(txq->cmd[i]);
@@ -405,15 +400,19 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
405 int txq_id; 400 int txq_id;
406 401
407 /* Tx queues */ 402 /* Tx queues */
408 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 403 if (priv->txq)
409 if (txq_id == IWL_CMD_QUEUE_NUM) 404 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
410 iwl_cmd_queue_free(priv); 405 txq_id++)
411 else 406 if (txq_id == IWL_CMD_QUEUE_NUM)
412 iwl_tx_queue_free(priv, txq_id); 407 iwl_cmd_queue_free(priv);
413 408 else
409 iwl_tx_queue_free(priv, txq_id);
414 iwl_free_dma_ptr(priv, &priv->kw); 410 iwl_free_dma_ptr(priv, &priv->kw);
415 411
416 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); 412 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
413
414 /* free tx queue structure */
415 iwl_free_txq_mem(priv);
417} 416}
418EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 417EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
419 418
@@ -445,6 +444,12 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
445 IWL_ERR(priv, "Keep Warm allocation failed\n"); 444 IWL_ERR(priv, "Keep Warm allocation failed\n");
446 goto error_kw; 445 goto error_kw;
447 } 446 }
447
448 /* allocate tx queue structure */
449 ret = iwl_alloc_txq_mem(priv);
450 if (ret)
451 goto error;
452
448 spin_lock_irqsave(&priv->lock, flags); 453 spin_lock_irqsave(&priv->lock, flags);
449 454
450 /* Turn off all Tx DMA fifos */ 455 /* Turn off all Tx DMA fifos */
@@ -581,9 +586,7 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
581 u8 rate_plcp; 586 u8 rate_plcp;
582 587
583 /* Set retry limit on DATA packets and Probe Responses*/ 588 /* Set retry limit on DATA packets and Probe Responses*/
584 if (priv->data_retry_limit != -1) 589 if (ieee80211_is_probe_resp(fc))
585 data_retry_limit = priv->data_retry_limit;
586 else if (ieee80211_is_probe_resp(fc))
587 data_retry_limit = 3; 590 data_retry_limit = 3;
588 else 591 else
589 data_retry_limit = IWL_DEFAULT_TX_RETRY; 592 data_retry_limit = IWL_DEFAULT_TX_RETRY;
@@ -1145,7 +1148,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1145 */ 1148 */
1146void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 1149void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1147{ 1150{
1148 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1151 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1149 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1152 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1150 int txq_id = SEQ_TO_QUEUE(sequence); 1153 int txq_id = SEQ_TO_QUEUE(sequence);
1151 int index = SEQ_TO_INDEX(sequence); 1154 int index = SEQ_TO_INDEX(sequence);
@@ -1172,10 +1175,10 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1172 1175
1173 /* Input error checking is done when commands are added to queue. */ 1176 /* Input error checking is done when commands are added to queue. */
1174 if (meta->flags & CMD_WANT_SKB) { 1177 if (meta->flags & CMD_WANT_SKB) {
1175 meta->source->reply_skb = rxb->skb; 1178 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
1176 rxb->skb = NULL; 1179 rxb->page = NULL;
1177 } else if (meta->callback) 1180 } else if (meta->callback)
1178 meta->callback(priv, cmd, rxb->skb); 1181 meta->callback(priv, cmd, pkt);
1179 1182
1180 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); 1183 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
1181 1184
@@ -1434,7 +1437,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1434void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, 1437void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1435 struct iwl_rx_mem_buffer *rxb) 1438 struct iwl_rx_mem_buffer *rxb)
1436{ 1439{
1437 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1440 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1438 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; 1441 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1439 struct iwl_tx_queue *txq = NULL; 1442 struct iwl_tx_queue *txq = NULL;
1440 struct iwl_ht_agg *agg; 1443 struct iwl_ht_agg *agg;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index c347d6631d85..bfd7f497157f 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -88,7 +88,6 @@ MODULE_LICENSE("GPL");
88 88
89 /* module parameters */ 89 /* module parameters */
90struct iwl_mod_params iwl3945_mod_params = { 90struct iwl_mod_params iwl3945_mod_params = {
91 .num_of_queues = IWL39_NUM_QUEUES, /* Not used */
92 .sw_crypto = 1, 91 .sw_crypto = 1,
93 .restart_fw = 1, 92 .restart_fw = 1,
94 /* the rest are 0 by default */ 93 /* the rest are 0 by default */
@@ -366,13 +365,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
366 struct sk_buff *skb_frag, 365 struct sk_buff *skb_frag,
367 int sta_id) 366 int sta_id)
368{ 367{
369 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 368 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
370 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; 369 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
371 370
372 switch (keyinfo->alg) { 371 switch (keyinfo->alg) {
373 case ALG_CCMP: 372 case ALG_CCMP:
374 tx->sec_ctl = TX_CMD_SEC_CCM; 373 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
375 memcpy(tx->key, keyinfo->key, keyinfo->keylen); 374 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
376 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); 375 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
377 break; 376 break;
378 377
@@ -380,13 +379,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
380 break; 379 break;
381 380
382 case ALG_WEP: 381 case ALG_WEP:
383 tx->sec_ctl = TX_CMD_SEC_WEP | 382 tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
384 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 383 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
385 384
386 if (keyinfo->keylen == 13) 385 if (keyinfo->keylen == 13)
387 tx->sec_ctl |= TX_CMD_SEC_KEY128; 386 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
388 387
389 memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen); 388 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
390 389
391 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " 390 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
392 "with key %d\n", info->control.hw_key->hw_key_idx); 391 "with key %d\n", info->control.hw_key->hw_key_idx);
@@ -406,12 +405,11 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
406 struct ieee80211_tx_info *info, 405 struct ieee80211_tx_info *info,
407 struct ieee80211_hdr *hdr, u8 std_id) 406 struct ieee80211_hdr *hdr, u8 std_id)
408{ 407{
409 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 408 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
410 __le32 tx_flags = tx->tx_flags; 409 __le32 tx_flags = tx_cmd->tx_flags;
411 __le16 fc = hdr->frame_control; 410 __le16 fc = hdr->frame_control;
412 u8 rc_flags = info->control.rates[0].flags;
413 411
414 tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 412 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
415 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 413 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
416 tx_flags |= TX_CMD_FLG_ACK_MSK; 414 tx_flags |= TX_CMD_FLG_ACK_MSK;
417 if (ieee80211_is_mgmt(fc)) 415 if (ieee80211_is_mgmt(fc))
@@ -424,25 +422,19 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
424 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 422 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
425 } 423 }
426 424
427 tx->sta_id = std_id; 425 tx_cmd->sta_id = std_id;
428 if (ieee80211_has_morefrags(fc)) 426 if (ieee80211_has_morefrags(fc))
429 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 427 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
430 428
431 if (ieee80211_is_data_qos(fc)) { 429 if (ieee80211_is_data_qos(fc)) {
432 u8 *qc = ieee80211_get_qos_ctl(hdr); 430 u8 *qc = ieee80211_get_qos_ctl(hdr);
433 tx->tid_tspec = qc[0] & 0xf; 431 tx_cmd->tid_tspec = qc[0] & 0xf;
434 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 432 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
435 } else { 433 } else {
436 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 434 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
437 } 435 }
438 436
439 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 437 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
440 tx_flags |= TX_CMD_FLG_RTS_MSK;
441 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
442 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
443 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
444 tx_flags |= TX_CMD_FLG_CTS_MSK;
445 }
446 438
447 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) 439 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
448 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; 440 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
@@ -450,16 +442,16 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
450 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 442 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
451 if (ieee80211_is_mgmt(fc)) { 443 if (ieee80211_is_mgmt(fc)) {
452 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 444 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
453 tx->timeout.pm_frame_timeout = cpu_to_le16(3); 445 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
454 else 446 else
455 tx->timeout.pm_frame_timeout = cpu_to_le16(2); 447 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
456 } else { 448 } else {
457 tx->timeout.pm_frame_timeout = 0; 449 tx_cmd->timeout.pm_frame_timeout = 0;
458 } 450 }
459 451
460 tx->driver_txop = 0; 452 tx_cmd->driver_txop = 0;
461 tx->tx_flags = tx_flags; 453 tx_cmd->tx_flags = tx_flags;
462 tx->next_frame_len = 0; 454 tx_cmd->next_frame_len = 0;
463} 455}
464 456
465/* 457/*
@@ -469,7 +461,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
469{ 461{
470 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 462 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
471 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 463 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
472 struct iwl3945_tx_cmd *tx; 464 struct iwl3945_tx_cmd *tx_cmd;
473 struct iwl_tx_queue *txq = NULL; 465 struct iwl_tx_queue *txq = NULL;
474 struct iwl_queue *q = NULL; 466 struct iwl_queue *q = NULL;
475 struct iwl_device_cmd *out_cmd; 467 struct iwl_device_cmd *out_cmd;
@@ -568,9 +560,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
568 /* Init first empty entry in queue's array of Tx/cmd buffers */ 560 /* Init first empty entry in queue's array of Tx/cmd buffers */
569 out_cmd = txq->cmd[idx]; 561 out_cmd = txq->cmd[idx];
570 out_meta = &txq->meta[idx]; 562 out_meta = &txq->meta[idx];
571 tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; 563 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
572 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 564 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
573 memset(tx, 0, sizeof(*tx)); 565 memset(tx_cmd, 0, sizeof(*tx_cmd));
574 566
575 /* 567 /*
576 * Set up the Tx-command (not MAC!) header. 568 * Set up the Tx-command (not MAC!) header.
@@ -583,7 +575,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
583 INDEX_TO_SEQ(q->write_ptr))); 575 INDEX_TO_SEQ(q->write_ptr)));
584 576
585 /* Copy MAC header from skb into command buffer */ 577 /* Copy MAC header from skb into command buffer */
586 memcpy(tx->hdr, hdr, hdr_len); 578 memcpy(tx_cmd->hdr, hdr, hdr_len);
587 579
588 580
589 if (info->control.hw_key) 581 if (info->control.hw_key)
@@ -597,12 +589,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
597 589
598 /* Total # bytes to be transmitted */ 590 /* Total # bytes to be transmitted */
599 len = (u16)skb->len; 591 len = (u16)skb->len;
600 tx->len = cpu_to_le16(len); 592 tx_cmd->len = cpu_to_le16(len);
601 593
602 iwl_dbg_log_tx_data_frame(priv, len, hdr); 594 iwl_dbg_log_tx_data_frame(priv, len, hdr);
603 iwl_update_stats(priv, true, fc, len); 595 iwl_update_stats(priv, true, fc, len);
604 tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 596 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
605 tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 597 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
606 598
607 if (!ieee80211_has_morefrags(hdr->frame_control)) { 599 if (!ieee80211_has_morefrags(hdr->frame_control)) {
608 txq->need_update = 1; 600 txq->need_update = 1;
@@ -615,9 +607,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
615 607
616 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", 608 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
617 le16_to_cpu(out_cmd->hdr.sequence)); 609 le16_to_cpu(out_cmd->hdr.sequence));
618 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags)); 610 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
619 iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx)); 611 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
620 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr, 612 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
621 ieee80211_hdrlen(fc)); 613 ieee80211_hdrlen(fc));
622 614
623 /* 615 /*
@@ -753,7 +745,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
753 u8 type) 745 u8 type)
754{ 746{
755 struct iwl_spectrum_cmd spectrum; 747 struct iwl_spectrum_cmd spectrum;
756 struct iwl_rx_packet *res; 748 struct iwl_rx_packet *pkt;
757 struct iwl_host_cmd cmd = { 749 struct iwl_host_cmd cmd = {
758 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 750 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
759 .data = (void *)&spectrum, 751 .data = (void *)&spectrum,
@@ -798,18 +790,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
798 if (rc) 790 if (rc)
799 return rc; 791 return rc;
800 792
801 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 793 pkt = (struct iwl_rx_packet *)cmd.reply_page;
802 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 794 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
803 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); 795 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
804 rc = -EIO; 796 rc = -EIO;
805 } 797 }
806 798
807 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); 799 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
808 switch (spectrum_resp_status) { 800 switch (spectrum_resp_status) {
809 case 0: /* Command will be handled */ 801 case 0: /* Command will be handled */
810 if (res->u.spectrum.id != 0xff) { 802 if (pkt->u.spectrum.id != 0xff) {
811 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", 803 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
812 res->u.spectrum.id); 804 pkt->u.spectrum.id);
813 priv->measurement_status &= ~MEASUREMENT_READY; 805 priv->measurement_status &= ~MEASUREMENT_READY;
814 } 806 }
815 priv->measurement_status |= MEASUREMENT_ACTIVE; 807 priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -821,7 +813,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
821 break; 813 break;
822 } 814 }
823 815
824 dev_kfree_skb_any(cmd.reply_skb); 816 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
825 817
826 return rc; 818 return rc;
827} 819}
@@ -830,7 +822,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
830static void iwl3945_rx_reply_alive(struct iwl_priv *priv, 822static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
831 struct iwl_rx_mem_buffer *rxb) 823 struct iwl_rx_mem_buffer *rxb)
832{ 824{
833 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 825 struct iwl_rx_packet *pkt = rxb_addr(rxb);
834 struct iwl_alive_resp *palive; 826 struct iwl_alive_resp *palive;
835 struct delayed_work *pwork; 827 struct delayed_work *pwork;
836 828
@@ -867,7 +859,7 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
867 struct iwl_rx_mem_buffer *rxb) 859 struct iwl_rx_mem_buffer *rxb)
868{ 860{
869#ifdef CONFIG_IWLWIFI_DEBUG 861#ifdef CONFIG_IWLWIFI_DEBUG
870 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 862 struct iwl_rx_packet *pkt = rxb_addr(rxb);
871#endif 863#endif
872 864
873 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 865 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
@@ -903,7 +895,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
903 struct iwl_rx_mem_buffer *rxb) 895 struct iwl_rx_mem_buffer *rxb)
904{ 896{
905#ifdef CONFIG_IWLWIFI_DEBUG 897#ifdef CONFIG_IWLWIFI_DEBUG
906 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 898 struct iwl_rx_packet *pkt = rxb_addr(rxb);
907 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 899 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
908 u8 rate = beacon->beacon_notify_hdr.rate; 900 u8 rate = beacon->beacon_notify_hdr.rate;
909 901
@@ -926,7 +918,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
926static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, 918static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
927 struct iwl_rx_mem_buffer *rxb) 919 struct iwl_rx_mem_buffer *rxb)
928{ 920{
929 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 921 struct iwl_rx_packet *pkt = rxb_addr(rxb);
930 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 922 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
931 unsigned long status = priv->status; 923 unsigned long status = priv->status;
932 924
@@ -1090,7 +1082,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
1090 list_del(element); 1082 list_del(element);
1091 1083
1092 /* Point to Rx buffer via next RBD in circular buffer */ 1084 /* Point to Rx buffer via next RBD in circular buffer */
1093 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr); 1085 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
1094 rxq->queue[rxq->write] = rxb; 1086 rxq->queue[rxq->write] = rxb;
1095 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 1087 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
1096 rxq->free_count--; 1088 rxq->free_count--;
@@ -1130,8 +1122,9 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1130 struct iwl_rx_queue *rxq = &priv->rxq; 1122 struct iwl_rx_queue *rxq = &priv->rxq;
1131 struct list_head *element; 1123 struct list_head *element;
1132 struct iwl_rx_mem_buffer *rxb; 1124 struct iwl_rx_mem_buffer *rxb;
1133 struct sk_buff *skb; 1125 struct page *page;
1134 unsigned long flags; 1126 unsigned long flags;
1127 gfp_t gfp_mask = priority;
1135 1128
1136 while (1) { 1129 while (1) {
1137 spin_lock_irqsave(&rxq->lock, flags); 1130 spin_lock_irqsave(&rxq->lock, flags);
@@ -1143,10 +1136,14 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1143 spin_unlock_irqrestore(&rxq->lock, flags); 1136 spin_unlock_irqrestore(&rxq->lock, flags);
1144 1137
1145 if (rxq->free_count > RX_LOW_WATERMARK) 1138 if (rxq->free_count > RX_LOW_WATERMARK)
1146 priority |= __GFP_NOWARN; 1139 gfp_mask |= __GFP_NOWARN;
1140
1141 if (priv->hw_params.rx_page_order > 0)
1142 gfp_mask |= __GFP_COMP;
1143
1147 /* Alloc a new receive buffer */ 1144 /* Alloc a new receive buffer */
1148 skb = alloc_skb(priv->hw_params.rx_buf_size, priority); 1145 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1149 if (!skb) { 1146 if (!page) {
1150 if (net_ratelimit()) 1147 if (net_ratelimit())
1151 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); 1148 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1152 if ((rxq->free_count <= RX_LOW_WATERMARK) && 1149 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
@@ -1163,7 +1160,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1163 spin_lock_irqsave(&rxq->lock, flags); 1160 spin_lock_irqsave(&rxq->lock, flags);
1164 if (list_empty(&rxq->rx_used)) { 1161 if (list_empty(&rxq->rx_used)) {
1165 spin_unlock_irqrestore(&rxq->lock, flags); 1162 spin_unlock_irqrestore(&rxq->lock, flags);
1166 dev_kfree_skb_any(skb); 1163 __free_pages(page, priv->hw_params.rx_page_order);
1167 return; 1164 return;
1168 } 1165 }
1169 element = rxq->rx_used.next; 1166 element = rxq->rx_used.next;
@@ -1171,26 +1168,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1171 list_del(element); 1168 list_del(element);
1172 spin_unlock_irqrestore(&rxq->lock, flags); 1169 spin_unlock_irqrestore(&rxq->lock, flags);
1173 1170
1174 rxb->skb = skb; 1171 rxb->page = page;
1175
1176 /* If radiotap head is required, reserve some headroom here.
1177 * The physical head count is a variable rx_stats->phy_count.
1178 * We reserve 4 bytes here. Plus these extra bytes, the
1179 * headroom of the physical head should be enough for the
1180 * radiotap head that iwl3945 supported. See iwl3945_rt.
1181 */
1182 skb_reserve(rxb->skb, 4);
1183
1184 /* Get physical address of RB/SKB */ 1172 /* Get physical address of RB/SKB */
1185 rxb->real_dma_addr = pci_map_single(priv->pci_dev, 1173 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1186 rxb->skb->data, 1174 PAGE_SIZE << priv->hw_params.rx_page_order,
1187 priv->hw_params.rx_buf_size, 1175 PCI_DMA_FROMDEVICE);
1188 PCI_DMA_FROMDEVICE);
1189 1176
1190 spin_lock_irqsave(&rxq->lock, flags); 1177 spin_lock_irqsave(&rxq->lock, flags);
1178
1191 list_add_tail(&rxb->list, &rxq->rx_free); 1179 list_add_tail(&rxb->list, &rxq->rx_free);
1192 priv->alloc_rxb_skb++;
1193 rxq->free_count++; 1180 rxq->free_count++;
1181 priv->alloc_rxb_page++;
1182
1194 spin_unlock_irqrestore(&rxq->lock, flags); 1183 spin_unlock_irqrestore(&rxq->lock, flags);
1195 } 1184 }
1196} 1185}
@@ -1206,14 +1195,14 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1206 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 1195 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1207 /* In the reset function, these buffers may have been allocated 1196 /* In the reset function, these buffers may have been allocated
1208 * to an SKB, so we need to unmap and free potential storage */ 1197 * to an SKB, so we need to unmap and free potential storage */
1209 if (rxq->pool[i].skb != NULL) { 1198 if (rxq->pool[i].page != NULL) {
1210 pci_unmap_single(priv->pci_dev, 1199 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1211 rxq->pool[i].real_dma_addr, 1200 PAGE_SIZE << priv->hw_params.rx_page_order,
1212 priv->hw_params.rx_buf_size, 1201 PCI_DMA_FROMDEVICE);
1213 PCI_DMA_FROMDEVICE); 1202 priv->alloc_rxb_page--;
1214 priv->alloc_rxb_skb--; 1203 __free_pages(rxq->pool[i].page,
1215 dev_kfree_skb(rxq->pool[i].skb); 1204 priv->hw_params.rx_page_order);
1216 rxq->pool[i].skb = NULL; 1205 rxq->pool[i].page = NULL;
1217 } 1206 }
1218 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1207 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1219 } 1208 }
@@ -1221,8 +1210,8 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1221 /* Set us so that we have processed and used all buffers, but have 1210 /* Set us so that we have processed and used all buffers, but have
1222 * not restocked the Rx queue with fresh buffers */ 1211 * not restocked the Rx queue with fresh buffers */
1223 rxq->read = rxq->write = 0; 1212 rxq->read = rxq->write = 0;
1224 rxq->free_count = 0;
1225 rxq->write_actual = 0; 1213 rxq->write_actual = 0;
1214 rxq->free_count = 0;
1226 spin_unlock_irqrestore(&rxq->lock, flags); 1215 spin_unlock_irqrestore(&rxq->lock, flags);
1227} 1216}
1228 1217
@@ -1255,12 +1244,14 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1255{ 1244{
1256 int i; 1245 int i;
1257 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 1246 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1258 if (rxq->pool[i].skb != NULL) { 1247 if (rxq->pool[i].page != NULL) {
1259 pci_unmap_single(priv->pci_dev, 1248 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1260 rxq->pool[i].real_dma_addr, 1249 PAGE_SIZE << priv->hw_params.rx_page_order,
1261 priv->hw_params.rx_buf_size, 1250 PCI_DMA_FROMDEVICE);
1262 PCI_DMA_FROMDEVICE); 1251 __free_pages(rxq->pool[i].page,
1263 dev_kfree_skb(rxq->pool[i].skb); 1252 priv->hw_params.rx_page_order);
1253 rxq->pool[i].page = NULL;
1254 priv->alloc_rxb_page--;
1264 } 1255 }
1265 } 1256 }
1266 1257
@@ -1376,7 +1367,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1376 i = rxq->read; 1367 i = rxq->read;
1377 1368
1378 /* calculate total frames need to be restock after handling RX */ 1369 /* calculate total frames need to be restock after handling RX */
1379 total_empty = r - priv->rxq.write_actual; 1370 total_empty = r - rxq->write_actual;
1380 if (total_empty < 0) 1371 if (total_empty < 0)
1381 total_empty += RX_QUEUE_SIZE; 1372 total_empty += RX_QUEUE_SIZE;
1382 1373
@@ -1396,10 +1387,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1396 1387
1397 rxq->queue[i] = NULL; 1388 rxq->queue[i] = NULL;
1398 1389
1399 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, 1390 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1400 priv->hw_params.rx_buf_size, 1391 PAGE_SIZE << priv->hw_params.rx_page_order,
1401 PCI_DMA_FROMDEVICE); 1392 PCI_DMA_FROMDEVICE);
1402 pkt = (struct iwl_rx_packet *)rxb->skb->data; 1393 pkt = rxb_addr(rxb);
1403 1394
1404 trace_iwlwifi_dev_rx(priv, pkt, 1395 trace_iwlwifi_dev_rx(priv, pkt,
1405 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 1396 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
@@ -1420,44 +1411,55 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1420 if (priv->rx_handlers[pkt->hdr.cmd]) { 1411 if (priv->rx_handlers[pkt->hdr.cmd]) {
1421 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, 1412 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1422 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1413 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1423 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1424 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1414 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1415 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1425 } else { 1416 } else {
1426 /* No handling needed */ 1417 /* No handling needed */
1427 IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n", 1418 IWL_DEBUG_RX(priv,
1419 "r %d i %d No handler needed for %s, 0x%02x\n",
1428 r, i, get_cmd_string(pkt->hdr.cmd), 1420 r, i, get_cmd_string(pkt->hdr.cmd),
1429 pkt->hdr.cmd); 1421 pkt->hdr.cmd);
1430 } 1422 }
1431 1423
1424 /*
1425 * XXX: After here, we should always check rxb->page
1426 * against NULL before touching it or its virtual
1427 * memory (pkt). Because some rx_handler might have
1428 * already taken or freed the pages.
1429 */
1430
1432 if (reclaim) { 1431 if (reclaim) {
1433 /* Invoke any callbacks, transfer the skb to caller, and 1432 /* Invoke any callbacks, transfer the buffer to caller,
1434 * fire off the (possibly) blocking iwl_send_cmd() 1433 * and fire off the (possibly) blocking iwl_send_cmd()
1435 * as we reclaim the driver command queue */ 1434 * as we reclaim the driver command queue */
1436 if (rxb && rxb->skb) 1435 if (rxb->page)
1437 iwl_tx_cmd_complete(priv, rxb); 1436 iwl_tx_cmd_complete(priv, rxb);
1438 else 1437 else
1439 IWL_WARN(priv, "Claim null rxb?\n"); 1438 IWL_WARN(priv, "Claim null rxb?\n");
1440 } 1439 }
1441 1440
1442 /* For now we just don't re-use anything. We can tweak this 1441 /* Reuse the page if possible. For notification packets and
1443 * later to try and re-use notification packets and SKBs that 1442 * SKBs that fail to Rx correctly, add them back into the
1444 * fail to Rx correctly */ 1443 * rx_free list for reuse later. */
1445 if (rxb->skb != NULL) {
1446 priv->alloc_rxb_skb--;
1447 dev_kfree_skb_any(rxb->skb);
1448 rxb->skb = NULL;
1449 }
1450
1451 spin_lock_irqsave(&rxq->lock, flags); 1444 spin_lock_irqsave(&rxq->lock, flags);
1452 list_add_tail(&rxb->list, &priv->rxq.rx_used); 1445 if (rxb->page != NULL) {
1446 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1447 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1448 PCI_DMA_FROMDEVICE);
1449 list_add_tail(&rxb->list, &rxq->rx_free);
1450 rxq->free_count++;
1451 } else
1452 list_add_tail(&rxb->list, &rxq->rx_used);
1453
1453 spin_unlock_irqrestore(&rxq->lock, flags); 1454 spin_unlock_irqrestore(&rxq->lock, flags);
1455
1454 i = (i + 1) & RX_QUEUE_MASK; 1456 i = (i + 1) & RX_QUEUE_MASK;
1455 /* If there are a lot of unused frames, 1457 /* If there are a lot of unused frames,
1456 * restock the Rx queue so ucode won't assert. */ 1458 * restock the Rx queue so ucode won't assert. */
1457 if (fill_rx) { 1459 if (fill_rx) {
1458 count++; 1460 count++;
1459 if (count >= 8) { 1461 if (count >= 8) {
1460 priv->rxq.read = i; 1462 rxq->read = i;
1461 iwl3945_rx_replenish_now(priv); 1463 iwl3945_rx_replenish_now(priv);
1462 count = 0; 1464 count = 0;
1463 } 1465 }
@@ -1465,7 +1467,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1465 } 1467 }
1466 1468
1467 /* Backtrack one entry */ 1469 /* Backtrack one entry */
1468 priv->rxq.read = i; 1470 rxq->read = i;
1469 if (fill_rx) 1471 if (fill_rx)
1470 iwl3945_rx_replenish_now(priv); 1472 iwl3945_rx_replenish_now(priv);
1471 else 1473 else
@@ -1686,6 +1688,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1686 } 1688 }
1687#endif 1689#endif
1688 1690
1691 spin_unlock_irqrestore(&priv->lock, flags);
1692
1689 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 1693 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1690 * atomic, make sure that inta covers all the interrupts that 1694 * atomic, make sure that inta covers all the interrupts that
1691 * we've discovered, even if FH interrupt came in just after 1695 * we've discovered, even if FH interrupt came in just after
@@ -1707,8 +1711,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1707 1711
1708 handled |= CSR_INT_BIT_HW_ERR; 1712 handled |= CSR_INT_BIT_HW_ERR;
1709 1713
1710 spin_unlock_irqrestore(&priv->lock, flags);
1711
1712 return; 1714 return;
1713 } 1715 }
1714 1716
@@ -1800,7 +1802,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1800 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1802 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1801 } 1803 }
1802#endif 1804#endif
1803 spin_unlock_irqrestore(&priv->lock, flags);
1804} 1805}
1805 1806
1806static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, 1807static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
@@ -2563,11 +2564,6 @@ static void __iwl3945_down(struct iwl_priv *priv)
2563 test_bit(STATUS_EXIT_PENDING, &priv->status) << 2564 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2564 STATUS_EXIT_PENDING; 2565 STATUS_EXIT_PENDING;
2565 2566
2566 priv->cfg->ops->lib->apm_ops.reset(priv);
2567 spin_lock_irqsave(&priv->lock, flags);
2568 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2569 spin_unlock_irqrestore(&priv->lock, flags);
2570
2571 iwl3945_hw_txq_ctx_stop(priv); 2567 iwl3945_hw_txq_ctx_stop(priv);
2572 iwl3945_hw_rxq_stop(priv); 2568 iwl3945_hw_rxq_stop(priv);
2573 2569
@@ -2576,10 +2572,8 @@ static void __iwl3945_down(struct iwl_priv *priv)
2576 2572
2577 udelay(5); 2573 udelay(5);
2578 2574
2579 if (exit_pending) 2575 /* Stop the device, and put it in low power state */
2580 priv->cfg->ops->lib->apm_ops.stop(priv); 2576 priv->cfg->ops->lib->apm_ops.stop(priv);
2581 else
2582 priv->cfg->ops->lib->apm_ops.reset(priv);
2583 2577
2584 exit: 2578 exit:
2585 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2579 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2724,19 +2718,34 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
2724 mutex_unlock(&priv->mutex); 2718 mutex_unlock(&priv->mutex);
2725} 2719}
2726 2720
2721/*
2722 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2723 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2724 * *is* readable even when device has been SW_RESET into low power mode
2725 * (e.g. during RF KILL).
2726 */
2727static void iwl3945_rfkill_poll(struct work_struct *data) 2727static void iwl3945_rfkill_poll(struct work_struct *data)
2728{ 2728{
2729 struct iwl_priv *priv = 2729 struct iwl_priv *priv =
2730 container_of(data, struct iwl_priv, rfkill_poll.work); 2730 container_of(data, struct iwl_priv, rfkill_poll.work);
2731 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2732 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2733 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2731 2734
2732 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2735 if (new_rfkill != old_rfkill) {
2733 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2736 if (new_rfkill)
2734 else 2737 set_bit(STATUS_RF_KILL_HW, &priv->status);
2735 set_bit(STATUS_RF_KILL_HW, &priv->status); 2738 else
2739 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2736 2740
2737 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 2741 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2738 test_bit(STATUS_RF_KILL_HW, &priv->status));
2739 2742
2743 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2744 new_rfkill ? "disable radio" : "enable radio");
2745 }
2746
2747 /* Keep this running, even if radio now enabled. This will be
2748 * cancelled in mac_start() if system decides to start again */
2740 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 2749 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
2741 round_jiffies_relative(2 * HZ)); 2750 round_jiffies_relative(2 * HZ));
2742 2751
@@ -3797,7 +3806,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3797 /* Clear the driver's (not device's) station table */ 3806 /* Clear the driver's (not device's) station table */
3798 iwl_clear_stations_table(priv); 3807 iwl_clear_stations_table(priv);
3799 3808
3800 priv->data_retry_limit = -1;
3801 priv->ieee_channels = NULL; 3809 priv->ieee_channels = NULL;
3802 priv->ieee_rates = NULL; 3810 priv->ieee_rates = NULL;
3803 priv->band = IEEE80211_BAND_2GHZ; 3811 priv->band = IEEE80211_BAND_2GHZ;
@@ -4056,6 +4064,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4056 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]); 4064 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
4057 iwl3945_setup_deferred_work(priv); 4065 iwl3945_setup_deferred_work(priv);
4058 iwl3945_setup_rx_handlers(priv); 4066 iwl3945_setup_rx_handlers(priv);
4067 iwl_power_initialize(priv);
4059 4068
4060 /********************************* 4069 /*********************************
4061 * 8. Setup and Register mac80211 4070 * 8. Setup and Register mac80211
@@ -4126,6 +4135,15 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4126 iwl3945_down(priv); 4135 iwl3945_down(priv);
4127 } 4136 }
4128 4137
4138 /*
4139 * Make sure device is reset to low power before unloading driver.
4140 * This may be redundant with iwl_down(), but there are paths to
4141 * run iwl_down() without calling apm_ops.stop(), and there are
4142 * paths to avoid running iwl_down() at all before leaving driver.
4143 * This (inexpensive) call *makes sure* device is reset.
4144 */
4145 priv->cfg->ops->lib->apm_ops.stop(priv);
4146
4129 /* make sure we flush any pending irq or 4147 /* make sure we flush any pending irq or
4130 * tasklet for the driver 4148 * tasklet for the driver
4131 */ 4149 */
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index c25a04371ca8..9606b3100fde 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -3,6 +3,7 @@ config IWM
3 depends on MMC && WLAN_80211 && EXPERIMENTAL 3 depends on MMC && WLAN_80211 && EXPERIMENTAL
4 depends on CFG80211 4 depends on CFG80211
5 select FW_LOADER 5 select FW_LOADER
6 select IWMC3200TOP
6 help 7 help
7 The Intel Wireless Multicomm 3200 hardware is a combo 8 The Intel Wireless Multicomm 3200 hardware is a combo
8 card with GPS, Bluetooth, WiMax and 802.11 radios. It 9 card with GPS, Bluetooth, WiMax and 802.11 radios. It
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a56a2b0ac99a..af72cc746f15 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -404,39 +404,21 @@ static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
404{ 404{
405 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 405 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
406 struct ieee80211_channel *chan = params->channel; 406 struct ieee80211_channel *chan = params->channel;
407 struct cfg80211_bss *bss;
408 407
409 if (!test_bit(IWM_STATUS_READY, &iwm->status)) 408 if (!test_bit(IWM_STATUS_READY, &iwm->status))
410 return -EIO; 409 return -EIO;
411 410
412 /* UMAC doesn't support creating IBSS network with specified bssid. 411 /* UMAC doesn't support creating or joining an IBSS network
413 * This should be removed after we have join only mode supported. */ 412 * with specified bssid. */
414 if (params->bssid) 413 if (params->bssid)
415 return -EOPNOTSUPP; 414 return -EOPNOTSUPP;
416 415
417 bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
418 params->ssid, params->ssid_len);
419 if (!bss) {
420 iwm_scan_one_ssid(iwm, params->ssid, params->ssid_len);
421 schedule_timeout_interruptible(2 * HZ);
422 bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
423 params->ssid, params->ssid_len);
424 }
425 /* IBSS join only mode is not supported by UMAC ATM */
426 if (bss) {
427 cfg80211_put_bss(bss);
428 return -EOPNOTSUPP;
429 }
430
431 iwm->channel = ieee80211_frequency_to_channel(chan->center_freq); 416 iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
432 iwm->umac_profile->ibss.band = chan->band; 417 iwm->umac_profile->ibss.band = chan->band;
433 iwm->umac_profile->ibss.channel = iwm->channel; 418 iwm->umac_profile->ibss.channel = iwm->channel;
434 iwm->umac_profile->ssid.ssid_len = params->ssid_len; 419 iwm->umac_profile->ssid.ssid_len = params->ssid_len;
435 memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len); 420 memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
436 421
437 if (params->bssid)
438 memcpy(&iwm->umac_profile->bssid[0], params->bssid, ETH_ALEN);
439
440 return iwm_send_mlme_profile(iwm); 422 return iwm_send_mlme_profile(iwm);
441} 423}
442 424
@@ -489,12 +471,12 @@ static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version)
489 return 0; 471 return 0;
490 } 472 }
491 473
474 if (wpa_version & NL80211_WPA_VERSION_1)
475 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
476
492 if (wpa_version & NL80211_WPA_VERSION_2) 477 if (wpa_version & NL80211_WPA_VERSION_2)
493 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK; 478 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
494 479
495 if (wpa_version & NL80211_WPA_VERSION_1)
496 iwm->umac_profile->sec.flags |= UMAC_SEC_FLG_WPA_ON_MSK;
497
498 return 0; 480 return 0;
499} 481}
500 482
@@ -645,6 +627,13 @@ static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
645 iwm->default_key = sme->key_idx; 627 iwm->default_key = sme->key_idx;
646 } 628 }
647 629
630 /* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
631 if ((iwm->umac_profile->sec.flags &
632 (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
633 iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
634 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
635 }
636
648 ret = iwm_send_mlme_profile(iwm); 637 ret = iwm_send_mlme_profile(iwm);
649 638
650 if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK || 639 if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
@@ -681,9 +670,19 @@ static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
681static int iwm_cfg80211_set_txpower(struct wiphy *wiphy, 670static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
682 enum tx_power_setting type, int dbm) 671 enum tx_power_setting type, int dbm)
683{ 672{
673 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
674 int ret;
675
684 switch (type) { 676 switch (type) {
685 case TX_POWER_AUTOMATIC: 677 case TX_POWER_AUTOMATIC:
686 return 0; 678 return 0;
679 case TX_POWER_FIXED:
680 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
681 CFG_TX_PWR_LIMIT_USR, dbm * 2);
682 if (ret < 0)
683 return ret;
684
685 return iwm_tx_power_trigger(iwm);
687 default: 686 default:
688 return -EOPNOTSUPP; 687 return -EOPNOTSUPP;
689 } 688 }
@@ -695,7 +694,7 @@ static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
695{ 694{
696 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 695 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
697 696
698 *dbm = iwm->txpower; 697 *dbm = iwm->txpower >> 1;
699 698
700 return 0; 699 return 0;
701} 700}
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 23b52fa2605f..cad511afd907 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -76,6 +76,11 @@ int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
76 int ret; 76 int ret;
77 u8 oid = hdr->oid; 77 u8 oid = hdr->oid;
78 78
79 if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
80 IWM_ERR(iwm, "Interface is not ready yet");
81 return -EAGAIN;
82 }
83
79 umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER; 84 umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER;
80 umac_cmd.resp = resp; 85 umac_cmd.resp = resp;
81 86
@@ -274,6 +279,17 @@ int iwm_send_calib_results(struct iwm_priv *iwm)
274 return ret; 279 return ret;
275} 280}
276 281
282int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit)
283{
284 struct iwm_ct_kill_cfg_cmd cmd;
285
286 cmd.entry_threshold = entry;
287 cmd.exit_threshold = exit;
288
289 return iwm_send_lmac_ptrough_cmd(iwm, REPLY_CT_KILL_CONFIG_CMD, &cmd,
290 sizeof(struct iwm_ct_kill_cfg_cmd), 0);
291}
292
277int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp) 293int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp)
278{ 294{
279 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; 295 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
@@ -777,11 +793,24 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
777 return ret; 793 return ret;
778 794
779 ret = wait_event_interruptible_timeout(iwm->mlme_queue, 795 ret = wait_event_interruptible_timeout(iwm->mlme_queue,
780 (iwm->umac_profile_active == 0), 2 * HZ); 796 (iwm->umac_profile_active == 0), 5 * HZ);
781 797
782 return ret ? 0 : -EBUSY; 798 return ret ? 0 : -EBUSY;
783} 799}
784 800
801int iwm_tx_power_trigger(struct iwm_priv *iwm)
802{
803 struct iwm_umac_pwr_trigger pwr_trigger;
804
805 pwr_trigger.hdr.oid = UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER;
806 pwr_trigger.hdr.buf_size =
807 cpu_to_le16(sizeof(struct iwm_umac_pwr_trigger) -
808 sizeof(struct iwm_umac_wifi_if));
809
810
811 return iwm_send_wifi_if_cmd(iwm, &pwr_trigger, sizeof(pwr_trigger), 1);
812}
813
785int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags) 814int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags)
786{ 815{
787 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; 816 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index e24d5b633997..b36be2b23a3c 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -102,7 +102,6 @@ enum {
102 CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN, 102 CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
103 CFG_TLC_SUPPORTED_TX_HT_RATES, 103 CFG_TLC_SUPPORTED_TX_HT_RATES,
104 CFG_TLC_SUPPORTED_TX_RATES, 104 CFG_TLC_SUPPORTED_TX_RATES,
105 CFG_TLC_VALID_ANTENNA,
106 CFG_TLC_SPATIAL_STREAM_SUPPORTED, 105 CFG_TLC_SPATIAL_STREAM_SUPPORTED,
107 CFG_TLC_RETRY_PER_RATE, 106 CFG_TLC_RETRY_PER_RATE,
108 CFG_TLC_RETRY_PER_HT_RATE, 107 CFG_TLC_RETRY_PER_HT_RATE,
@@ -136,6 +135,10 @@ enum {
136 CFG_TLC_RENEW_ADDBA_DELAY, 135 CFG_TLC_RENEW_ADDBA_DELAY,
137 CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD, 136 CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
138 CFG_TLC_IS_STABLE_IN_HT, 137 CFG_TLC_IS_STABLE_IN_HT,
138 CFG_TLC_SR_SIC_1ST_FAIL,
139 CFG_TLC_SR_SIC_1ST_PASS,
140 CFG_TLC_SR_SIC_TOTAL_FAIL,
141 CFG_TLC_SR_SIC_TOTAL_PASS,
139 CFG_RLC_CHAIN_CTRL, 142 CFG_RLC_CHAIN_CTRL,
140 CFG_TRK_TABLE_OP_MODE, 143 CFG_TRK_TABLE_OP_MODE,
141 CFG_TRK_TABLE_RSSI_THRESHOLD, 144 CFG_TRK_TABLE_RSSI_THRESHOLD,
@@ -147,6 +150,58 @@ enum {
147 CFG_MLME_DBG_NOTIF_BLOCK, 150 CFG_MLME_DBG_NOTIF_BLOCK,
148 CFG_BT_OFF_BECONS_INTERVALS, 151 CFG_BT_OFF_BECONS_INTERVALS,
149 CFG_BT_FRAG_DURATION, 152 CFG_BT_FRAG_DURATION,
153 CFG_ACTIVE_CHAINS,
154 CFG_CALIB_CTRL,
155 CFG_CAPABILITY_SUPPORTED_HT_RATES,
156 CFG_HT_MAC_PARAM_INFO,
157 CFG_MIMO_PS_MODE,
158 CFG_HT_DEFAULT_CAPABILIES_INFO,
159 CFG_LED_SC_RESOLUTION_FACTOR,
160 CFG_PTAM_ENERGY_CCK_DET_DEFAULT,
161 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT,
162 CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT,
163 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT,
164 CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT,
165 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT,
166 CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT,
167 CFG_PTAM_ENERGY_CCK_DET_MIN_VAL,
168 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL,
169 CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL,
170 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL,
171 CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL,
172 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL,
173 CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL,
174 CFG_PTAM_ENERGY_CCK_DET_MAX_VAL,
175 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL,
176 CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL,
177 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL,
178 CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL,
179 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL,
180 CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL,
181 CFG_PTAM_ENERGY_CCK_DET_STEP_VAL,
182 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL,
183 CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL,
184 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL,
185 CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL,
186 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL,
187 CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL,
188 CFG_PTAM_LINK_SENS_FA_OFDM_MAX,
189 CFG_PTAM_LINK_SENS_FA_OFDM_MIN,
190 CFG_PTAM_LINK_SENS_FA_CCK_MAX,
191 CFG_PTAM_LINK_SENS_FA_CCK_MIN,
192 CFG_PTAM_LINK_SENS_NRG_DIFF,
193 CFG_PTAM_LINK_SENS_NRG_MARGIN,
194 CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA,
195 CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK,
196 CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD,
197 CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD,
198 CFG_AGG_MGG_ADDBA_BUF_SIZE,
199 CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT,
200 CFG_AGG_MGG_ADDBA_DEBUG_FLAGS,
201 CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD,
202 CFG_SCAN_PERIODIC_COEF_RSSI_HIGH,
203 CFG_11D_ENABLED,
204 CFG_11H_FEATURE_FLAGS,
150 205
151 /* <-- LAST --> */ 206 /* <-- LAST --> */
152 CFG_TBL_FIX_LAST 207 CFG_TBL_FIX_LAST
@@ -155,7 +210,8 @@ enum {
155/* variable size table */ 210/* variable size table */
156enum { 211enum {
157 CFG_NET_ADDR = 0, 212 CFG_NET_ADDR = 0,
158 CFG_PROFILE, 213 CFG_LED_PATTERN_TABLE,
214
159 /* <-- LAST --> */ 215 /* <-- LAST --> */
160 CFG_TBL_VAR_LAST 216 CFG_TBL_VAR_LAST
161}; 217};
@@ -288,6 +344,9 @@ struct iwm_umac_cmd_scan_request {
288/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */ 344/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */
289#define UMAC_SEC_FLG_WSC_ON_POS 2 345#define UMAC_SEC_FLG_WSC_ON_POS 2
290#define UMAC_SEC_FLG_WSC_ON_SEED 1 346#define UMAC_SEC_FLG_WSC_ON_SEED 1
347#define UMAC_SEC_FLG_WSC_ON_MSK (UMAC_SEC_FLG_WSC_ON_SEED << \
348 UMAC_SEC_FLG_WSC_ON_POS)
349
291 350
292/* Legacy profile can use only WEP40 and WEP104 for encryption and 351/* Legacy profile can use only WEP40 and WEP104 for encryption and
293 * OPEN or PSK for authentication */ 352 * OPEN or PSK for authentication */
@@ -382,6 +441,11 @@ struct iwm_umac_tx_key_id {
382 u8 reserved[3]; 441 u8 reserved[3];
383} __attribute__ ((packed)); 442} __attribute__ ((packed));
384 443
444struct iwm_umac_pwr_trigger {
445 struct iwm_umac_wifi_if hdr;
446 __le32 reseved;
447} __attribute__ ((packed));
448
385struct iwm_umac_cmd_stats_req { 449struct iwm_umac_cmd_stats_req {
386 __le32 flags; 450 __le32 flags;
387} __attribute__ ((packed)); 451} __attribute__ ((packed));
@@ -393,6 +457,7 @@ int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
393int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested); 457int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
394int iwm_send_calib_results(struct iwm_priv *iwm); 458int iwm_send_calib_results(struct iwm_priv *iwm);
395int iwm_store_rxiq_calib_result(struct iwm_priv *iwm); 459int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
460int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit);
396 461
397/* UMAC commands */ 462/* UMAC commands */
398int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size, 463int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
@@ -407,6 +472,7 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
407int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id); 472int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
408int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx); 473int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
409int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key); 474int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key);
475int iwm_tx_power_trigger(struct iwm_priv *iwm);
410int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags); 476int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
411int iwm_send_umac_channel_list(struct iwm_priv *iwm); 477int iwm_send_umac_channel_list(struct iwm_priv *iwm);
412int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids, 478int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
index 6b0bcad758ca..49067092d336 100644
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ b/drivers/net/wireless/iwmc3200wifi/fw.c
@@ -217,6 +217,13 @@ static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
217 IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date), 217 IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
218 IWM_BUILD_DAY(build_date)); 218 IWM_BUILD_DAY(build_date));
219 219
220 if (!strcmp(img_name, iwm->bus_ops->umac_name))
221 sprintf(iwm->umac_version, "%02X.%02X",
222 ver->major, ver->minor);
223
224 if (!strcmp(img_name, iwm->bus_ops->lmac_name))
225 sprintf(iwm->lmac_version, "%02X.%02X",
226 ver->major, ver->minor);
220 227
221 err_release_fw: 228 err_release_fw:
222 release_firmware(fw); 229 release_firmware(fw);
@@ -398,6 +405,8 @@ int iwm_load_fw(struct iwm_priv *iwm)
398 iwm_send_prio_table(iwm); 405 iwm_send_prio_table(iwm);
399 iwm_send_calib_results(iwm); 406 iwm_send_calib_results(iwm);
400 iwm_send_periodic_calib_cfg(iwm, periodic_calib_map); 407 iwm_send_periodic_calib_cfg(iwm, periodic_calib_map);
408 iwm_send_ct_kill_cfg(iwm, iwm->conf.ct_kill_entry,
409 iwm->conf.ct_kill_exit);
401 410
402 return 0; 411 return 0;
403 412
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 1b02a4e2a1ac..a9bf6bc97bea 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -65,6 +65,8 @@ struct iwm_conf {
65 u32 sdio_ior_timeout; 65 u32 sdio_ior_timeout;
66 unsigned long calib_map; 66 unsigned long calib_map;
67 unsigned long expected_calib_map; 67 unsigned long expected_calib_map;
68 u8 ct_kill_entry;
69 u8 ct_kill_exit;
68 bool reset_on_fatal_err; 70 bool reset_on_fatal_err;
69 bool auto_connect; 71 bool auto_connect;
70 bool wimax_not_present; 72 bool wimax_not_present;
@@ -276,12 +278,14 @@ struct iwm_priv {
276 struct iw_statistics wstats; 278 struct iw_statistics wstats;
277 struct delayed_work stats_request; 279 struct delayed_work stats_request;
278 struct delayed_work disconnect; 280 struct delayed_work disconnect;
281 struct delayed_work ct_kill_delay;
279 282
280 struct iwm_debugfs dbg; 283 struct iwm_debugfs dbg;
281 284
282 u8 *eeprom; 285 u8 *eeprom;
283 struct timer_list watchdog; 286 struct timer_list watchdog;
284 struct work_struct reset_worker; 287 struct work_struct reset_worker;
288 struct work_struct auth_retry_worker;
285 struct mutex mutex; 289 struct mutex mutex;
286 290
287 u8 *req_ie; 291 u8 *req_ie;
@@ -290,6 +294,8 @@ struct iwm_priv {
290 int resp_ie_len; 294 int resp_ie_len;
291 295
292 struct iwm_fw_error_hdr *last_fw_err; 296 struct iwm_fw_error_hdr *last_fw_err;
297 char umac_version[8];
298 char lmac_version[8];
293 299
294 char private[0] __attribute__((__aligned__(NETDEV_ALIGN))); 300 char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
295}; 301};
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index 6c1a14c4480f..a3a79b5e2898 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -187,6 +187,14 @@ struct iwm_coex_prio_table_cmd {
187 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \ 187 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
188 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK) 188 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
189 189
190/* CT kill config command */
191struct iwm_ct_kill_cfg_cmd {
192 u32 exit_threshold;
193 u32 reserved;
194 u32 entry_threshold;
195} __attribute__ ((packed));
196
197
190/* LMAC OP CODES */ 198/* LMAC OP CODES */
191#define REPLY_PAD 0x0 199#define REPLY_PAD 0x0
192#define REPLY_ALIVE 0x1 200#define REPLY_ALIVE 0x1
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 170f33706490..f93e9139b0f2 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -63,6 +63,8 @@ static struct iwm_conf def_iwm_conf = {
63 BIT(PHY_CALIBRATE_TX_IQ_CMD) | 63 BIT(PHY_CALIBRATE_TX_IQ_CMD) |
64 BIT(PHY_CALIBRATE_RX_IQ_CMD) | 64 BIT(PHY_CALIBRATE_RX_IQ_CMD) |
65 BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD), 65 BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
66 .ct_kill_entry = 110,
67 .ct_kill_exit = 110,
66 .reset_on_fatal_err = 1, 68 .reset_on_fatal_err = 1,
67 .auto_connect = 1, 69 .auto_connect = 1,
68 .wimax_not_present = 0, 70 .wimax_not_present = 0,
@@ -133,6 +135,17 @@ static void iwm_disconnect_work(struct work_struct *work)
133 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL); 135 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL);
134} 136}
135 137
138static void iwm_ct_kill_work(struct work_struct *work)
139{
140 struct iwm_priv *iwm =
141 container_of(work, struct iwm_priv, ct_kill_delay.work);
142 struct wiphy *wiphy = iwm_to_wiphy(iwm);
143
144 IWM_INFO(iwm, "CT kill delay timeout\n");
145
146 wiphy_rfkill_set_hw_state(wiphy, false);
147}
148
136static int __iwm_up(struct iwm_priv *iwm); 149static int __iwm_up(struct iwm_priv *iwm);
137static int __iwm_down(struct iwm_priv *iwm); 150static int __iwm_down(struct iwm_priv *iwm);
138 151
@@ -194,6 +207,33 @@ static void iwm_reset_worker(struct work_struct *work)
194 mutex_unlock(&iwm->mutex); 207 mutex_unlock(&iwm->mutex);
195} 208}
196 209
210static void iwm_auth_retry_worker(struct work_struct *work)
211{
212 struct iwm_priv *iwm;
213 int i, ret;
214
215 iwm = container_of(work, struct iwm_priv, auth_retry_worker);
216 if (iwm->umac_profile_active) {
217 ret = iwm_invalidate_mlme_profile(iwm);
218 if (ret < 0)
219 return;
220 }
221
222 iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
223
224 ret = iwm_send_mlme_profile(iwm);
225 if (ret < 0)
226 return;
227
228 for (i = 0; i < IWM_NUM_KEYS; i++)
229 if (iwm->keys[i].key_len)
230 iwm_set_key(iwm, 0, &iwm->keys[i]);
231
232 iwm_set_tx_key(iwm, iwm->default_key);
233}
234
235
236
197static void iwm_watchdog(unsigned long data) 237static void iwm_watchdog(unsigned long data)
198{ 238{
199 struct iwm_priv *iwm = (struct iwm_priv *)data; 239 struct iwm_priv *iwm = (struct iwm_priv *)data;
@@ -225,7 +265,9 @@ int iwm_priv_init(struct iwm_priv *iwm)
225 iwm->scan_id = 1; 265 iwm->scan_id = 1;
226 INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request); 266 INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
227 INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work); 267 INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work);
268 INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work);
228 INIT_WORK(&iwm->reset_worker, iwm_reset_worker); 269 INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
270 INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker);
229 INIT_LIST_HEAD(&iwm->bss_list); 271 INIT_LIST_HEAD(&iwm->bss_list);
230 272
231 skb_queue_head_init(&iwm->rx_list); 273 skb_queue_head_init(&iwm->rx_list);
@@ -586,6 +628,7 @@ static int __iwm_up(struct iwm_priv *iwm)
586{ 628{
587 int ret; 629 int ret;
588 struct iwm_notif *notif_reboot, *notif_ack = NULL; 630 struct iwm_notif *notif_reboot, *notif_ack = NULL;
631 struct wiphy *wiphy = iwm_to_wiphy(iwm);
589 632
590 ret = iwm_bus_enable(iwm); 633 ret = iwm_bus_enable(iwm);
591 if (ret) { 634 if (ret) {
@@ -647,6 +690,9 @@ static int __iwm_up(struct iwm_priv *iwm)
647 goto err_disable; 690 goto err_disable;
648 } 691 }
649 692
693 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s",
694 iwm->lmac_version, iwm->umac_version);
695
650 /* We configure the UMAC and enable the wifi module */ 696 /* We configure the UMAC and enable the wifi module */
651 ret = iwm_send_umac_config(iwm, 697 ret = iwm_send_umac_config(iwm,
652 cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) | 698 cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 35ec006c2d2c..4f8dbdd7b917 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -152,6 +152,7 @@ void iwm_if_free(struct iwm_priv *iwm)
152 if (!iwm_to_ndev(iwm)) 152 if (!iwm_to_ndev(iwm))
153 return; 153 return;
154 154
155 cancel_delayed_work_sync(&iwm->ct_kill_delay);
155 free_netdev(iwm_to_ndev(iwm)); 156 free_netdev(iwm_to_ndev(iwm));
156 iwm_priv_deinit(iwm); 157 iwm_priv_deinit(iwm);
157 kfree(iwm->umac_profile); 158 kfree(iwm->umac_profile);
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 40dbcbc16593..3ad95dc0dd8d 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -422,7 +422,9 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
422 if (IS_ERR(ticket_node)) 422 if (IS_ERR(ticket_node))
423 return PTR_ERR(ticket_node); 423 return PTR_ERR(ticket_node);
424 424
425 IWM_DBG_RX(iwm, DBG, "TICKET RELEASE(%d)\n", 425 IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
426 ticket->action == IWM_RX_TICKET_RELEASE ?
427 "RELEASE" : "DROP",
426 ticket->id); 428 ticket->id);
427 list_add_tail(&ticket_node->node, &iwm->rx_tickets); 429 list_add_tail(&ticket_node->node, &iwm->rx_tickets);
428 430
@@ -499,6 +501,18 @@ static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
499 return 0; 501 return 0;
500} 502}
501 503
504static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
505{
506 if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
507 iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
508 (iwm->umac_profile->sec.ucast_cipher ==
509 iwm->umac_profile->sec.mcast_cipher) &&
510 (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
511 return 1;
512
513 return 0;
514}
515
502static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf, 516static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
503 unsigned long buf_size, 517 unsigned long buf_size,
504 struct iwm_wifi_cmd *cmd) 518 struct iwm_wifi_cmd *cmd)
@@ -564,11 +578,17 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
564 goto ibss; 578 goto ibss;
565 579
566 if (!test_bit(IWM_STATUS_RESETTING, &iwm->status)) 580 if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
567 cfg80211_connect_result(iwm_to_ndev(iwm), 581 if (!iwm_is_open_wep_profile(iwm)) {
568 complete->bssid, 582 cfg80211_connect_result(iwm_to_ndev(iwm),
569 NULL, 0, NULL, 0, 583 complete->bssid,
570 WLAN_STATUS_UNSPECIFIED_FAILURE, 584 NULL, 0, NULL, 0,
571 GFP_KERNEL); 585 WLAN_STATUS_UNSPECIFIED_FAILURE,
586 GFP_KERNEL);
587 } else {
588 /* Let's try shared WEP auth */
589 IWM_ERR(iwm, "Trying WEP shared auth\n");
590 schedule_work(&iwm->auth_retry_worker);
591 }
572 else 592 else
573 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, 593 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
574 GFP_KERNEL); 594 GFP_KERNEL);
@@ -712,6 +732,19 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
712 return 0; 732 return 0;
713} 733}
714 734
735static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
736 unsigned long buf_size,
737 struct iwm_wifi_cmd *cmd)
738{
739 struct wiphy *wiphy = iwm_to_wiphy(iwm);
740
741 IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");
742
743 wiphy_rfkill_set_hw_state(wiphy, true);
744
745 return 0;
746}
747
715static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf, 748static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
716 unsigned long buf_size, 749 unsigned long buf_size,
717 struct iwm_wifi_cmd *cmd) 750 struct iwm_wifi_cmd *cmd)
@@ -898,6 +931,8 @@ static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
898 case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED: 931 case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
899 IWM_DBG_MLME(iwm, DBG, "Extended IE required\n"); 932 IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
900 break; 933 break;
934 case WIFI_IF_NTFY_RADIO_PREEMPTION:
935 return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
901 case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED: 936 case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
902 return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd); 937 return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
903 case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED: 938 case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
@@ -1055,8 +1090,14 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
1055 unsigned long buf_size, 1090 unsigned long buf_size,
1056 struct iwm_wifi_cmd *cmd) 1091 struct iwm_wifi_cmd *cmd)
1057{ 1092{
1058 struct iwm_umac_wifi_if *hdr = 1093 struct iwm_umac_wifi_if *hdr;
1059 (struct iwm_umac_wifi_if *)cmd->buf.payload; 1094
1095 if (cmd == NULL) {
1096 IWM_ERR(iwm, "Couldn't find expected wifi command\n");
1097 return -EINVAL;
1098 }
1099
1100 hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;
1060 1101
1061 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: " 1102 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
1062 "oid is 0x%x\n", hdr->oid); 1103 "oid is 0x%x\n", hdr->oid);
@@ -1078,6 +1119,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
1078 return 0; 1119 return 0;
1079} 1120}
1080 1121
1122#define CT_KILL_DELAY (30 * HZ)
1081static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf, 1123static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
1082 unsigned long buf_size, struct iwm_wifi_cmd *cmd) 1124 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
1083{ 1125{
@@ -1090,7 +1132,20 @@ static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
1090 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF", 1132 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
1091 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF"); 1133 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");
1092 1134
1093 wiphy_rfkill_set_hw_state(wiphy, flags & IWM_CARD_STATE_HW_DISABLED); 1135 if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
1136 /*
1137 * We got a CTKILL event: We bring the interface down in
1138 * oder to cool the device down, and try to bring it up
1139 * 30 seconds later. If it's still too hot, we'll go through
1140 * this code path again.
1141 */
1142 cancel_delayed_work_sync(&iwm->ct_kill_delay);
1143 schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
1144 }
1145
1146 wiphy_rfkill_set_hw_state(wiphy, flags &
1147 (IWM_CARD_STATE_HW_DISABLED |
1148 IWM_CARD_STATE_CTKILL_DISABLED));
1094 1149
1095 return 0; 1150 return 0;
1096} 1151}
@@ -1281,6 +1336,14 @@ int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
1281 1336
1282 switch (le32_to_cpu(hdr->cmd)) { 1337 switch (le32_to_cpu(hdr->cmd)) {
1283 case UMAC_REBOOT_BARKER: 1338 case UMAC_REBOOT_BARKER:
1339 if (test_bit(IWM_STATUS_READY, &iwm->status)) {
1340 IWM_ERR(iwm, "Unexpected BARKER\n");
1341
1342 schedule_work(&iwm->reset_worker);
1343
1344 return 0;
1345 }
1346
1284 return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION, 1347 return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
1285 IWM_SRC_UDMA, buf, buf_size); 1348 IWM_SRC_UDMA, buf, buf_size);
1286 case UMAC_ACK_BARKER: 1349 case UMAC_ACK_BARKER:
@@ -1443,7 +1506,8 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
1443 } 1506 }
1444 break; 1507 break;
1445 case IWM_RX_TICKET_DROP: 1508 case IWM_RX_TICKET_DROP:
1446 IWM_DBG_RX(iwm, DBG, "DROP packet\n"); 1509 IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n",
1510 le16_to_cpu(ticket_node->ticket->flags));
1447 kfree_skb(packet->skb); 1511 kfree_skb(packet->skb);
1448 break; 1512 break;
1449 default: 1513 default:
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 8b1de84003ca..cf86294f719b 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -224,8 +224,6 @@ static int if_sdio_disable(struct iwm_priv *iwm)
224 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); 224 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
225 int ret; 225 int ret;
226 226
227 iwm_reset(iwm);
228
229 sdio_claim_host(hw->func); 227 sdio_claim_host(hw->func);
230 sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret); 228 sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
231 if (ret < 0) 229 if (ret < 0)
@@ -237,6 +235,8 @@ static int if_sdio_disable(struct iwm_priv *iwm)
237 235
238 iwm_sdio_rx_free(hw); 236 iwm_sdio_rx_free(hw);
239 237
238 iwm_reset(iwm);
239
240 IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n"); 240 IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");
241 241
242 return 0; 242 return 0;
@@ -493,8 +493,10 @@ static void iwm_sdio_remove(struct sdio_func *func)
493} 493}
494 494
495static const struct sdio_device_id iwm_sdio_ids[] = { 495static const struct sdio_device_id iwm_sdio_ids[] = {
496 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 496 /* Global/AGN SKU */
497 SDIO_DEVICE_ID_INTEL_IWMC3200WIFI) }, 497 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) },
498 /* BGN SKU */
499 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) },
498 { /* end: all zeroes */ }, 500 { /* end: all zeroes */ },
499}; 501};
500MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids); 502MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index c5a14ae3160a..be903543bb47 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -687,6 +687,9 @@ struct iwm_umac_notif_rx_ticket {
687/* Tx/Rx rates window (number of max of last update window per second) */ 687/* Tx/Rx rates window (number of max of last update window per second) */
688#define UMAC_NTF_RATE_SAMPLE_NR 4 688#define UMAC_NTF_RATE_SAMPLE_NR 4
689 689
690/* Max numbers of bits required to go through all antennae in bitmasks */
691#define UMAC_PHY_NUM_CHAINS 3
692
690#define IWM_UMAC_MGMT_TID 8 693#define IWM_UMAC_MGMT_TID 8
691#define IWM_UMAC_TID_NR 8 694#define IWM_UMAC_TID_NR 8
692 695
@@ -697,9 +700,11 @@ struct iwm_umac_notif_stats {
697 __le16 tid_load[IWM_UMAC_TID_NR + 2]; /* 1 non-QoS + 1 dword align */ 700 __le16 tid_load[IWM_UMAC_TID_NR + 2]; /* 1 non-QoS + 1 dword align */
698 __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR]; 701 __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR];
699 __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR]; 702 __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR];
703 __le32 chain_energy[UMAC_PHY_NUM_CHAINS];
700 s32 rssi_dbm; 704 s32 rssi_dbm;
701 s32 noise_dbm; 705 s32 noise_dbm;
702 __le32 supp_rates; 706 __le32 supp_rates;
707 __le32 supp_ht_rates;
703 __le32 missed_beacons; 708 __le32 missed_beacons;
704 __le32 rx_beacons; 709 __le32 rx_beacons;
705 __le32 rx_dir_pkts; 710 __le32 rx_dir_pkts;
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
deleted file mode 100644
index 5c6968101f0d..000000000000
--- a/drivers/net/wireless/libertas/11d.c
+++ /dev/null
@@ -1,696 +0,0 @@
1/**
2 * This file contains functions for 802.11D.
3 */
4#include <linux/ctype.h>
5#include <linux/kernel.h>
6#include <linux/wireless.h>
7
8#include "host.h"
9#include "decl.h"
10#include "11d.h"
11#include "dev.h"
12#include "wext.h"
13
14#define TX_PWR_DEFAULT 10
15
16static struct region_code_mapping region_code_mapping[] = {
17 {"US ", 0x10}, /* US FCC */
18 {"CA ", 0x10}, /* IC Canada */
19 {"SG ", 0x10}, /* Singapore */
20 {"EU ", 0x30}, /* ETSI */
21 {"AU ", 0x30}, /* Australia */
22 {"KR ", 0x30}, /* Republic Of Korea */
23 {"ES ", 0x31}, /* Spain */
24 {"FR ", 0x32}, /* France */
25 {"JP ", 0x40}, /* Japan */
26};
27
28/* Following 2 structure defines the supported channels */
29static struct chan_freq_power channel_freq_power_UN_BG[] = {
30 {1, 2412, TX_PWR_DEFAULT},
31 {2, 2417, TX_PWR_DEFAULT},
32 {3, 2422, TX_PWR_DEFAULT},
33 {4, 2427, TX_PWR_DEFAULT},
34 {5, 2432, TX_PWR_DEFAULT},
35 {6, 2437, TX_PWR_DEFAULT},
36 {7, 2442, TX_PWR_DEFAULT},
37 {8, 2447, TX_PWR_DEFAULT},
38 {9, 2452, TX_PWR_DEFAULT},
39 {10, 2457, TX_PWR_DEFAULT},
40 {11, 2462, TX_PWR_DEFAULT},
41 {12, 2467, TX_PWR_DEFAULT},
42 {13, 2472, TX_PWR_DEFAULT},
43 {14, 2484, TX_PWR_DEFAULT}
44};
45
46static u8 lbs_region_2_code(u8 *region)
47{
48 u8 i;
49
50 for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
51 region[i] = toupper(region[i]);
52
53 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
54 if (!memcmp(region, region_code_mapping[i].region,
55 COUNTRY_CODE_LEN))
56 return (region_code_mapping[i].code);
57 }
58
59 /* default is US */
60 return (region_code_mapping[0].code);
61}
62
63static u8 *lbs_code_2_region(u8 code)
64{
65 u8 i;
66
67 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
68 if (region_code_mapping[i].code == code)
69 return (region_code_mapping[i].region);
70 }
71 /* default is US */
72 return (region_code_mapping[0].region);
73}
74
75/**
76 * @brief This function finds the nrchan-th chan after the firstchan
77 * @param band band
78 * @param firstchan first channel number
79 * @param nrchan number of channels
80 * @return the nrchan-th chan number
81*/
82static u8 lbs_get_chan_11d(u8 firstchan, u8 nrchan, u8 *chan)
83/*find the nrchan-th chan after the firstchan*/
84{
85 u8 i;
86 struct chan_freq_power *cfp;
87 u8 cfp_no;
88
89 cfp = channel_freq_power_UN_BG;
90 cfp_no = ARRAY_SIZE(channel_freq_power_UN_BG);
91
92 for (i = 0; i < cfp_no; i++) {
93 if ((cfp + i)->channel == firstchan) {
94 lbs_deb_11d("firstchan found\n");
95 break;
96 }
97 }
98
99 if (i < cfp_no) {
100 /*if beyond the boundary */
101 if (i + nrchan < cfp_no) {
102 *chan = (cfp + i + nrchan)->channel;
103 return 1;
104 }
105 }
106
107 return 0;
108}
109
110/**
111 * @brief This function Checks if chan txpwr is learned from AP/IBSS
112 * @param chan chan number
113 * @param parsed_region_chan pointer to parsed_region_chan_11d
114 * @return TRUE; FALSE
115*/
116static u8 lbs_channel_known_11d(u8 chan,
117 struct parsed_region_chan_11d * parsed_region_chan)
118{
119 struct chan_power_11d *chanpwr = parsed_region_chan->chanpwr;
120 u8 nr_chan = parsed_region_chan->nr_chan;
121 u8 i = 0;
122
123 lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)chanpwr,
124 sizeof(struct chan_power_11d) * nr_chan);
125
126 for (i = 0; i < nr_chan; i++) {
127 if (chan == chanpwr[i].chan) {
128 lbs_deb_11d("found chan %d\n", chan);
129 return 1;
130 }
131 }
132
133 lbs_deb_11d("chan %d not found\n", chan);
134 return 0;
135}
136
137u32 lbs_chan_2_freq(u8 chan)
138{
139 struct chan_freq_power *cf;
140 u16 i;
141 u32 freq = 0;
142
143 cf = channel_freq_power_UN_BG;
144
145 for (i = 0; i < ARRAY_SIZE(channel_freq_power_UN_BG); i++) {
146 if (chan == cf[i].channel)
147 freq = cf[i].freq;
148 }
149
150 return freq;
151}
152
153static int generate_domain_info_11d(struct parsed_region_chan_11d
154 *parsed_region_chan,
155 struct lbs_802_11d_domain_reg *domaininfo)
156{
157 u8 nr_subband = 0;
158
159 u8 nr_chan = parsed_region_chan->nr_chan;
160 u8 nr_parsedchan = 0;
161
162 u8 firstchan = 0, nextchan = 0, maxpwr = 0;
163
164 u8 i, flag = 0;
165
166 memcpy(domaininfo->countrycode, parsed_region_chan->countrycode,
167 COUNTRY_CODE_LEN);
168
169 lbs_deb_11d("nrchan %d\n", nr_chan);
170 lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)parsed_region_chan,
171 sizeof(struct parsed_region_chan_11d));
172
173 for (i = 0; i < nr_chan; i++) {
174 if (!flag) {
175 flag = 1;
176 nextchan = firstchan =
177 parsed_region_chan->chanpwr[i].chan;
178 maxpwr = parsed_region_chan->chanpwr[i].pwr;
179 nr_parsedchan = 1;
180 continue;
181 }
182
183 if (parsed_region_chan->chanpwr[i].chan == nextchan + 1 &&
184 parsed_region_chan->chanpwr[i].pwr == maxpwr) {
185 nextchan++;
186 nr_parsedchan++;
187 } else {
188 domaininfo->subband[nr_subband].firstchan = firstchan;
189 domaininfo->subband[nr_subband].nrchan =
190 nr_parsedchan;
191 domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
192 nr_subband++;
193 nextchan = firstchan =
194 parsed_region_chan->chanpwr[i].chan;
195 maxpwr = parsed_region_chan->chanpwr[i].pwr;
196 }
197 }
198
199 if (flag) {
200 domaininfo->subband[nr_subband].firstchan = firstchan;
201 domaininfo->subband[nr_subband].nrchan = nr_parsedchan;
202 domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
203 nr_subband++;
204 }
205 domaininfo->nr_subband = nr_subband;
206
207 lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband);
208 lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo,
209 COUNTRY_CODE_LEN + 1 +
210 sizeof(struct ieee_subbandset) * nr_subband);
211 return 0;
212}
213
214/**
215 * @brief This function generates parsed_region_chan from Domain Info learned from AP/IBSS
216 * @param region_chan pointer to struct region_channel
217 * @param *parsed_region_chan pointer to parsed_region_chan_11d
218 * @return N/A
219*/
220static void lbs_generate_parsed_region_chan_11d(struct region_channel *region_chan,
221 struct parsed_region_chan_11d *
222 parsed_region_chan)
223{
224 u8 i;
225 struct chan_freq_power *cfp;
226
227 if (region_chan == NULL) {
228 lbs_deb_11d("region_chan is NULL\n");
229 return;
230 }
231
232 cfp = region_chan->CFP;
233 if (cfp == NULL) {
234 lbs_deb_11d("cfp is NULL \n");
235 return;
236 }
237
238 parsed_region_chan->band = region_chan->band;
239 parsed_region_chan->region = region_chan->region;
240 memcpy(parsed_region_chan->countrycode,
241 lbs_code_2_region(region_chan->region), COUNTRY_CODE_LEN);
242
243 lbs_deb_11d("region 0x%x, band %d\n", parsed_region_chan->region,
244 parsed_region_chan->band);
245
246 for (i = 0; i < region_chan->nrcfp; i++, cfp++) {
247 parsed_region_chan->chanpwr[i].chan = cfp->channel;
248 parsed_region_chan->chanpwr[i].pwr = cfp->maxtxpower;
249 lbs_deb_11d("chan %d, pwr %d\n",
250 parsed_region_chan->chanpwr[i].chan,
251 parsed_region_chan->chanpwr[i].pwr);
252 }
253 parsed_region_chan->nr_chan = region_chan->nrcfp;
254
255 lbs_deb_11d("nrchan %d\n", parsed_region_chan->nr_chan);
256
257 return;
258}
259
260/**
261 * @brief generate parsed_region_chan from Domain Info learned from AP/IBSS
262 * @param region region ID
263 * @param band band
264 * @param chan chan
265 * @return TRUE;FALSE
266*/
267static u8 lbs_region_chan_supported_11d(u8 region, u8 chan)
268{
269 struct chan_freq_power *cfp;
270 int cfp_no;
271 u8 idx;
272 int ret = 0;
273
274 lbs_deb_enter(LBS_DEB_11D);
275
276 cfp = lbs_get_region_cfp_table(region, &cfp_no);
277 if (cfp == NULL)
278 return 0;
279
280 for (idx = 0; idx < cfp_no; idx++) {
281 if (chan == (cfp + idx)->channel) {
282 /* If Mrvl Chip Supported? */
283 if ((cfp + idx)->unsupported) {
284 ret = 0;
285 } else {
286 ret = 1;
287 }
288 goto done;
289 }
290 }
291
292 /*chan is not in the region table */
293
294done:
295 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
296 return ret;
297}
298
299/**
300 * @brief This function checks if chan txpwr is learned from AP/IBSS
301 * @param chan chan number
302 * @param parsed_region_chan pointer to parsed_region_chan_11d
303 * @return 0
304*/
305static int parse_domain_info_11d(struct ieee_ie_country_info_full_set *countryinfo,
306 u8 band,
307 struct parsed_region_chan_11d *parsed_region_chan)
308{
309 u8 nr_subband, nrchan;
310 u8 lastchan, firstchan;
311 u8 region;
312 u8 curchan = 0;
313
314 u8 idx = 0; /*chan index in parsed_region_chan */
315
316 u8 j, i;
317
318 lbs_deb_enter(LBS_DEB_11D);
319
320 /*validation Rules:
321 1. valid region Code
322 2. First Chan increment
323 3. channel range no overlap
324 4. channel is valid?
325 5. channel is supported by region?
326 6. Others
327 */
328
329 lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30);
330
331 if ((*(countryinfo->countrycode)) == 0
332 || (countryinfo->header.len <= COUNTRY_CODE_LEN)) {
333 /* No region Info or Wrong region info: treat as No 11D info */
334 goto done;
335 }
336
337 /*Step1: check region_code */
338 parsed_region_chan->region = region =
339 lbs_region_2_code(countryinfo->countrycode);
340
341 lbs_deb_11d("regioncode=%x\n", (u8) parsed_region_chan->region);
342 lbs_deb_hex(LBS_DEB_11D, "countrycode", (char *)countryinfo->countrycode,
343 COUNTRY_CODE_LEN);
344
345 parsed_region_chan->band = band;
346
347 memcpy(parsed_region_chan->countrycode, countryinfo->countrycode,
348 COUNTRY_CODE_LEN);
349
350 nr_subband = (countryinfo->header.len - COUNTRY_CODE_LEN) /
351 sizeof(struct ieee_subbandset);
352
353 for (j = 0, lastchan = 0; j < nr_subband; j++) {
354
355 if (countryinfo->subband[j].firstchan <= lastchan) {
356 /*Step2&3. Check First Chan Num increment and no overlap */
357 lbs_deb_11d("chan %d>%d, overlap\n",
358 countryinfo->subband[j].firstchan, lastchan);
359 continue;
360 }
361
362 firstchan = countryinfo->subband[j].firstchan;
363 nrchan = countryinfo->subband[j].nrchan;
364
365 for (i = 0; idx < MAX_NO_OF_CHAN && i < nrchan; i++) {
366 /*step4: channel is supported? */
367
368 if (!lbs_get_chan_11d(firstchan, i, &curchan)) {
369 /* Chan is not found in UN table */
370 lbs_deb_11d("chan is not supported: %d \n", i);
371 break;
372 }
373
374 lastchan = curchan;
375
376 if (lbs_region_chan_supported_11d(region, curchan)) {
377 /*step5: Check if curchan is supported by mrvl in region */
378 parsed_region_chan->chanpwr[idx].chan = curchan;
379 parsed_region_chan->chanpwr[idx].pwr =
380 countryinfo->subband[j].maxtxpwr;
381 idx++;
382 } else {
383 /*not supported and ignore the chan */
384 lbs_deb_11d(
385 "i %d, chan %d unsupported in region %x, band %d\n",
386 i, curchan, region, band);
387 }
388 }
389
390 /*Step6: Add other checking if any */
391
392 }
393
394 parsed_region_chan->nr_chan = idx;
395
396 lbs_deb_11d("nrchan=%x\n", parsed_region_chan->nr_chan);
397 lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (u8 *) parsed_region_chan,
398 2 + COUNTRY_CODE_LEN + sizeof(struct parsed_region_chan_11d) * idx);
399
400done:
401 lbs_deb_enter(LBS_DEB_11D);
402 return 0;
403}
404
405/**
406 * @brief This function calculates the scan type for channels
407 * @param chan chan number
408 * @param parsed_region_chan pointer to parsed_region_chan_11d
409 * @return PASSIVE if chan is unknown; ACTIVE if chan is known
410*/
411u8 lbs_get_scan_type_11d(u8 chan,
412 struct parsed_region_chan_11d * parsed_region_chan)
413{
414 u8 scan_type = CMD_SCAN_TYPE_PASSIVE;
415
416 lbs_deb_enter(LBS_DEB_11D);
417
418 if (lbs_channel_known_11d(chan, parsed_region_chan)) {
419 lbs_deb_11d("found, do active scan\n");
420 scan_type = CMD_SCAN_TYPE_ACTIVE;
421 } else {
422 lbs_deb_11d("not found, do passive scan\n");
423 }
424
425 lbs_deb_leave_args(LBS_DEB_11D, "ret scan_type %d", scan_type);
426 return scan_type;
427
428}
429
430void lbs_init_11d(struct lbs_private *priv)
431{
432 priv->enable11d = 0;
433 memset(&(priv->parsed_region_chan), 0,
434 sizeof(struct parsed_region_chan_11d));
435 return;
436}
437
438/**
439 * @brief This function sets DOMAIN INFO to FW
440 * @param priv pointer to struct lbs_private
441 * @return 0; -1
442*/
443static int set_domain_info_11d(struct lbs_private *priv)
444{
445 int ret;
446
447 if (!priv->enable11d) {
448 lbs_deb_11d("dnld domain Info with 11d disabled\n");
449 return 0;
450 }
451
452 ret = lbs_prepare_and_send_command(priv, CMD_802_11D_DOMAIN_INFO,
453 CMD_ACT_SET,
454 CMD_OPTION_WAITFORRSP, 0, NULL);
455 if (ret)
456 lbs_deb_11d("fail to dnld domain info\n");
457
458 return ret;
459}
460
461/**
462 * @brief This function setups scan channels
463 * @param priv pointer to struct lbs_private
464 * @param band band
465 * @return 0
466*/
467int lbs_set_universaltable(struct lbs_private *priv, u8 band)
468{
469 u16 size = sizeof(struct chan_freq_power);
470 u16 i = 0;
471
472 memset(priv->universal_channel, 0,
473 sizeof(priv->universal_channel));
474
475 priv->universal_channel[i].nrcfp =
476 sizeof(channel_freq_power_UN_BG) / size;
477 lbs_deb_11d("BG-band nrcfp %d\n",
478 priv->universal_channel[i].nrcfp);
479
480 priv->universal_channel[i].CFP = channel_freq_power_UN_BG;
481 priv->universal_channel[i].valid = 1;
482 priv->universal_channel[i].region = UNIVERSAL_REGION_CODE;
483 priv->universal_channel[i].band = band;
484 i++;
485
486 return 0;
487}
488
489/**
490 * @brief This function implements command CMD_802_11D_DOMAIN_INFO
491 * @param priv pointer to struct lbs_private
492 * @param cmd pointer to cmd buffer
493 * @param cmdno cmd ID
494 * @param cmdOption cmd action
495 * @return 0
496*/
497int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
498 struct cmd_ds_command *cmd, u16 cmdno,
499 u16 cmdoption)
500{
501 struct cmd_ds_802_11d_domain_info *pdomaininfo =
502 &cmd->params.domaininfo;
503 struct mrvl_ie_domain_param_set *domain = &pdomaininfo->domain;
504 u8 nr_subband = priv->domainreg.nr_subband;
505
506 lbs_deb_enter(LBS_DEB_11D);
507
508 lbs_deb_11d("nr_subband=%x\n", nr_subband);
509
510 cmd->command = cpu_to_le16(cmdno);
511 pdomaininfo->action = cpu_to_le16(cmdoption);
512 if (cmdoption == CMD_ACT_GET) {
513 cmd->size =
514 cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
515 lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd,
516 le16_to_cpu(cmd->size));
517 goto done;
518 }
519
520 domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
521 memcpy(domain->countrycode, priv->domainreg.countrycode,
522 sizeof(domain->countrycode));
523
524 domain->header.len =
525 cpu_to_le16(nr_subband * sizeof(struct ieee_subbandset) +
526 sizeof(domain->countrycode));
527
528 if (nr_subband) {
529 memcpy(domain->subband, priv->domainreg.subband,
530 nr_subband * sizeof(struct ieee_subbandset));
531
532 cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
533 le16_to_cpu(domain->header.len) +
534 sizeof(struct mrvl_ie_header) +
535 S_DS_GEN);
536 } else {
537 cmd->size =
538 cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
539 }
540
541 lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd, le16_to_cpu(cmd->size));
542
543done:
544 lbs_deb_enter(LBS_DEB_11D);
545 return 0;
546}
547
548/**
549 * @brief This function parses countryinfo from AP and download country info to FW
550 * @param priv pointer to struct lbs_private
551 * @param resp pointer to command response buffer
552 * @return 0; -1
553 */
554int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
555{
556 struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp;
557 struct mrvl_ie_domain_param_set *domain = &domaininfo->domain;
558 u16 action = le16_to_cpu(domaininfo->action);
559 s16 ret = 0;
560 u8 nr_subband = 0;
561
562 lbs_deb_enter(LBS_DEB_11D);
563
564 lbs_deb_hex(LBS_DEB_11D, "domain info resp", (u8 *) resp,
565 (int)le16_to_cpu(resp->size));
566
567 nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) /
568 sizeof(struct ieee_subbandset);
569
570 lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband);
571
572 if (nr_subband > MRVDRV_MAX_SUBBAND_802_11D) {
573 lbs_deb_11d("Invalid Numrer of Subband returned!!\n");
574 return -1;
575 }
576
577 switch (action) {
578 case CMD_ACT_SET: /*Proc Set action */
579 break;
580
581 case CMD_ACT_GET:
582 break;
583 default:
584 lbs_deb_11d("Invalid action:%d\n", domaininfo->action);
585 ret = -1;
586 break;
587 }
588
589 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
590 return ret;
591}
592
593/**
594 * @brief This function parses countryinfo from AP and download country info to FW
595 * @param priv pointer to struct lbs_private
596 * @return 0; -1
597 */
598int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
599 struct bss_descriptor * bss)
600{
601 int ret;
602
603 lbs_deb_enter(LBS_DEB_11D);
604 if (priv->enable11d) {
605 memset(&priv->parsed_region_chan, 0,
606 sizeof(struct parsed_region_chan_11d));
607 ret = parse_domain_info_11d(&bss->countryinfo, 0,
608 &priv->parsed_region_chan);
609
610 if (ret == -1) {
611 lbs_deb_11d("error parsing domain_info from AP\n");
612 goto done;
613 }
614
615 memset(&priv->domainreg, 0,
616 sizeof(struct lbs_802_11d_domain_reg));
617 generate_domain_info_11d(&priv->parsed_region_chan,
618 &priv->domainreg);
619
620 ret = set_domain_info_11d(priv);
621
622 if (ret) {
623 lbs_deb_11d("error setting domain info\n");
624 goto done;
625 }
626 }
627 ret = 0;
628
629done:
630 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
631 return ret;
632}
633
634/**
635 * @brief This function generates 11D info from user specified regioncode and download to FW
636 * @param priv pointer to struct lbs_private
637 * @return 0; -1
638 */
639int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv)
640{
641 int ret;
642 struct region_channel *region_chan;
643 u8 j;
644
645 lbs_deb_enter(LBS_DEB_11D);
646 lbs_deb_11d("curbssparams.band %d\n", priv->curbssparams.band);
647
648 if (priv->enable11d) {
649 /* update parsed_region_chan_11; dnld domaininf to FW */
650
651 for (j = 0; j < ARRAY_SIZE(priv->region_channel); j++) {
652 region_chan = &priv->region_channel[j];
653
654 lbs_deb_11d("%d region_chan->band %d\n", j,
655 region_chan->band);
656
657 if (!region_chan || !region_chan->valid
658 || !region_chan->CFP)
659 continue;
660 if (region_chan->band != priv->curbssparams.band)
661 continue;
662 break;
663 }
664
665 if (j >= ARRAY_SIZE(priv->region_channel)) {
666 lbs_deb_11d("region_chan not found, band %d\n",
667 priv->curbssparams.band);
668 ret = -1;
669 goto done;
670 }
671
672 memset(&priv->parsed_region_chan, 0,
673 sizeof(struct parsed_region_chan_11d));
674 lbs_generate_parsed_region_chan_11d(region_chan,
675 &priv->
676 parsed_region_chan);
677
678 memset(&priv->domainreg, 0,
679 sizeof(struct lbs_802_11d_domain_reg));
680 generate_domain_info_11d(&priv->parsed_region_chan,
681 &priv->domainreg);
682
683 ret = set_domain_info_11d(priv);
684
685 if (ret) {
686 lbs_deb_11d("error setting domain info\n");
687 goto done;
688 }
689
690 }
691 ret = 0;
692
693done:
694 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
695 return ret;
696}
diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h
deleted file mode 100644
index fb75d3e321a0..000000000000
--- a/drivers/net/wireless/libertas/11d.h
+++ /dev/null
@@ -1,105 +0,0 @@
1/**
2 * This header file contains data structures and
3 * function declarations of 802.11d
4 */
5#ifndef _LBS_11D_
6#define _LBS_11D_
7
8#include "types.h"
9#include "defs.h"
10
11#define UNIVERSAL_REGION_CODE 0xff
12
13/** (Beaconsize(256)-5(IEId,len,contrystr(3))/3(FirstChan,NoOfChan,MaxPwr)
14 */
15#define MRVDRV_MAX_SUBBAND_802_11D 83
16
17#define COUNTRY_CODE_LEN 3
18#define MAX_NO_OF_CHAN 40
19
20struct cmd_ds_command;
21
22/** Data structure for Country IE*/
23struct ieee_subbandset {
24 u8 firstchan;
25 u8 nrchan;
26 u8 maxtxpwr;
27} __attribute__ ((packed));
28
29struct ieee_ie_country_info_set {
30 struct ieee_ie_header header;
31
32 u8 countrycode[COUNTRY_CODE_LEN];
33 struct ieee_subbandset subband[1];
34};
35
36struct ieee_ie_country_info_full_set {
37 struct ieee_ie_header header;
38
39 u8 countrycode[COUNTRY_CODE_LEN];
40 struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
41} __attribute__ ((packed));
42
43struct mrvl_ie_domain_param_set {
44 struct mrvl_ie_header header;
45
46 u8 countrycode[COUNTRY_CODE_LEN];
47 struct ieee_subbandset subband[1];
48} __attribute__ ((packed));
49
50struct cmd_ds_802_11d_domain_info {
51 __le16 action;
52 struct mrvl_ie_domain_param_set domain;
53} __attribute__ ((packed));
54
55/** domain regulatory information */
56struct lbs_802_11d_domain_reg {
57 /** country Code*/
58 u8 countrycode[COUNTRY_CODE_LEN];
59 /** No. of subband*/
60 u8 nr_subband;
61 struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
62};
63
64struct chan_power_11d {
65 u8 chan;
66 u8 pwr;
67} __attribute__ ((packed));
68
69struct parsed_region_chan_11d {
70 u8 band;
71 u8 region;
72 s8 countrycode[COUNTRY_CODE_LEN];
73 struct chan_power_11d chanpwr[MAX_NO_OF_CHAN];
74 u8 nr_chan;
75} __attribute__ ((packed));
76
77struct region_code_mapping {
78 u8 region[COUNTRY_CODE_LEN];
79 u8 code;
80};
81
82struct lbs_private;
83
84u8 lbs_get_scan_type_11d(u8 chan,
85 struct parsed_region_chan_11d *parsed_region_chan);
86
87u32 lbs_chan_2_freq(u8 chan);
88
89void lbs_init_11d(struct lbs_private *priv);
90
91int lbs_set_universaltable(struct lbs_private *priv, u8 band);
92
93int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
94 struct cmd_ds_command *cmd, u16 cmdno,
95 u16 cmdOption);
96
97int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp);
98
99struct bss_descriptor;
100int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
101 struct bss_descriptor * bss);
102
103int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv);
104
105#endif
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index e5584dd1c79a..fa37039e0eae 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,4 +1,3 @@
1libertas-y += 11d.o
2libertas-y += assoc.o 1libertas-y += assoc.o
3libertas-y += cfg.o 2libertas-y += cfg.o
4libertas-y += cmd.o 3libertas-y += cmd.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index dd8732611ba9..751067369ba8 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -23,6 +23,13 @@ static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
23 */ 23 */
24#define CAPINFO_MASK (~(0xda00)) 24#define CAPINFO_MASK (~(0xda00))
25 25
26/**
27 * 802.11b/g supported bitrates (in 500Kb/s units)
28 */
29u8 lbs_bg_rates[MAX_RATES] =
30 { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
310x00, 0x00 };
32
26 33
27/** 34/**
28 * @brief This function finds common rates between rates and card rates. 35 * @brief This function finds common rates between rates and card rates.
@@ -147,6 +154,397 @@ static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth
147} 154}
148 155
149 156
157int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
158 struct assoc_request *assoc)
159{
160 struct cmd_ds_802_11_set_wep cmd;
161 int ret = 0;
162
163 lbs_deb_enter(LBS_DEB_CMD);
164
165 memset(&cmd, 0, sizeof(cmd));
166 cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
167 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
168
169 cmd.action = cpu_to_le16(cmd_action);
170
171 if (cmd_action == CMD_ACT_ADD) {
172 int i;
173
174 /* default tx key index */
175 cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
176 CMD_WEP_KEY_INDEX_MASK);
177
178 /* Copy key types and material to host command structure */
179 for (i = 0; i < 4; i++) {
180 struct enc_key *pkey = &assoc->wep_keys[i];
181
182 switch (pkey->len) {
183 case KEY_LEN_WEP_40:
184 cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
185 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
186 lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
187 break;
188 case KEY_LEN_WEP_104:
189 cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
190 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
191 lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
192 break;
193 case 0:
194 break;
195 default:
196 lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
197 i, pkey->len);
198 ret = -1;
199 goto done;
200 break;
201 }
202 }
203 } else if (cmd_action == CMD_ACT_REMOVE) {
204 /* ACT_REMOVE clears _all_ WEP keys */
205
206 /* default tx key index */
207 cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
208 CMD_WEP_KEY_INDEX_MASK);
209 lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
210 }
211
212 ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
213done:
214 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
215 return ret;
216}
217
218int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
219 uint16_t *enable)
220{
221 struct cmd_ds_802_11_enable_rsn cmd;
222 int ret;
223
224 lbs_deb_enter(LBS_DEB_CMD);
225
226 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
227 cmd.action = cpu_to_le16(cmd_action);
228
229 if (cmd_action == CMD_ACT_GET)
230 cmd.enable = 0;
231 else {
232 if (*enable)
233 cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
234 else
235 cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
236 lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
237 }
238
239 ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
240 if (!ret && cmd_action == CMD_ACT_GET)
241 *enable = le16_to_cpu(cmd.enable);
242
243 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
244 return ret;
245}
246
247static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
248 struct enc_key *key)
249{
250 lbs_deb_enter(LBS_DEB_CMD);
251
252 if (key->flags & KEY_INFO_WPA_ENABLED)
253 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
254 if (key->flags & KEY_INFO_WPA_UNICAST)
255 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
256 if (key->flags & KEY_INFO_WPA_MCAST)
257 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
258
259 keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
260 keyparam->keytypeid = cpu_to_le16(key->type);
261 keyparam->keylen = cpu_to_le16(key->len);
262 memcpy(keyparam->key, key->key, key->len);
263
264 /* Length field doesn't include the {type,length} header */
265 keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
266 lbs_deb_leave(LBS_DEB_CMD);
267}
268
269int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
270 struct assoc_request *assoc)
271{
272 struct cmd_ds_802_11_key_material cmd;
273 int ret = 0;
274 int index = 0;
275
276 lbs_deb_enter(LBS_DEB_CMD);
277
278 cmd.action = cpu_to_le16(cmd_action);
279 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
280
281 if (cmd_action == CMD_ACT_GET) {
282 cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2);
283 } else {
284 memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
285
286 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
287 set_one_wpa_key(&cmd.keyParamSet[index],
288 &assoc->wpa_unicast_key);
289 index++;
290 }
291
292 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
293 set_one_wpa_key(&cmd.keyParamSet[index],
294 &assoc->wpa_mcast_key);
295 index++;
296 }
297
298 /* The common header and as many keys as we included */
299 cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
300 keyParamSet[index]));
301 }
302 ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
303 /* Copy the returned key to driver private data */
304 if (!ret && cmd_action == CMD_ACT_GET) {
305 void *buf_ptr = cmd.keyParamSet;
306 void *resp_end = &(&cmd)[1];
307
308 while (buf_ptr < resp_end) {
309 struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
310 struct enc_key *key;
311 uint16_t param_set_len = le16_to_cpu(keyparam->length);
312 uint16_t key_len = le16_to_cpu(keyparam->keylen);
313 uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
314 uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
315 void *end;
316
317 end = (void *)keyparam + sizeof(keyparam->type)
318 + sizeof(keyparam->length) + param_set_len;
319
320 /* Make sure we don't access past the end of the IEs */
321 if (end > resp_end)
322 break;
323
324 if (key_flags & KEY_INFO_WPA_UNICAST)
325 key = &priv->wpa_unicast_key;
326 else if (key_flags & KEY_INFO_WPA_MCAST)
327 key = &priv->wpa_mcast_key;
328 else
329 break;
330
331 /* Copy returned key into driver */
332 memset(key, 0, sizeof(struct enc_key));
333 if (key_len > sizeof(key->key))
334 break;
335 key->type = key_type;
336 key->flags = key_flags;
337 key->len = key_len;
338 memcpy(key->key, keyparam->key, key->len);
339
340 buf_ptr = end + 1;
341 }
342 }
343
344 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
345 return ret;
346}
347
348static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
349{
350/* Bit Rate
351* 15:13 Reserved
352* 12 54 Mbps
353* 11 48 Mbps
354* 10 36 Mbps
355* 9 24 Mbps
356* 8 18 Mbps
357* 7 12 Mbps
358* 6 9 Mbps
359* 5 6 Mbps
360* 4 Reserved
361* 3 11 Mbps
362* 2 5.5 Mbps
363* 1 2 Mbps
364* 0 1 Mbps
365**/
366
367 uint16_t ratemask;
368 int i = lbs_data_rate_to_fw_index(rate);
369 if (lower_rates_ok)
370 ratemask = (0x1fef >> (12 - i));
371 else
372 ratemask = (1 << i);
373 return cpu_to_le16(ratemask);
374}
375
376int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
377 uint16_t cmd_action)
378{
379 struct cmd_ds_802_11_rate_adapt_rateset cmd;
380 int ret;
381
382 lbs_deb_enter(LBS_DEB_CMD);
383
384 if (!priv->cur_rate && !priv->enablehwauto)
385 return -EINVAL;
386
387 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
388
389 cmd.action = cpu_to_le16(cmd_action);
390 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
391 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
392 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
393 if (!ret && cmd_action == CMD_ACT_GET) {
394 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
395 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
396 }
397
398 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
399 return ret;
400}
401
402/**
403 * @brief Set the data rate
404 *
405 * @param priv A pointer to struct lbs_private structure
406 * @param rate The desired data rate, or 0 to clear a locked rate
407 *
408 * @return 0 on success, error on failure
409 */
410int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
411{
412 struct cmd_ds_802_11_data_rate cmd;
413 int ret = 0;
414
415 lbs_deb_enter(LBS_DEB_CMD);
416
417 memset(&cmd, 0, sizeof(cmd));
418 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
419
420 if (rate > 0) {
421 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
422 cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
423 if (cmd.rates[0] == 0) {
424 lbs_deb_cmd("DATA_RATE: invalid requested rate of"
425 " 0x%02X\n", rate);
426 ret = 0;
427 goto out;
428 }
429 lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
430 } else {
431 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
432 lbs_deb_cmd("DATA_RATE: setting auto\n");
433 }
434
435 ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
436 if (ret)
437 goto out;
438
439 lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd));
440
441 /* FIXME: get actual rates FW can do if this command actually returns
442 * all data rates supported.
443 */
444 priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
445 lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
446
447out:
448 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
449 return ret;
450}
451
452
453int lbs_cmd_802_11_rssi(struct lbs_private *priv,
454 struct cmd_ds_command *cmd)
455{
456
457 lbs_deb_enter(LBS_DEB_CMD);
458 cmd->command = cpu_to_le16(CMD_802_11_RSSI);
459 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) +
460 sizeof(struct cmd_header));
461 cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
462
463 /* reset Beacon SNR/NF/RSSI values */
464 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
465 priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
466 priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
467 priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
468 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
469 priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
470
471 lbs_deb_leave(LBS_DEB_CMD);
472 return 0;
473}
474
475int lbs_ret_802_11_rssi(struct lbs_private *priv,
476 struct cmd_ds_command *resp)
477{
478 struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
479
480 lbs_deb_enter(LBS_DEB_CMD);
481
482 /* store the non average value */
483 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
484 priv->NF[TYPE_BEACON][TYPE_NOAVG] =
485 get_unaligned_le16(&rssirsp->noisefloor);
486
487 priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
488 priv->NF[TYPE_BEACON][TYPE_AVG] =
489 get_unaligned_le16(&rssirsp->avgnoisefloor);
490
491 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
492 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
493 priv->NF[TYPE_BEACON][TYPE_NOAVG]);
494
495 priv->RSSI[TYPE_BEACON][TYPE_AVG] =
496 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
497 priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
498
499 lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
500 priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
501 priv->RSSI[TYPE_BEACON][TYPE_AVG]);
502
503 lbs_deb_leave(LBS_DEB_CMD);
504 return 0;
505}
506
507
508int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
509 struct cmd_ds_command *cmd,
510 u16 cmd_action)
511{
512 struct cmd_ds_802_11_beacon_control
513 *bcn_ctrl = &cmd->params.bcn_ctrl;
514
515 lbs_deb_enter(LBS_DEB_CMD);
516 cmd->size =
517 cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
518 + sizeof(struct cmd_header));
519 cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
520
521 bcn_ctrl->action = cpu_to_le16(cmd_action);
522 bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
523 bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
524
525 lbs_deb_leave(LBS_DEB_CMD);
526 return 0;
527}
528
529int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
530 struct cmd_ds_command *resp)
531{
532 struct cmd_ds_802_11_beacon_control *bcn_ctrl =
533 &resp->params.bcn_ctrl;
534
535 lbs_deb_enter(LBS_DEB_CMD);
536
537 if (bcn_ctrl->action == CMD_ACT_GET) {
538 priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
539 priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
540 }
541
542 lbs_deb_enter(LBS_DEB_CMD);
543 return 0;
544}
545
546
547
150static int lbs_assoc_post(struct lbs_private *priv, 548static int lbs_assoc_post(struct lbs_private *priv,
151 struct cmd_ds_802_11_associate_response *resp) 549 struct cmd_ds_802_11_associate_response *resp)
152{ 550{
@@ -226,7 +624,7 @@ static int lbs_assoc_post(struct lbs_private *priv,
226 priv->connect_status = LBS_CONNECTED; 624 priv->connect_status = LBS_CONNECTED;
227 625
228 /* Update current SSID and BSSID */ 626 /* Update current SSID and BSSID */
229 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE); 627 memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
230 priv->curbssparams.ssid_len = bss->ssid_len; 628 priv->curbssparams.ssid_len = bss->ssid_len;
231 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN); 629 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
232 630
@@ -369,12 +767,7 @@ static int lbs_associate(struct lbs_private *priv,
369 (u16)(pos - (u8 *) &cmd.iebuf)); 767 (u16)(pos - (u8 *) &cmd.iebuf));
370 768
371 /* update curbssparams */ 769 /* update curbssparams */
372 priv->curbssparams.channel = bss->phy.ds.channel; 770 priv->channel = bss->phy.ds.channel;
373
374 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
375 ret = -1;
376 goto done;
377 }
378 771
379 ret = lbs_cmd_with_response(priv, command, &cmd); 772 ret = lbs_cmd_with_response(priv, command, &cmd);
380 if (ret == 0) { 773 if (ret == 0) {
@@ -472,7 +865,7 @@ static int lbs_adhoc_post(struct lbs_private *priv,
472 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN); 865 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
473 866
474 /* Set the new SSID to current SSID */ 867 /* Set the new SSID to current SSID */
475 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE); 868 memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
476 priv->curbssparams.ssid_len = bss->ssid_len; 869 priv->curbssparams.ssid_len = bss->ssid_len;
477 870
478 netif_carrier_on(priv->dev); 871 netif_carrier_on(priv->dev);
@@ -487,7 +880,7 @@ static int lbs_adhoc_post(struct lbs_private *priv,
487 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n", 880 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
488 print_ssid(ssid, bss->ssid, bss->ssid_len), 881 print_ssid(ssid, bss->ssid, bss->ssid_len),
489 priv->curbssparams.bssid, 882 priv->curbssparams.bssid,
490 priv->curbssparams.channel); 883 priv->channel);
491 884
492done: 885done:
493 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); 886 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
@@ -560,7 +953,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
560 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band); 953 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
561 954
562 priv->adhoccreate = 0; 955 priv->adhoccreate = 0;
563 priv->curbssparams.channel = bss->channel; 956 priv->channel = bss->channel;
564 957
565 /* Build the join command */ 958 /* Build the join command */
566 memset(&cmd, 0, sizeof(cmd)); 959 memset(&cmd, 0, sizeof(cmd));
@@ -633,11 +1026,6 @@ static int lbs_adhoc_join(struct lbs_private *priv,
633 } 1026 }
634 } 1027 }
635 1028
636 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
637 ret = -1;
638 goto out;
639 }
640
641 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd); 1029 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
642 if (ret == 0) { 1030 if (ret == 0) {
643 ret = lbs_adhoc_post(priv, 1031 ret = lbs_adhoc_post(priv,
@@ -737,12 +1125,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
737 lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n", 1125 lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
738 cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]); 1126 cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
739 1127
740 if (lbs_create_dnld_countryinfo_11d(priv)) {
741 lbs_deb_join("ADHOC_START: dnld_countryinfo_11d failed\n");
742 ret = -1;
743 goto out;
744 }
745
746 lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n", 1128 lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
747 assoc_req->channel, assoc_req->band); 1129 assoc_req->channel, assoc_req->band);
748 1130
@@ -1099,7 +1481,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
1099 /* else send START command */ 1481 /* else send START command */
1100 lbs_deb_assoc("SSID not found, creating adhoc network\n"); 1482 lbs_deb_assoc("SSID not found, creating adhoc network\n");
1101 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid, 1483 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
1102 IW_ESSID_MAX_SIZE); 1484 IEEE80211_MAX_SSID_LEN);
1103 assoc_req->bss.ssid_len = assoc_req->ssid_len; 1485 assoc_req->bss.ssid_len = assoc_req->ssid_len;
1104 lbs_adhoc_start(priv, assoc_req); 1486 lbs_adhoc_start(priv, assoc_req);
1105 } 1487 }
@@ -1185,7 +1567,8 @@ static int assoc_helper_mode(struct lbs_private *priv,
1185 } 1567 }
1186 1568
1187 priv->mode = assoc_req->mode; 1569 priv->mode = assoc_req->mode;
1188 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, assoc_req->mode); 1570 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE,
1571 assoc_req->mode == IW_MODE_ADHOC ? 2 : 1);
1189 1572
1190done: 1573done:
1191 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 1574 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1205,7 +1588,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1205 goto done; 1588 goto done;
1206 } 1589 }
1207 1590
1208 if (assoc_req->channel == priv->curbssparams.channel) 1591 if (assoc_req->channel == priv->channel)
1209 goto done; 1592 goto done;
1210 1593
1211 if (priv->mesh_dev) { 1594 if (priv->mesh_dev) {
@@ -1217,7 +1600,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1217 } 1600 }
1218 1601
1219 lbs_deb_assoc("ASSOC: channel: %d -> %d\n", 1602 lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
1220 priv->curbssparams.channel, assoc_req->channel); 1603 priv->channel, assoc_req->channel);
1221 1604
1222 ret = lbs_set_channel(priv, assoc_req->channel); 1605 ret = lbs_set_channel(priv, assoc_req->channel);
1223 if (ret < 0) 1606 if (ret < 0)
@@ -1232,7 +1615,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1232 goto done; 1615 goto done;
1233 } 1616 }
1234 1617
1235 if (assoc_req->channel != priv->curbssparams.channel) { 1618 if (assoc_req->channel != priv->channel) {
1236 lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n", 1619 lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n",
1237 assoc_req->channel); 1620 assoc_req->channel);
1238 goto restore_mesh; 1621 goto restore_mesh;
@@ -1253,7 +1636,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1253 restore_mesh: 1636 restore_mesh:
1254 if (priv->mesh_dev) 1637 if (priv->mesh_dev)
1255 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1638 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1256 priv->curbssparams.channel); 1639 priv->channel);
1257 1640
1258 done: 1641 done:
1259 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 1642 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1475,7 +1858,7 @@ static int should_stop_adhoc(struct lbs_private *priv,
1475 } 1858 }
1476 1859
1477 if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) { 1860 if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
1478 if (assoc_req->channel != priv->curbssparams.channel) 1861 if (assoc_req->channel != priv->channel)
1479 return 1; 1862 return 1;
1480 } 1863 }
1481 1864
@@ -1557,7 +1940,7 @@ static int lbs_find_best_network_ssid(struct lbs_private *priv,
1557 1940
1558 found = lbs_find_best_ssid_in_list(priv, preferred_mode); 1941 found = lbs_find_best_ssid_in_list(priv, preferred_mode);
1559 if (found && (found->ssid_len > 0)) { 1942 if (found && (found->ssid_len > 0)) {
1560 memcpy(out_ssid, &found->ssid, IW_ESSID_MAX_SIZE); 1943 memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN);
1561 *out_ssid_len = found->ssid_len; 1944 *out_ssid_len = found->ssid_len;
1562 *out_mode = found->mode; 1945 *out_mode = found->mode;
1563 ret = 0; 1946 ret = 0;
@@ -1775,12 +2158,12 @@ struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
1775 assoc_req = priv->pending_assoc_req; 2158 assoc_req = priv->pending_assoc_req;
1776 if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { 2159 if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
1777 memcpy(&assoc_req->ssid, &priv->curbssparams.ssid, 2160 memcpy(&assoc_req->ssid, &priv->curbssparams.ssid,
1778 IW_ESSID_MAX_SIZE); 2161 IEEE80211_MAX_SSID_LEN);
1779 assoc_req->ssid_len = priv->curbssparams.ssid_len; 2162 assoc_req->ssid_len = priv->curbssparams.ssid_len;
1780 } 2163 }
1781 2164
1782 if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) 2165 if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
1783 assoc_req->channel = priv->curbssparams.channel; 2166 assoc_req->channel = priv->channel;
1784 2167
1785 if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags)) 2168 if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags))
1786 assoc_req->band = priv->curbssparams.band; 2169 assoc_req->band = priv->curbssparams.band;
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index 6e765e9f91a3..40621b789fc5 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -3,7 +3,126 @@
3#ifndef _LBS_ASSOC_H_ 3#ifndef _LBS_ASSOC_H_
4#define _LBS_ASSOC_H_ 4#define _LBS_ASSOC_H_
5 5
6#include "dev.h" 6
7#include "defs.h"
8#include "host.h"
9
10
11struct lbs_private;
12
13/*
14 * In theory, the IE is limited to the IE length, 255,
15 * but in practice 64 bytes are enough.
16 */
17#define MAX_WPA_IE_LEN 64
18
19
20
21struct lbs_802_11_security {
22 u8 WPAenabled;
23 u8 WPA2enabled;
24 u8 wep_enabled;
25 u8 auth_mode;
26 u32 key_mgmt;
27};
28
29/** Current Basic Service Set State Structure */
30struct current_bss_params {
31 /** bssid */
32 u8 bssid[ETH_ALEN];
33 /** ssid */
34 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
35 u8 ssid_len;
36
37 /** band */
38 u8 band;
39 /** channel is directly in priv->channel */
40 /** zero-terminated array of supported data rates */
41 u8 rates[MAX_RATES + 1];
42};
43
44/**
45 * @brief Structure used to store information for each beacon/probe response
46 */
47struct bss_descriptor {
48 u8 bssid[ETH_ALEN];
49
50 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
51 u8 ssid_len;
52
53 u16 capability;
54 u32 rssi;
55 u32 channel;
56 u16 beaconperiod;
57 __le16 atimwindow;
58
59 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
60 u8 mode;
61
62 /* zero-terminated array of supported data rates */
63 u8 rates[MAX_RATES + 1];
64
65 unsigned long last_scanned;
66
67 union ieee_phy_param_set phy;
68 union ieee_ss_param_set ss;
69
70 u8 wpa_ie[MAX_WPA_IE_LEN];
71 size_t wpa_ie_len;
72 u8 rsn_ie[MAX_WPA_IE_LEN];
73 size_t rsn_ie_len;
74
75 u8 mesh;
76
77 struct list_head list;
78};
79
80/** Association request
81 *
82 * Encapsulates all the options that describe a specific assocation request
83 * or configuration of the wireless card's radio, mode, and security settings.
84 */
85struct assoc_request {
86#define ASSOC_FLAG_SSID 1
87#define ASSOC_FLAG_CHANNEL 2
88#define ASSOC_FLAG_BAND 3
89#define ASSOC_FLAG_MODE 4
90#define ASSOC_FLAG_BSSID 5
91#define ASSOC_FLAG_WEP_KEYS 6
92#define ASSOC_FLAG_WEP_TX_KEYIDX 7
93#define ASSOC_FLAG_WPA_MCAST_KEY 8
94#define ASSOC_FLAG_WPA_UCAST_KEY 9
95#define ASSOC_FLAG_SECINFO 10
96#define ASSOC_FLAG_WPA_IE 11
97 unsigned long flags;
98
99 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
100 u8 ssid_len;
101 u8 channel;
102 u8 band;
103 u8 mode;
104 u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
105
106 /** WEP keys */
107 struct enc_key wep_keys[4];
108 u16 wep_tx_keyidx;
109
110 /** WPA keys */
111 struct enc_key wpa_mcast_key;
112 struct enc_key wpa_unicast_key;
113
114 struct lbs_802_11_security secinfo;
115
116 /** WPA Information Elements*/
117 u8 wpa_ie[MAX_WPA_IE_LEN];
118 u8 wpa_ie_len;
119
120 /* BSS to associate with for infrastructure of Ad-Hoc join */
121 struct bss_descriptor bss;
122};
123
124
125extern u8 lbs_bg_rates[MAX_RATES];
7 126
8void lbs_association_worker(struct work_struct *work); 127void lbs_association_worker(struct work_struct *work);
9struct assoc_request *lbs_get_association_request(struct lbs_private *priv); 128struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
@@ -13,4 +132,24 @@ int lbs_adhoc_stop(struct lbs_private *priv);
13int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, 132int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
14 u8 bssid[ETH_ALEN], u16 reason); 133 u8 bssid[ETH_ALEN], u16 reason);
15 134
135int lbs_cmd_802_11_rssi(struct lbs_private *priv,
136 struct cmd_ds_command *cmd);
137int lbs_ret_802_11_rssi(struct lbs_private *priv,
138 struct cmd_ds_command *resp);
139
140int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
141 struct cmd_ds_command *cmd,
142 u16 cmd_action);
143int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
144 struct cmd_ds_command *resp);
145
146int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
147 struct assoc_request *assoc);
148
149int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
150 uint16_t *enable);
151
152int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
153 struct assoc_request *assoc);
154
16#endif /* _LBS_ASSOC_H */ 155#endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 0fb312576b8d..1065ce29cd08 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -3,18 +3,20 @@
3 * It prepares command and sends it to firmware when it is ready. 3 * It prepares command and sends it to firmware when it is ready.
4 */ 4 */
5 5
6#include <net/iw_handler.h>
7#include <net/lib80211.h> 6#include <net/lib80211.h>
8#include <linux/kfifo.h> 7#include <linux/kfifo.h>
8#include <linux/sched.h>
9
9#include "host.h" 10#include "host.h"
10#include "hostcmd.h"
11#include "decl.h" 11#include "decl.h"
12#include "defs.h" 12#include "defs.h"
13#include "dev.h" 13#include "dev.h"
14#include "assoc.h" 14#include "assoc.h"
15#include "wext.h" 15#include "wext.h"
16#include "scan.h"
16#include "cmd.h" 17#include "cmd.h"
17 18
19
18static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv); 20static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv);
19 21
20/** 22/**
@@ -191,11 +193,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
191 goto out; 193 goto out;
192 } 194 }
193 195
194 if (lbs_set_universaltable(priv, 0)) {
195 ret = -1;
196 goto out;
197 }
198
199out: 196out:
200 lbs_deb_leave(LBS_DEB_CMD); 197 lbs_deb_leave(LBS_DEB_CMD);
201 return ret; 198 return ret;
@@ -244,7 +241,7 @@ static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd,
244 241
245 cmd->command = cpu_to_le16(CMD_802_11_PS_MODE); 242 cmd->command = cpu_to_le16(CMD_802_11_PS_MODE);
246 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) + 243 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) +
247 S_DS_GEN); 244 sizeof(struct cmd_header));
248 psm->action = cpu_to_le16(cmd_action); 245 psm->action = cpu_to_le16(cmd_action);
249 psm->multipledtim = 0; 246 psm->multipledtim = 0;
250 switch (cmd_action) { 247 switch (cmd_action) {
@@ -273,33 +270,6 @@ static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd,
273 return 0; 270 return 0;
274} 271}
275 272
276int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
277 uint16_t cmd_action, uint16_t *timeout)
278{
279 struct cmd_ds_802_11_inactivity_timeout cmd;
280 int ret;
281
282 lbs_deb_enter(LBS_DEB_CMD);
283
284 cmd.hdr.command = cpu_to_le16(CMD_802_11_INACTIVITY_TIMEOUT);
285 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
286
287 cmd.action = cpu_to_le16(cmd_action);
288
289 if (cmd_action == CMD_ACT_SET)
290 cmd.timeout = cpu_to_le16(*timeout);
291 else
292 cmd.timeout = 0;
293
294 ret = lbs_cmd_with_response(priv, CMD_802_11_INACTIVITY_TIMEOUT, &cmd);
295
296 if (!ret)
297 *timeout = le16_to_cpu(cmd.timeout);
298
299 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
300 return 0;
301}
302
303int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 273int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
304 struct sleep_params *sp) 274 struct sleep_params *sp)
305{ 275{
@@ -396,197 +366,6 @@ int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep)
396 return ret; 366 return ret;
397} 367}
398 368
399int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
400 struct assoc_request *assoc)
401{
402 struct cmd_ds_802_11_set_wep cmd;
403 int ret = 0;
404
405 lbs_deb_enter(LBS_DEB_CMD);
406
407 memset(&cmd, 0, sizeof(cmd));
408 cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
409 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
410
411 cmd.action = cpu_to_le16(cmd_action);
412
413 if (cmd_action == CMD_ACT_ADD) {
414 int i;
415
416 /* default tx key index */
417 cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
418 CMD_WEP_KEY_INDEX_MASK);
419
420 /* Copy key types and material to host command structure */
421 for (i = 0; i < 4; i++) {
422 struct enc_key *pkey = &assoc->wep_keys[i];
423
424 switch (pkey->len) {
425 case KEY_LEN_WEP_40:
426 cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
427 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
428 lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
429 break;
430 case KEY_LEN_WEP_104:
431 cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
432 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
433 lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
434 break;
435 case 0:
436 break;
437 default:
438 lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
439 i, pkey->len);
440 ret = -1;
441 goto done;
442 break;
443 }
444 }
445 } else if (cmd_action == CMD_ACT_REMOVE) {
446 /* ACT_REMOVE clears _all_ WEP keys */
447
448 /* default tx key index */
449 cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
450 CMD_WEP_KEY_INDEX_MASK);
451 lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
452 }
453
454 ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
455done:
456 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
457 return ret;
458}
459
460int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
461 uint16_t *enable)
462{
463 struct cmd_ds_802_11_enable_rsn cmd;
464 int ret;
465
466 lbs_deb_enter(LBS_DEB_CMD);
467
468 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
469 cmd.action = cpu_to_le16(cmd_action);
470
471 if (cmd_action == CMD_ACT_GET)
472 cmd.enable = 0;
473 else {
474 if (*enable)
475 cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
476 else
477 cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
478 lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
479 }
480
481 ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
482 if (!ret && cmd_action == CMD_ACT_GET)
483 *enable = le16_to_cpu(cmd.enable);
484
485 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
486 return ret;
487}
488
489static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
490 struct enc_key *key)
491{
492 lbs_deb_enter(LBS_DEB_CMD);
493
494 if (key->flags & KEY_INFO_WPA_ENABLED)
495 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
496 if (key->flags & KEY_INFO_WPA_UNICAST)
497 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
498 if (key->flags & KEY_INFO_WPA_MCAST)
499 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
500
501 keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
502 keyparam->keytypeid = cpu_to_le16(key->type);
503 keyparam->keylen = cpu_to_le16(key->len);
504 memcpy(keyparam->key, key->key, key->len);
505
506 /* Length field doesn't include the {type,length} header */
507 keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
508 lbs_deb_leave(LBS_DEB_CMD);
509}
510
511int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
512 struct assoc_request *assoc)
513{
514 struct cmd_ds_802_11_key_material cmd;
515 int ret = 0;
516 int index = 0;
517
518 lbs_deb_enter(LBS_DEB_CMD);
519
520 cmd.action = cpu_to_le16(cmd_action);
521 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
522
523 if (cmd_action == CMD_ACT_GET) {
524 cmd.hdr.size = cpu_to_le16(S_DS_GEN + 2);
525 } else {
526 memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
527
528 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
529 set_one_wpa_key(&cmd.keyParamSet[index],
530 &assoc->wpa_unicast_key);
531 index++;
532 }
533
534 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
535 set_one_wpa_key(&cmd.keyParamSet[index],
536 &assoc->wpa_mcast_key);
537 index++;
538 }
539
540 /* The common header and as many keys as we included */
541 cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
542 keyParamSet[index]));
543 }
544 ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
545 /* Copy the returned key to driver private data */
546 if (!ret && cmd_action == CMD_ACT_GET) {
547 void *buf_ptr = cmd.keyParamSet;
548 void *resp_end = &(&cmd)[1];
549
550 while (buf_ptr < resp_end) {
551 struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
552 struct enc_key *key;
553 uint16_t param_set_len = le16_to_cpu(keyparam->length);
554 uint16_t key_len = le16_to_cpu(keyparam->keylen);
555 uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
556 uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
557 void *end;
558
559 end = (void *)keyparam + sizeof(keyparam->type)
560 + sizeof(keyparam->length) + param_set_len;
561
562 /* Make sure we don't access past the end of the IEs */
563 if (end > resp_end)
564 break;
565
566 if (key_flags & KEY_INFO_WPA_UNICAST)
567 key = &priv->wpa_unicast_key;
568 else if (key_flags & KEY_INFO_WPA_MCAST)
569 key = &priv->wpa_mcast_key;
570 else
571 break;
572
573 /* Copy returned key into driver */
574 memset(key, 0, sizeof(struct enc_key));
575 if (key_len > sizeof(key->key))
576 break;
577 key->type = key_type;
578 key->flags = key_flags;
579 key->len = key_len;
580 memcpy(key->key, keyparam->key, key->len);
581
582 buf_ptr = end + 1;
583 }
584 }
585
586 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
587 return ret;
588}
589
590/** 369/**
591 * @brief Set an SNMP MIB value 370 * @brief Set an SNMP MIB value
592 * 371 *
@@ -611,7 +390,7 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val)
611 switch (oid) { 390 switch (oid) {
612 case SNMP_MIB_OID_BSS_TYPE: 391 case SNMP_MIB_OID_BSS_TYPE:
613 cmd.bufsize = cpu_to_le16(sizeof(u8)); 392 cmd.bufsize = cpu_to_le16(sizeof(u8));
614 cmd.value[0] = (val == IW_MODE_ADHOC) ? 2 : 1; 393 cmd.value[0] = val;
615 break; 394 break;
616 case SNMP_MIB_OID_11D_ENABLE: 395 case SNMP_MIB_OID_11D_ENABLE:
617 case SNMP_MIB_OID_FRAG_THRESHOLD: 396 case SNMP_MIB_OID_FRAG_THRESHOLD:
@@ -664,13 +443,7 @@ int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
664 443
665 switch (le16_to_cpu(cmd.bufsize)) { 444 switch (le16_to_cpu(cmd.bufsize)) {
666 case sizeof(u8): 445 case sizeof(u8):
667 if (oid == SNMP_MIB_OID_BSS_TYPE) { 446 *out_val = cmd.value[0];
668 if (cmd.value[0] == 2)
669 *out_val = IW_MODE_ADHOC;
670 else
671 *out_val = IW_MODE_INFRA;
672 } else
673 *out_val = cmd.value[0];
674 break; 447 break;
675 case sizeof(u16): 448 case sizeof(u16):
676 *out_val = le16_to_cpu(*((__le16 *)(&cmd.value))); 449 *out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
@@ -757,7 +530,7 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
757 cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE); 530 cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE);
758 cmd->size = 531 cmd->size =
759 cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) + 532 cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) +
760 S_DS_GEN); 533 sizeof(struct cmd_header));
761 534
762 monitor->action = cpu_to_le16(cmd_action); 535 monitor->action = cpu_to_le16(cmd_action);
763 if (cmd_action == CMD_ACT_SET) { 536 if (cmd_action == CMD_ACT_SET) {
@@ -768,111 +541,6 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
768 return 0; 541 return 0;
769} 542}
770 543
771static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
772{
773/* Bit Rate
774* 15:13 Reserved
775* 12 54 Mbps
776* 11 48 Mbps
777* 10 36 Mbps
778* 9 24 Mbps
779* 8 18 Mbps
780* 7 12 Mbps
781* 6 9 Mbps
782* 5 6 Mbps
783* 4 Reserved
784* 3 11 Mbps
785* 2 5.5 Mbps
786* 1 2 Mbps
787* 0 1 Mbps
788**/
789
790 uint16_t ratemask;
791 int i = lbs_data_rate_to_fw_index(rate);
792 if (lower_rates_ok)
793 ratemask = (0x1fef >> (12 - i));
794 else
795 ratemask = (1 << i);
796 return cpu_to_le16(ratemask);
797}
798
799int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
800 uint16_t cmd_action)
801{
802 struct cmd_ds_802_11_rate_adapt_rateset cmd;
803 int ret;
804
805 lbs_deb_enter(LBS_DEB_CMD);
806
807 if (!priv->cur_rate && !priv->enablehwauto)
808 return -EINVAL;
809
810 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
811
812 cmd.action = cpu_to_le16(cmd_action);
813 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
814 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
815 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
816 if (!ret && cmd_action == CMD_ACT_GET) {
817 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
818 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
819 }
820
821 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
822 return ret;
823}
824EXPORT_SYMBOL_GPL(lbs_cmd_802_11_rate_adapt_rateset);
825
826/**
827 * @brief Set the data rate
828 *
829 * @param priv A pointer to struct lbs_private structure
830 * @param rate The desired data rate, or 0 to clear a locked rate
831 *
832 * @return 0 on success, error on failure
833 */
834int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
835{
836 struct cmd_ds_802_11_data_rate cmd;
837 int ret = 0;
838
839 lbs_deb_enter(LBS_DEB_CMD);
840
841 memset(&cmd, 0, sizeof(cmd));
842 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
843
844 if (rate > 0) {
845 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
846 cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
847 if (cmd.rates[0] == 0) {
848 lbs_deb_cmd("DATA_RATE: invalid requested rate of"
849 " 0x%02X\n", rate);
850 ret = 0;
851 goto out;
852 }
853 lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
854 } else {
855 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
856 lbs_deb_cmd("DATA_RATE: setting auto\n");
857 }
858
859 ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
860 if (ret)
861 goto out;
862
863 lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof (cmd));
864
865 /* FIXME: get actual rates FW can do if this command actually returns
866 * all data rates supported.
867 */
868 priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
869 lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
870
871out:
872 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
873 return ret;
874}
875
876/** 544/**
877 * @brief Get the radio channel 545 * @brief Get the radio channel
878 * 546 *
@@ -880,7 +548,7 @@ out:
880 * 548 *
881 * @return The channel on success, error on failure 549 * @return The channel on success, error on failure
882 */ 550 */
883int lbs_get_channel(struct lbs_private *priv) 551static int lbs_get_channel(struct lbs_private *priv)
884{ 552{
885 struct cmd_ds_802_11_rf_channel cmd; 553 struct cmd_ds_802_11_rf_channel cmd;
886 int ret = 0; 554 int ret = 0;
@@ -912,7 +580,7 @@ int lbs_update_channel(struct lbs_private *priv)
912 580
913 ret = lbs_get_channel(priv); 581 ret = lbs_get_channel(priv);
914 if (ret > 0) { 582 if (ret > 0) {
915 priv->curbssparams.channel = ret; 583 priv->channel = ret;
916 ret = 0; 584 ret = 0;
917 } 585 }
918 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 586 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -931,7 +599,7 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel)
931{ 599{
932 struct cmd_ds_802_11_rf_channel cmd; 600 struct cmd_ds_802_11_rf_channel cmd;
933#ifdef DEBUG 601#ifdef DEBUG
934 u8 old_channel = priv->curbssparams.channel; 602 u8 old_channel = priv->channel;
935#endif 603#endif
936 int ret = 0; 604 int ret = 0;
937 605
@@ -946,36 +614,15 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel)
946 if (ret) 614 if (ret)
947 goto out; 615 goto out;
948 616
949 priv->curbssparams.channel = (uint8_t) le16_to_cpu(cmd.channel); 617 priv->channel = (uint8_t) le16_to_cpu(cmd.channel);
950 lbs_deb_cmd("channel switch from %d to %d\n", old_channel, 618 lbs_deb_cmd("channel switch from %d to %d\n", old_channel,
951 priv->curbssparams.channel); 619 priv->channel);
952 620
953out: 621out:
954 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 622 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
955 return ret; 623 return ret;
956} 624}
957 625
958static int lbs_cmd_802_11_rssi(struct lbs_private *priv,
959 struct cmd_ds_command *cmd)
960{
961
962 lbs_deb_enter(LBS_DEB_CMD);
963 cmd->command = cpu_to_le16(CMD_802_11_RSSI);
964 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) + S_DS_GEN);
965 cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
966
967 /* reset Beacon SNR/NF/RSSI values */
968 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
969 priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
970 priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
971 priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
972 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
973 priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
974
975 lbs_deb_leave(LBS_DEB_CMD);
976 return 0;
977}
978
979static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr, 626static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
980 u8 cmd_action, void *pdata_buf) 627 u8 cmd_action, void *pdata_buf)
981{ 628{
@@ -992,7 +639,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
992 639
993 cmdptr->size = 640 cmdptr->size =
994 cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access) 641 cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access)
995 + S_DS_GEN); 642 + sizeof(struct cmd_header));
996 macreg = 643 macreg =
997 (struct cmd_ds_mac_reg_access *)&cmdptr->params. 644 (struct cmd_ds_mac_reg_access *)&cmdptr->params.
998 macreg; 645 macreg;
@@ -1011,7 +658,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
1011 cmdptr->size = 658 cmdptr->size =
1012 cpu_to_le16(sizeof 659 cpu_to_le16(sizeof
1013 (struct cmd_ds_bbp_reg_access) 660 (struct cmd_ds_bbp_reg_access)
1014 + S_DS_GEN); 661 + sizeof(struct cmd_header));
1015 bbpreg = 662 bbpreg =
1016 (struct cmd_ds_bbp_reg_access *)&cmdptr->params. 663 (struct cmd_ds_bbp_reg_access *)&cmdptr->params.
1017 bbpreg; 664 bbpreg;
@@ -1030,7 +677,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
1030 cmdptr->size = 677 cmdptr->size =
1031 cpu_to_le16(sizeof 678 cpu_to_le16(sizeof
1032 (struct cmd_ds_rf_reg_access) + 679 (struct cmd_ds_rf_reg_access) +
1033 S_DS_GEN); 680 sizeof(struct cmd_header));
1034 rfreg = 681 rfreg =
1035 (struct cmd_ds_rf_reg_access *)&cmdptr->params. 682 (struct cmd_ds_rf_reg_access *)&cmdptr->params.
1036 rfreg; 683 rfreg;
@@ -1057,7 +704,8 @@ static int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
1057 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); 704 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
1058 705
1059 cmd->command = cpu_to_le16(CMD_BT_ACCESS); 706 cmd->command = cpu_to_le16(CMD_BT_ACCESS);
1060 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) + S_DS_GEN); 707 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) +
708 sizeof(struct cmd_header));
1061 cmd->result = 0; 709 cmd->result = 0;
1062 bt_access->action = cpu_to_le16(cmd_action); 710 bt_access->action = cpu_to_le16(cmd_action);
1063 711
@@ -1094,7 +742,8 @@ static int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
1094 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); 742 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
1095 743
1096 cmd->command = cpu_to_le16(CMD_FWT_ACCESS); 744 cmd->command = cpu_to_le16(CMD_FWT_ACCESS);
1097 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) + S_DS_GEN); 745 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) +
746 sizeof(struct cmd_header));
1098 cmd->result = 0; 747 cmd->result = 0;
1099 748
1100 if (pdata_buf) 749 if (pdata_buf)
@@ -1200,7 +849,7 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1200 ie->val.mesh_id_len = priv->mesh_ssid_len; 849 ie->val.mesh_id_len = priv->mesh_ssid_len;
1201 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len); 850 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
1202 ie->len = sizeof(struct mrvl_meshie_val) - 851 ie->len = sizeof(struct mrvl_meshie_val) -
1203 IW_ESSID_MAX_SIZE + priv->mesh_ssid_len; 852 IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len;
1204 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val)); 853 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
1205 break; 854 break;
1206 case CMD_ACT_MESH_CONFIG_STOP: 855 case CMD_ACT_MESH_CONFIG_STOP:
@@ -1215,27 +864,6 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1215 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv); 864 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
1216} 865}
1217 866
1218static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
1219 struct cmd_ds_command *cmd,
1220 u16 cmd_action)
1221{
1222 struct cmd_ds_802_11_beacon_control
1223 *bcn_ctrl = &cmd->params.bcn_ctrl;
1224
1225 lbs_deb_enter(LBS_DEB_CMD);
1226 cmd->size =
1227 cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
1228 + S_DS_GEN);
1229 cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
1230
1231 bcn_ctrl->action = cpu_to_le16(cmd_action);
1232 bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
1233 bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
1234
1235 lbs_deb_leave(LBS_DEB_CMD);
1236 return 0;
1237}
1238
1239static void lbs_queue_cmd(struct lbs_private *priv, 867static void lbs_queue_cmd(struct lbs_private *priv,
1240 struct cmd_ctrl_node *cmdnode) 868 struct cmd_ctrl_node *cmdnode)
1241{ 869{
@@ -1531,7 +1159,7 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1531 1159
1532 cmdptr->command = cpu_to_le16(cmd_no); 1160 cmdptr->command = cpu_to_le16(cmd_no);
1533 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) + 1161 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) +
1534 S_DS_GEN); 1162 sizeof(struct cmd_header));
1535 1163
1536 memmove(&cmdptr->params.afc, 1164 memmove(&cmdptr->params.afc,
1537 pdata_buf, sizeof(struct cmd_ds_802_11_afc)); 1165 pdata_buf, sizeof(struct cmd_ds_802_11_afc));
@@ -1539,45 +1167,17 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1539 ret = 0; 1167 ret = 0;
1540 goto done; 1168 goto done;
1541 1169
1542 case CMD_802_11D_DOMAIN_INFO:
1543 ret = lbs_cmd_802_11d_domain_info(priv, cmdptr,
1544 cmd_no, cmd_action);
1545 break;
1546
1547 case CMD_802_11_TPC_CFG: 1170 case CMD_802_11_TPC_CFG:
1548 cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG); 1171 cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG);
1549 cmdptr->size = 1172 cmdptr->size =
1550 cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) + 1173 cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) +
1551 S_DS_GEN); 1174 sizeof(struct cmd_header));
1552 1175
1553 memmove(&cmdptr->params.tpccfg, 1176 memmove(&cmdptr->params.tpccfg,
1554 pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg)); 1177 pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg));
1555 1178
1556 ret = 0; 1179 ret = 0;
1557 break; 1180 break;
1558 case CMD_802_11_LED_GPIO_CTRL:
1559 {
1560 struct mrvl_ie_ledgpio *gpio =
1561 (struct mrvl_ie_ledgpio*)
1562 cmdptr->params.ledgpio.data;
1563
1564 memmove(&cmdptr->params.ledgpio,
1565 pdata_buf,
1566 sizeof(struct cmd_ds_802_11_led_ctrl));
1567
1568 cmdptr->command =
1569 cpu_to_le16(CMD_802_11_LED_GPIO_CTRL);
1570
1571#define ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN 8
1572 cmdptr->size =
1573 cpu_to_le16(le16_to_cpu(gpio->header.len)
1574 + S_DS_GEN
1575 + ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN);
1576 gpio->header.len = gpio->header.len;
1577
1578 ret = 0;
1579 break;
1580 }
1581 1181
1582 case CMD_BT_ACCESS: 1182 case CMD_BT_ACCESS:
1583 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf); 1183 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
@@ -1587,18 +1187,12 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1587 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf); 1187 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
1588 break; 1188 break;
1589 1189
1590 case CMD_GET_TSF:
1591 cmdptr->command = cpu_to_le16(CMD_GET_TSF);
1592 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_get_tsf) +
1593 S_DS_GEN);
1594 ret = 0;
1595 break;
1596 case CMD_802_11_BEACON_CTRL: 1190 case CMD_802_11_BEACON_CTRL:
1597 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); 1191 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
1598 break; 1192 break;
1599 case CMD_802_11_DEEP_SLEEP: 1193 case CMD_802_11_DEEP_SLEEP:
1600 cmdptr->command = cpu_to_le16(CMD_802_11_DEEP_SLEEP); 1194 cmdptr->command = cpu_to_le16(CMD_802_11_DEEP_SLEEP);
1601 cmdptr->size = cpu_to_le16(S_DS_GEN); 1195 cmdptr->size = cpu_to_le16(sizeof(struct cmd_header));
1602 break; 1196 break;
1603 default: 1197 default:
1604 lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no); 1198 lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no);
@@ -1917,30 +1511,6 @@ done:
1917 return ret; 1511 return ret;
1918} 1512}
1919 1513
1920void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
1921{
1922 union iwreq_data iwrq;
1923 u8 buf[50];
1924
1925 lbs_deb_enter(LBS_DEB_WEXT);
1926
1927 memset(&iwrq, 0, sizeof(union iwreq_data));
1928 memset(buf, 0, sizeof(buf));
1929
1930 snprintf(buf, sizeof(buf) - 1, "%s", str);
1931
1932 iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
1933
1934 /* Send Event to upper layer */
1935 lbs_deb_wext("event indication string %s\n", (char *)buf);
1936 lbs_deb_wext("event indication length %d\n", iwrq.data.length);
1937 lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
1938
1939 wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
1940
1941 lbs_deb_leave(LBS_DEB_WEXT);
1942}
1943
1944static void lbs_send_confirmsleep(struct lbs_private *priv) 1514static void lbs_send_confirmsleep(struct lbs_private *priv)
1945{ 1515{
1946 unsigned long flags; 1516 unsigned long flags;
@@ -2118,7 +1688,7 @@ int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
2118} 1688}
2119 1689
2120 1690
2121static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, 1691struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
2122 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size, 1692 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
2123 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 1693 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
2124 unsigned long callback_arg) 1694 unsigned long callback_arg)
@@ -2216,5 +1786,3 @@ done:
2216 return ret; 1786 return ret;
2217} 1787}
2218EXPORT_SYMBOL_GPL(__lbs_cmd); 1788EXPORT_SYMBOL_GPL(__lbs_cmd);
2219
2220
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 392e578ca095..2862748aef70 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -3,11 +3,30 @@
3#ifndef _LBS_CMD_H_ 3#ifndef _LBS_CMD_H_
4#define _LBS_CMD_H_ 4#define _LBS_CMD_H_
5 5
6#include "hostcmd.h" 6#include "host.h"
7#include "dev.h" 7#include "dev.h"
8 8
9
10/* Command & response transfer between host and card */
11
12struct cmd_ctrl_node {
13 struct list_head list;
14 int result;
15 /* command response */
16 int (*callback)(struct lbs_private *,
17 unsigned long,
18 struct cmd_header *);
19 unsigned long callback_arg;
20 /* command data */
21 struct cmd_header *cmdbuf;
22 /* wait queue */
23 u16 cmdwaitqwoken;
24 wait_queue_head_t cmdwait_q;
25};
26
27
9/* lbs_cmd() infers the size of the buffer to copy data back into, from 28/* lbs_cmd() infers the size of the buffer to copy data back into, from
10 the size of the target of the pointer. Since the command to be sent 29 the size of the target of the pointer. Since the command to be sent
11 may often be smaller, that size is set in cmd->size by the caller.*/ 30 may often be smaller, that size is set in cmd->size by the caller.*/
12#define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \ 31#define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \
13 uint16_t __sz = le16_to_cpu((cmd)->hdr.size); \ 32 uint16_t __sz = le16_to_cpu((cmd)->hdr.size); \
@@ -18,6 +37,11 @@
18#define lbs_cmd_with_response(priv, cmdnr, cmd) \ 37#define lbs_cmd_with_response(priv, cmdnr, cmd) \
19 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd)) 38 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd))
20 39
40int lbs_prepare_and_send_command(struct lbs_private *priv,
41 u16 cmd_no,
42 u16 cmd_action,
43 u16 wait_option, u32 cmd_oid, void *pdata_buf);
44
21void lbs_cmd_async(struct lbs_private *priv, uint16_t command, 45void lbs_cmd_async(struct lbs_private *priv, uint16_t command,
22 struct cmd_header *in_cmd, int in_cmd_size); 46 struct cmd_header *in_cmd, int in_cmd_size);
23 47
@@ -26,62 +50,93 @@ int __lbs_cmd(struct lbs_private *priv, uint16_t command,
26 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 50 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
27 unsigned long callback_arg); 51 unsigned long callback_arg);
28 52
29int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 53struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
30 int8_t p1, int8_t p2); 54 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
55 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
56 unsigned long callback_arg);
31 57
32int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, 58int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
33 int8_t p2, int usesnr); 59 struct cmd_header *resp);
34 60
35int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 61int lbs_allocate_cmd_buffer(struct lbs_private *priv);
36 int8_t p1, int8_t p2); 62int lbs_free_cmd_buffer(struct lbs_private *priv);
37 63
38int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, 64int lbs_execute_next_command(struct lbs_private *priv);
39 int8_t p2, int usesnr); 65void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
66 int result);
67int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
40 68
41int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
42 struct cmd_header *resp);
43 69
44int lbs_update_hw_spec(struct lbs_private *priv); 70/* From cmdresp.c */
45 71
46int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, 72void lbs_mac_event_disconnected(struct lbs_private *priv);
47 struct cmd_ds_mesh_access *cmd);
48 73
49int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
50 74
51int lbs_get_channel(struct lbs_private *priv); 75
76/* Events */
77
78int lbs_process_event(struct lbs_private *priv, u32 event);
79
80
81/* Actual commands */
82
83int lbs_update_hw_spec(struct lbs_private *priv);
84
52int lbs_set_channel(struct lbs_private *priv, u8 channel); 85int lbs_set_channel(struct lbs_private *priv, u8 channel);
53 86
54int lbs_mesh_config_send(struct lbs_private *priv, 87int lbs_update_channel(struct lbs_private *priv);
55 struct cmd_ds_mesh_config *cmd,
56 uint16_t action, uint16_t type);
57int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
58 88
59int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, 89int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
60 struct wol_config *p_wol_config); 90 struct wol_config *p_wol_config);
61int lbs_suspend(struct lbs_private *priv);
62void lbs_resume(struct lbs_private *priv);
63 91
64int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
65 uint16_t cmd_action);
66int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
67 uint16_t cmd_action, uint16_t *timeout);
68int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 92int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
69 struct sleep_params *sp); 93 struct sleep_params *sp);
70int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
71 struct assoc_request *assoc);
72int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
73 uint16_t *enable);
74int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
75 struct assoc_request *assoc);
76 94
77int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, 95void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
78 s16 *maxlevel); 96
79int lbs_set_tx_power(struct lbs_private *priv, s16 dbm); 97void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
98
99void lbs_ps_confirm_sleep(struct lbs_private *priv);
80 100
81int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on); 101int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
82 102
103void lbs_set_mac_control(struct lbs_private *priv);
104
105int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
106 s16 *maxlevel);
107
83int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val); 108int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
84 109
85int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val); 110int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
86 111
112
113/* Mesh related */
114
115int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
116 struct cmd_ds_mesh_access *cmd);
117
118int lbs_mesh_config_send(struct lbs_private *priv,
119 struct cmd_ds_mesh_config *cmd,
120 uint16_t action, uint16_t type);
121
122int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
123
124
125/* Commands only used in wext.c, assoc. and scan.c */
126
127int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
128 int8_t p1, int8_t p2);
129
130int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
131 int8_t p2, int usesnr);
132
133int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
134
135int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
136 uint16_t cmd_action);
137
138int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
139
140int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
141
87#endif /* _LBS_CMD_H */ 142#endif /* _LBS_CMD_H */
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 9ee8bd11bda9..21d57690c20a 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -11,6 +11,7 @@
11 11
12#include "host.h" 12#include "host.h"
13#include "decl.h" 13#include "decl.h"
14#include "cmd.h"
14#include "defs.h" 15#include "defs.h"
15#include "dev.h" 16#include "dev.h"
16#include "assoc.h" 17#include "assoc.h"
@@ -26,23 +27,17 @@
26 */ 27 */
27void lbs_mac_event_disconnected(struct lbs_private *priv) 28void lbs_mac_event_disconnected(struct lbs_private *priv)
28{ 29{
29 union iwreq_data wrqu;
30
31 if (priv->connect_status != LBS_CONNECTED) 30 if (priv->connect_status != LBS_CONNECTED)
32 return; 31 return;
33 32
34 lbs_deb_enter(LBS_DEB_ASSOC); 33 lbs_deb_enter(LBS_DEB_ASSOC);
35 34
36 memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
37 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
38
39 /* 35 /*
40 * Cisco AP sends EAP failure and de-auth in less than 0.5 ms. 36 * Cisco AP sends EAP failure and de-auth in less than 0.5 ms.
41 * It causes problem in the Supplicant 37 * It causes problem in the Supplicant
42 */ 38 */
43
44 msleep_interruptible(1000); 39 msleep_interruptible(1000);
45 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 40 lbs_send_disconnect_notification(priv);
46 41
47 /* report disconnect to upper layer */ 42 /* report disconnect to upper layer */
48 netif_stop_queue(priv->dev); 43 netif_stop_queue(priv->dev);
@@ -67,7 +62,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
67 * no longer valid. 62 * no longer valid.
68 */ 63 */
69 memset(&priv->curbssparams.bssid, 0, ETH_ALEN); 64 memset(&priv->curbssparams.bssid, 0, ETH_ALEN);
70 memset(&priv->curbssparams.ssid, 0, IW_ESSID_MAX_SIZE); 65 memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN);
71 priv->curbssparams.ssid_len = 0; 66 priv->curbssparams.ssid_len = 0;
72 67
73 if (priv->psstate != PS_STATE_FULL_POWER) { 68 if (priv->psstate != PS_STATE_FULL_POWER) {
@@ -78,32 +73,6 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
78 lbs_deb_leave(LBS_DEB_ASSOC); 73 lbs_deb_leave(LBS_DEB_ASSOC);
79} 74}
80 75
81/**
82 * @brief This function handles MIC failure event.
83 *
84 * @param priv A pointer to struct lbs_private structure
85 * @para event the event id
86 * @return n/a
87 */
88static void handle_mic_failureevent(struct lbs_private *priv, u32 event)
89{
90 char buf[50];
91
92 lbs_deb_enter(LBS_DEB_CMD);
93 memset(buf, 0, sizeof(buf));
94
95 sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
96
97 if (event == MACREG_INT_CODE_MIC_ERR_UNICAST) {
98 strcat(buf, "unicast ");
99 } else {
100 strcat(buf, "multicast ");
101 }
102
103 lbs_send_iwevcustom_event(priv, buf);
104 lbs_deb_leave(LBS_DEB_CMD);
105}
106
107static int lbs_ret_reg_access(struct lbs_private *priv, 76static int lbs_ret_reg_access(struct lbs_private *priv,
108 u16 type, struct cmd_ds_command *resp) 77 u16 type, struct cmd_ds_command *resp)
109{ 78{
@@ -147,53 +116,6 @@ static int lbs_ret_reg_access(struct lbs_private *priv,
147 return ret; 116 return ret;
148} 117}
149 118
150static int lbs_ret_802_11_rssi(struct lbs_private *priv,
151 struct cmd_ds_command *resp)
152{
153 struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
154
155 lbs_deb_enter(LBS_DEB_CMD);
156
157 /* store the non average value */
158 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
159 priv->NF[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->noisefloor);
160
161 priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
162 priv->NF[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgnoisefloor);
163
164 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
165 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
166 priv->NF[TYPE_BEACON][TYPE_NOAVG]);
167
168 priv->RSSI[TYPE_BEACON][TYPE_AVG] =
169 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
170 priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
171
172 lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
173 priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
174 priv->RSSI[TYPE_BEACON][TYPE_AVG]);
175
176 lbs_deb_leave(LBS_DEB_CMD);
177 return 0;
178}
179
180static int lbs_ret_802_11_bcn_ctrl(struct lbs_private * priv,
181 struct cmd_ds_command *resp)
182{
183 struct cmd_ds_802_11_beacon_control *bcn_ctrl =
184 &resp->params.bcn_ctrl;
185
186 lbs_deb_enter(LBS_DEB_CMD);
187
188 if (bcn_ctrl->action == CMD_ACT_GET) {
189 priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
190 priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
191 }
192
193 lbs_deb_enter(LBS_DEB_CMD);
194 return 0;
195}
196
197static inline int handle_cmd_response(struct lbs_private *priv, 119static inline int handle_cmd_response(struct lbs_private *priv,
198 struct cmd_header *cmd_response) 120 struct cmd_header *cmd_response)
199{ 121{
@@ -227,29 +149,13 @@ static inline int handle_cmd_response(struct lbs_private *priv,
227 ret = lbs_ret_802_11_rssi(priv, resp); 149 ret = lbs_ret_802_11_rssi(priv, resp);
228 break; 150 break;
229 151
230 case CMD_RET(CMD_802_11D_DOMAIN_INFO):
231 ret = lbs_ret_802_11d_domain_info(resp);
232 break;
233
234 case CMD_RET(CMD_802_11_TPC_CFG): 152 case CMD_RET(CMD_802_11_TPC_CFG):
235 spin_lock_irqsave(&priv->driver_lock, flags); 153 spin_lock_irqsave(&priv->driver_lock, flags);
236 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg, 154 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg,
237 sizeof(struct cmd_ds_802_11_tpc_cfg)); 155 sizeof(struct cmd_ds_802_11_tpc_cfg));
238 spin_unlock_irqrestore(&priv->driver_lock, flags); 156 spin_unlock_irqrestore(&priv->driver_lock, flags);
239 break; 157 break;
240 case CMD_RET(CMD_802_11_LED_GPIO_CTRL):
241 spin_lock_irqsave(&priv->driver_lock, flags);
242 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.ledgpio,
243 sizeof(struct cmd_ds_802_11_led_ctrl));
244 spin_unlock_irqrestore(&priv->driver_lock, flags);
245 break;
246 158
247 case CMD_RET(CMD_GET_TSF):
248 spin_lock_irqsave(&priv->driver_lock, flags);
249 memcpy((void *)priv->cur_cmd->callback_arg,
250 &resp->params.gettsf.tsfvalue, sizeof(u64));
251 spin_unlock_irqrestore(&priv->driver_lock, flags);
252 break;
253 case CMD_RET(CMD_BT_ACCESS): 159 case CMD_RET(CMD_BT_ACCESS):
254 spin_lock_irqsave(&priv->driver_lock, flags); 160 spin_lock_irqsave(&priv->driver_lock, flags);
255 if (priv->cur_cmd->callback_arg) 161 if (priv->cur_cmd->callback_arg)
@@ -545,12 +451,12 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
545 451
546 case MACREG_INT_CODE_MIC_ERR_UNICAST: 452 case MACREG_INT_CODE_MIC_ERR_UNICAST:
547 lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n"); 453 lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n");
548 handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_UNICAST); 454 lbs_send_mic_failureevent(priv, event);
549 break; 455 break;
550 456
551 case MACREG_INT_CODE_MIC_ERR_MULTICAST: 457 case MACREG_INT_CODE_MIC_ERR_MULTICAST:
552 lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n"); 458 lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n");
553 handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_MULTICAST); 459 lbs_send_mic_failureevent(priv, event);
554 break; 460 break;
555 461
556 case MACREG_INT_CODE_MIB_CHANGED: 462 case MACREG_INT_CODE_MIB_CHANGED:
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 893a55ca344a..587b0cb0088d 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -451,10 +451,12 @@ static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf,
451 CMD_MAC_REG_ACCESS, 0, 451 CMD_MAC_REG_ACCESS, 0,
452 CMD_OPTION_WAITFORRSP, 0, &offval); 452 CMD_OPTION_WAITFORRSP, 0, &offval);
453 mdelay(10); 453 mdelay(10);
454 pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n", 454 if (!ret) {
455 pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n",
455 priv->mac_offset, priv->offsetvalue.value); 456 priv->mac_offset, priv->offsetvalue.value);
456 457
457 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 458 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
459 }
458 free_page(addr); 460 free_page(addr);
459 return ret; 461 return ret;
460} 462}
@@ -514,7 +516,8 @@ static ssize_t lbs_wrmac_write(struct file *file,
514 CMD_OPTION_WAITFORRSP, 0, &offval); 516 CMD_OPTION_WAITFORRSP, 0, &offval);
515 mdelay(10); 517 mdelay(10);
516 518
517 res = count; 519 if (!res)
520 res = count;
518out_unlock: 521out_unlock:
519 free_page(addr); 522 free_page(addr);
520 return res; 523 return res;
@@ -539,10 +542,12 @@ static ssize_t lbs_rdbbp_read(struct file *file, char __user *userbuf,
539 CMD_BBP_REG_ACCESS, 0, 542 CMD_BBP_REG_ACCESS, 0,
540 CMD_OPTION_WAITFORRSP, 0, &offval); 543 CMD_OPTION_WAITFORRSP, 0, &offval);
541 mdelay(10); 544 mdelay(10);
542 pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n", 545 if (!ret) {
546 pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n",
543 priv->bbp_offset, priv->offsetvalue.value); 547 priv->bbp_offset, priv->offsetvalue.value);
544 548
545 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 549 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
550 }
546 free_page(addr); 551 free_page(addr);
547 552
548 return ret; 553 return ret;
@@ -603,7 +608,8 @@ static ssize_t lbs_wrbbp_write(struct file *file,
603 CMD_OPTION_WAITFORRSP, 0, &offval); 608 CMD_OPTION_WAITFORRSP, 0, &offval);
604 mdelay(10); 609 mdelay(10);
605 610
606 res = count; 611 if (!res)
612 res = count;
607out_unlock: 613out_unlock:
608 free_page(addr); 614 free_page(addr);
609 return res; 615 return res;
@@ -628,10 +634,12 @@ static ssize_t lbs_rdrf_read(struct file *file, char __user *userbuf,
628 CMD_RF_REG_ACCESS, 0, 634 CMD_RF_REG_ACCESS, 0,
629 CMD_OPTION_WAITFORRSP, 0, &offval); 635 CMD_OPTION_WAITFORRSP, 0, &offval);
630 mdelay(10); 636 mdelay(10);
631 pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n", 637 if (!ret) {
638 pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n",
632 priv->rf_offset, priv->offsetvalue.value); 639 priv->rf_offset, priv->offsetvalue.value);
633 640
634 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 641 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
642 }
635 free_page(addr); 643 free_page(addr);
636 644
637 return ret; 645 return ret;
@@ -692,7 +700,8 @@ static ssize_t lbs_wrrf_write(struct file *file,
692 CMD_OPTION_WAITFORRSP, 0, &offval); 700 CMD_OPTION_WAITFORRSP, 0, &offval);
693 mdelay(10); 701 mdelay(10);
694 702
695 res = count; 703 if (!res)
704 res = count;
696out_unlock: 705out_unlock:
697 free_page(addr); 706 free_page(addr);
698 return res; 707 return res;
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index fb91c3639fc1..678f7c9f7503 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -8,74 +8,48 @@
8 8
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10 10
11#include "defs.h"
12 11
13/** Function Prototype Declaration */
14struct lbs_private; 12struct lbs_private;
15struct sk_buff; 13struct sk_buff;
16struct net_device; 14struct net_device;
17struct cmd_ctrl_node;
18struct cmd_ds_command;
19 15
20void lbs_set_mac_control(struct lbs_private *priv);
21 16
22void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count); 17/* ethtool.c */
23 18extern const struct ethtool_ops lbs_ethtool_ops;
24int lbs_free_cmd_buffer(struct lbs_private *priv);
25 19
26int lbs_prepare_and_send_command(struct lbs_private *priv,
27 u16 cmd_no,
28 u16 cmd_action,
29 u16 wait_option, u32 cmd_oid, void *pdata_buf);
30
31int lbs_allocate_cmd_buffer(struct lbs_private *priv);
32int lbs_execute_next_command(struct lbs_private *priv);
33int lbs_process_event(struct lbs_private *priv, u32 event);
34void lbs_queue_event(struct lbs_private *priv, u32 event);
35void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
36int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
37int lbs_enter_auto_deep_sleep(struct lbs_private *priv);
38int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
39 20
40u32 lbs_fw_index_to_data_rate(u8 index); 21/* tx.c */
41u8 lbs_data_rate_to_fw_index(u32 rate); 22void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count);
42
43/** The proc fs interface */
44int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
45void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
46 int result);
47netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, 23netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb,
48 struct net_device *dev); 24 struct net_device *dev);
49int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
50 25
26/* rx.c */
51int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *); 27int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *);
52 28
53void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
54void lbs_ps_confirm_sleep(struct lbs_private *priv);
55void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
56
57struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
58 struct lbs_private *priv,
59 u8 band,
60 u16 channel);
61
62void lbs_mac_event_disconnected(struct lbs_private *priv);
63
64void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str);
65 29
66/* persistcfg.c */ 30/* persistcfg.c */
67void lbs_persist_config_init(struct net_device *net); 31void lbs_persist_config_init(struct net_device *net);
68void lbs_persist_config_remove(struct net_device *net); 32void lbs_persist_config_remove(struct net_device *net);
69 33
34
70/* main.c */ 35/* main.c */
71struct chan_freq_power *lbs_get_region_cfp_table(u8 region,
72 int *cfp_no);
73struct lbs_private *lbs_add_card(void *card, struct device *dmdev); 36struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
74void lbs_remove_card(struct lbs_private *priv); 37void lbs_remove_card(struct lbs_private *priv);
75int lbs_start_card(struct lbs_private *priv); 38int lbs_start_card(struct lbs_private *priv);
76void lbs_stop_card(struct lbs_private *priv); 39void lbs_stop_card(struct lbs_private *priv);
77void lbs_host_to_card_done(struct lbs_private *priv); 40void lbs_host_to_card_done(struct lbs_private *priv);
78 41
79int lbs_update_channel(struct lbs_private *priv); 42int lbs_suspend(struct lbs_private *priv);
43void lbs_resume(struct lbs_private *priv);
44
45void lbs_queue_event(struct lbs_private *priv, u32 event);
46void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
47
48int lbs_enter_auto_deep_sleep(struct lbs_private *priv);
49int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
50
51u32 lbs_fw_index_to_data_rate(u8 index);
52u8 lbs_data_rate_to_fw_index(u32 rate);
53
80 54
81#endif 55#endif
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 1cf5d5985dac..6b6ea9f7bf5b 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -322,7 +322,6 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
322extern const char lbs_driver_version[]; 322extern const char lbs_driver_version[];
323extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE]; 323extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE];
324 324
325extern u8 lbs_bg_rates[MAX_RATES];
326 325
327/** ENUM definition*/ 326/** ENUM definition*/
328/** SNRNF_TYPE */ 327/** SNRNF_TYPE */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 8abb28af5afa..1a675111300d 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -6,75 +6,10 @@
6#ifndef _LBS_DEV_H_ 6#ifndef _LBS_DEV_H_
7#define _LBS_DEV_H_ 7#define _LBS_DEV_H_
8 8
9#include <linux/netdevice.h> 9#include "scan.h"
10#include <linux/wireless.h> 10#include "assoc.h"
11#include <linux/ethtool.h>
12#include <linux/debugfs.h>
13 11
14#include "defs.h"
15#include "hostcmd.h"
16 12
17extern const struct ethtool_ops lbs_ethtool_ops;
18
19#define MAX_BSSID_PER_CHANNEL 16
20
21#define NR_TX_QUEUE 3
22
23/* For the extended Scan */
24#define MAX_EXTENDED_SCAN_BSSID_LIST MAX_BSSID_PER_CHANNEL * \
25 MRVDRV_MAX_CHANNEL_SIZE + 1
26
27#define MAX_REGION_CHANNEL_NUM 2
28
29/** Chan-freq-TxPower mapping table*/
30struct chan_freq_power {
31 /** channel Number */
32 u16 channel;
33 /** frequency of this channel */
34 u32 freq;
35 /** Max allowed Tx power level */
36 u16 maxtxpower;
37 /** TRUE:channel unsupported; FLASE:supported*/
38 u8 unsupported;
39};
40
41/** region-band mapping table*/
42struct region_channel {
43 /** TRUE if this entry is valid */
44 u8 valid;
45 /** region code for US, Japan ... */
46 u8 region;
47 /** band B/G/A, used for BAND_CONFIG cmd */
48 u8 band;
49 /** Actual No. of elements in the array below */
50 u8 nrcfp;
51 /** chan-freq-txpower mapping table*/
52 struct chan_freq_power *CFP;
53};
54
55struct lbs_802_11_security {
56 u8 WPAenabled;
57 u8 WPA2enabled;
58 u8 wep_enabled;
59 u8 auth_mode;
60 u32 key_mgmt;
61};
62
63/** Current Basic Service Set State Structure */
64struct current_bss_params {
65 /** bssid */
66 u8 bssid[ETH_ALEN];
67 /** ssid */
68 u8 ssid[IW_ESSID_MAX_SIZE + 1];
69 u8 ssid_len;
70
71 /** band */
72 u8 band;
73 /** channel */
74 u8 channel;
75 /** zero-terminated array of supported data rates */
76 u8 rates[MAX_RATES + 1];
77};
78 13
79/** sleep_params */ 14/** sleep_params */
80struct sleep_params { 15struct sleep_params {
@@ -100,113 +35,96 @@ struct lbs_mesh_stats {
100 35
101/** Private structure for the MV device */ 36/** Private structure for the MV device */
102struct lbs_private { 37struct lbs_private {
38
39 /* Basic networking */
40 struct net_device *dev;
41 u32 connect_status;
42 int infra_open;
43 struct work_struct mcast_work;
44 u32 nr_of_multicastmacaddr;
45 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
46
47 /* CFG80211 */
103 struct wireless_dev *wdev; 48 struct wireless_dev *wdev;
49
50 /* Mesh */
51 struct net_device *mesh_dev; /* Virtual device */
52 u32 mesh_connect_status;
53 struct lbs_mesh_stats mstats;
104 int mesh_open; 54 int mesh_open;
105 int mesh_fw_ver; 55 int mesh_fw_ver;
106 int infra_open;
107 int mesh_autostart_enabled; 56 int mesh_autostart_enabled;
57 uint16_t mesh_tlv;
58 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
59 u8 mesh_ssid_len;
60 struct work_struct sync_channel;
108 61
109 char name[DEV_NAME_LEN]; 62 /* Monitor mode */
110
111 void *card;
112 struct net_device *dev;
113
114 struct net_device *mesh_dev; /* Virtual device */
115 struct net_device *rtap_net_dev; 63 struct net_device *rtap_net_dev;
64 u32 monitormode;
116 65
117 struct iw_statistics wstats; 66 /* Debugfs */
118 struct lbs_mesh_stats mstats;
119 struct dentry *debugfs_dir; 67 struct dentry *debugfs_dir;
120 struct dentry *debugfs_debug; 68 struct dentry *debugfs_debug;
121 struct dentry *debugfs_files[6]; 69 struct dentry *debugfs_files[6];
122
123 struct dentry *events_dir; 70 struct dentry *events_dir;
124 struct dentry *debugfs_events_files[6]; 71 struct dentry *debugfs_events_files[6];
125
126 struct dentry *regs_dir; 72 struct dentry *regs_dir;
127 struct dentry *debugfs_regs_files[6]; 73 struct dentry *debugfs_regs_files[6];
128 74
75 /* Hardware debugging */
129 u32 mac_offset; 76 u32 mac_offset;
130 u32 bbp_offset; 77 u32 bbp_offset;
131 u32 rf_offset; 78 u32 rf_offset;
79 struct lbs_offset_value offsetvalue;
80
81 /* Power management */
82 u16 psmode;
83 u32 psstate;
84 u8 needtowakeup;
132 85
133 /** Deep sleep flag */ 86 /* Deep sleep */
134 int is_deep_sleep; 87 int is_deep_sleep;
135 /** Auto deep sleep enabled flag */
136 int is_auto_deep_sleep_enabled; 88 int is_auto_deep_sleep_enabled;
137 /** Device wakeup required flag */
138 int wakeup_dev_required; 89 int wakeup_dev_required;
139 /** Auto deep sleep flag*/
140 int is_activity_detected; 90 int is_activity_detected;
141 /** Auto deep sleep timeout (in miliseconds) */ 91 int auto_deep_sleep_timeout; /* in ms */
142 int auto_deep_sleep_timeout; 92 wait_queue_head_t ds_awake_q;
143 93 struct timer_list auto_deepsleep_timer;
144 /** Deep sleep wait queue */
145 wait_queue_head_t ds_awake_q;
146
147 /* Download sent:
148 bit0 1/0=data_sent/data_tx_done,
149 bit1 1/0=cmd_sent/cmd_tx_done,
150 all other bits reserved 0 */
151 u8 dnld_sent;
152
153 /** thread to service interrupts */
154 struct task_struct *main_thread;
155 wait_queue_head_t waitq;
156 struct workqueue_struct *work_thread;
157
158 struct work_struct mcast_work;
159
160 /** Scanning */
161 struct delayed_work scan_work;
162 struct delayed_work assoc_work;
163 struct work_struct sync_channel;
164 /* remember which channel was scanned last, != 0 if currently scanning */
165 int scan_channel;
166 u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
167 u8 scan_ssid_len;
168 94
169 /** Hardware access */ 95 /* Hardware access */
96 void *card;
97 u8 fw_ready;
98 u8 surpriseremoved;
170 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); 99 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
171 void (*reset_card) (struct lbs_private *priv); 100 void (*reset_card) (struct lbs_private *priv);
172 int (*enter_deep_sleep) (struct lbs_private *priv); 101 int (*enter_deep_sleep) (struct lbs_private *priv);
173 int (*exit_deep_sleep) (struct lbs_private *priv); 102 int (*exit_deep_sleep) (struct lbs_private *priv);
174 int (*reset_deep_sleep_wakeup) (struct lbs_private *priv); 103 int (*reset_deep_sleep_wakeup) (struct lbs_private *priv);
175 104
176 /* Wake On LAN */ 105 /* Adapter info (from EEPROM) */
177 uint32_t wol_criteria;
178 uint8_t wol_gpio;
179 uint8_t wol_gap;
180
181 /** Wlan adapter data structure*/
182 /** STATUS variables */
183 u32 fwrelease; 106 u32 fwrelease;
184 u32 fwcapinfo; 107 u32 fwcapinfo;
108 u16 regioncode;
109 u8 current_addr[ETH_ALEN];
185 110
186 struct mutex lock; 111 /* Command download */
187 112 u8 dnld_sent;
188 /* TX packet ready to be sent... */ 113 /* bit0 1/0=data_sent/data_tx_done,
189 int tx_pending_len; /* -1 while building packet */ 114 bit1 1/0=cmd_sent/cmd_tx_done,
190 115 all other bits reserved 0 */
191 u8 tx_pending_buf[LBS_UPLD_SIZE];
192 /* protected by hard_start_xmit serialization */
193
194 /** command-related variables */
195 u16 seqnum; 116 u16 seqnum;
196
197 struct cmd_ctrl_node *cmd_array; 117 struct cmd_ctrl_node *cmd_array;
198 /** Current command */
199 struct cmd_ctrl_node *cur_cmd; 118 struct cmd_ctrl_node *cur_cmd;
200 int cur_cmd_retcode; 119 struct list_head cmdfreeq; /* free command buffers */
201 /** command Queues */ 120 struct list_head cmdpendingq; /* pending command buffers */
202 /** Free command buffers */
203 struct list_head cmdfreeq;
204 /** Pending command buffers */
205 struct list_head cmdpendingq;
206
207 wait_queue_head_t cmd_pending; 121 wait_queue_head_t cmd_pending;
122 struct timer_list command_timer;
123 int nr_retries;
124 int cmd_timed_out;
208 125
209 /* Command responses sent from the hardware to the driver */ 126 /* Command responses sent from the hardware to the driver */
127 int cur_cmd_retcode;
210 u8 resp_idx; 128 u8 resp_idx;
211 u8 resp_buf[2][LBS_UPLD_SIZE]; 129 u8 resp_buf[2][LBS_UPLD_SIZE];
212 u32 resp_len[2]; 130 u32 resp_len[2];
@@ -214,96 +132,76 @@ struct lbs_private {
214 /* Events sent from hardware to driver */ 132 /* Events sent from hardware to driver */
215 struct kfifo *event_fifo; 133 struct kfifo *event_fifo;
216 134
217 /* nickname */ 135 /** thread to service interrupts */
218 u8 nodename[16]; 136 struct task_struct *main_thread;
219 137 wait_queue_head_t waitq;
220 /** spin locks */ 138 struct workqueue_struct *work_thread;
221 spinlock_t driver_lock;
222
223 /** Timers */
224 struct timer_list command_timer;
225 struct timer_list auto_deepsleep_timer;
226 int nr_retries;
227 int cmd_timed_out;
228
229 /** current ssid/bssid related parameters*/
230 struct current_bss_params curbssparams;
231
232 uint16_t mesh_tlv;
233 u8 mesh_ssid[IW_ESSID_MAX_SIZE + 1];
234 u8 mesh_ssid_len;
235
236 /* IW_MODE_* */
237 u8 mode;
238
239 /* Scan results list */
240 struct list_head network_list;
241 struct list_head network_free_list;
242 struct bss_descriptor *networks;
243
244 u16 beacon_period;
245 u8 beacon_enable;
246 u8 adhoccreate;
247
248 /** capability Info used in Association, start, join */
249 u16 capability;
250
251 /** MAC address information */
252 u8 current_addr[ETH_ALEN];
253 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
254 u32 nr_of_multicastmacaddr;
255 139
256 /** 802.11 statistics */ 140 /** Encryption stuff */
257// struct cmd_DS_802_11_GET_STAT wlan802_11Stat; 141 struct lbs_802_11_security secinfo;
142 struct enc_key wpa_mcast_key;
143 struct enc_key wpa_unicast_key;
144 u8 wpa_ie[MAX_WPA_IE_LEN];
145 u8 wpa_ie_len;
146 u16 wep_tx_keyidx;
147 struct enc_key wep_keys[4];
258 148
259 uint16_t enablehwauto; 149 /* Wake On LAN */
260 uint16_t ratebitmap; 150 uint32_t wol_criteria;
151 uint8_t wol_gpio;
152 uint8_t wol_gap;
261 153
154 /* Transmitting */
155 int tx_pending_len; /* -1 while building packet */
156 u8 tx_pending_buf[LBS_UPLD_SIZE];
157 /* protected by hard_start_xmit serialization */
262 u8 txretrycount; 158 u8 txretrycount;
263
264 /** Tx-related variables (for single packet tx) */
265 struct sk_buff *currenttxskb; 159 struct sk_buff *currenttxskb;
266 160
267 /** NIC Operation characteristics */ 161 /* Locks */
162 struct mutex lock;
163 spinlock_t driver_lock;
164
165 /* NIC/link operation characteristics */
268 u16 mac_control; 166 u16 mac_control;
269 u32 connect_status; 167 u8 radio_on;
270 u32 mesh_connect_status; 168 u8 channel;
271 u16 regioncode;
272 s16 txpower_cur; 169 s16 txpower_cur;
273 s16 txpower_min; 170 s16 txpower_min;
274 s16 txpower_max; 171 s16 txpower_max;
275 172
276 /** POWER MANAGEMENT AND PnP SUPPORT */ 173 /** Scanning */
277 u8 surpriseremoved; 174 struct delayed_work scan_work;
278 175 int scan_channel;
279 u16 psmode; /* Wlan802_11PowermodeCAM=disable 176 /* remember which channel was scanned last, != 0 if currently scanning */
280 Wlan802_11PowermodeMAX_PSP=enable */ 177 u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1];
281 u32 psstate; 178 u8 scan_ssid_len;
282 u8 needtowakeup;
283 179
180 /* Associating */
181 struct delayed_work assoc_work;
182 struct current_bss_params curbssparams;
183 u8 mode;
184 struct list_head network_list;
185 struct list_head network_free_list;
186 struct bss_descriptor *networks;
284 struct assoc_request * pending_assoc_req; 187 struct assoc_request * pending_assoc_req;
285 struct assoc_request * in_progress_assoc_req; 188 struct assoc_request * in_progress_assoc_req;
189 u16 capability;
190 uint16_t enablehwauto;
191 uint16_t ratebitmap;
286 192
287 /** Encryption parameter */ 193 /* ADHOC */
288 struct lbs_802_11_security secinfo; 194 u16 beacon_period;
289 195 u8 beacon_enable;
290 /** WEP keys */ 196 u8 adhoccreate;
291 struct enc_key wep_keys[4];
292 u16 wep_tx_keyidx;
293
294 /** WPA keys */
295 struct enc_key wpa_mcast_key;
296 struct enc_key wpa_unicast_key;
297
298/*
299 * In theory, the IE is limited to the IE length, 255,
300 * but in practice 64 bytes are enough.
301 */
302#define MAX_WPA_IE_LEN 64
303 197
304 /** WPA Information Elements*/ 198 /* WEXT */
305 u8 wpa_ie[MAX_WPA_IE_LEN]; 199 char name[DEV_NAME_LEN];
306 u8 wpa_ie_len; 200 u8 nodename[16];
201 struct iw_statistics wstats;
202 u8 cur_rate;
203#define MAX_REGION_CHANNEL_NUM 2
204 struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
307 205
308 /** Requested Signal Strength*/ 206 /** Requested Signal Strength*/
309 u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG]; 207 u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG];
@@ -313,116 +211,8 @@ struct lbs_private {
313 u8 rawNF[DEFAULT_DATA_AVG_FACTOR]; 211 u8 rawNF[DEFAULT_DATA_AVG_FACTOR];
314 u16 nextSNRNF; 212 u16 nextSNRNF;
315 u16 numSNRNF; 213 u16 numSNRNF;
316
317 u8 radio_on;
318
319 /** data rate stuff */
320 u8 cur_rate;
321
322 /** RF calibration data */
323
324#define MAX_REGION_CHANNEL_NUM 2
325 /** region channel data */
326 struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
327
328 struct region_channel universal_channel[MAX_REGION_CHANNEL_NUM];
329
330 /** 11D and Domain Regulatory Data */
331 struct lbs_802_11d_domain_reg domainreg;
332 struct parsed_region_chan_11d parsed_region_chan;
333
334 /** FSM variable for 11d support */
335 u32 enable11d;
336
337 /** MISCELLANEOUS */
338 struct lbs_offset_value offsetvalue;
339
340 u32 monitormode;
341 u8 fw_ready;
342}; 214};
343 215
344extern struct cmd_confirm_sleep confirm_sleep; 216extern struct cmd_confirm_sleep confirm_sleep;
345 217
346/**
347 * @brief Structure used to store information for each beacon/probe response
348 */
349struct bss_descriptor {
350 u8 bssid[ETH_ALEN];
351
352 u8 ssid[IW_ESSID_MAX_SIZE + 1];
353 u8 ssid_len;
354
355 u16 capability;
356 u32 rssi;
357 u32 channel;
358 u16 beaconperiod;
359 __le16 atimwindow;
360
361 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
362 u8 mode;
363
364 /* zero-terminated array of supported data rates */
365 u8 rates[MAX_RATES + 1];
366
367 unsigned long last_scanned;
368
369 union ieee_phy_param_set phy;
370 union ieee_ss_param_set ss;
371
372 struct ieee_ie_country_info_full_set countryinfo;
373
374 u8 wpa_ie[MAX_WPA_IE_LEN];
375 size_t wpa_ie_len;
376 u8 rsn_ie[MAX_WPA_IE_LEN];
377 size_t rsn_ie_len;
378
379 u8 mesh;
380
381 struct list_head list;
382};
383
384/** Association request
385 *
386 * Encapsulates all the options that describe a specific assocation request
387 * or configuration of the wireless card's radio, mode, and security settings.
388 */
389struct assoc_request {
390#define ASSOC_FLAG_SSID 1
391#define ASSOC_FLAG_CHANNEL 2
392#define ASSOC_FLAG_BAND 3
393#define ASSOC_FLAG_MODE 4
394#define ASSOC_FLAG_BSSID 5
395#define ASSOC_FLAG_WEP_KEYS 6
396#define ASSOC_FLAG_WEP_TX_KEYIDX 7
397#define ASSOC_FLAG_WPA_MCAST_KEY 8
398#define ASSOC_FLAG_WPA_UCAST_KEY 9
399#define ASSOC_FLAG_SECINFO 10
400#define ASSOC_FLAG_WPA_IE 11
401 unsigned long flags;
402
403 u8 ssid[IW_ESSID_MAX_SIZE + 1];
404 u8 ssid_len;
405 u8 channel;
406 u8 band;
407 u8 mode;
408 u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
409
410 /** WEP keys */
411 struct enc_key wep_keys[4];
412 u16 wep_tx_keyidx;
413
414 /** WPA keys */
415 struct enc_key wpa_mcast_key;
416 struct enc_key wpa_unicast_key;
417
418 struct lbs_802_11_security secinfo;
419
420 /** WPA Information Elements*/
421 u8 wpa_ie[MAX_WPA_IE_LEN];
422 u8 wpa_ie_len;
423
424 /* BSS to associate with for infrastructure of Ad-Hoc join */
425 struct bss_descriptor bss;
426};
427
428#endif 218#endif
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index c055daabea13..3809c0b49464 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -1,202 +1,190 @@
1/** 1/**
2 * This file contains definitions of WLAN commands. 2 * This file function prototypes, data structure
3 * and definitions for all the host/station commands
3 */ 4 */
4 5
5#ifndef _LBS_HOST_H_ 6#ifndef _LBS_HOST_H_
6#define _LBS_HOST_H_ 7#define _LBS_HOST_H_
7 8
8/** PUBLIC DEFINITIONS */ 9#include "types.h"
9#define DEFAULT_AD_HOC_CHANNEL 6 10#include "defs.h"
10#define DEFAULT_AD_HOC_CHANNEL_A 36
11 11
12#define CMD_OPTION_WAITFORRSP 0x0002 12#define DEFAULT_AD_HOC_CHANNEL 6
13
14#define CMD_OPTION_WAITFORRSP 0x0002
13 15
14/** Host command IDs */ 16/** Host command IDs */
15 17
16/* Return command are almost always the same as the host command, but with 18/* Return command are almost always the same as the host command, but with
17 * bit 15 set high. There are a few exceptions, though... 19 * bit 15 set high. There are a few exceptions, though...
18 */ 20 */
19#define CMD_RET(cmd) (0x8000 | cmd) 21#define CMD_RET(cmd) (0x8000 | cmd)
20 22
21/* Return command convention exceptions: */ 23/* Return command convention exceptions: */
22#define CMD_RET_802_11_ASSOCIATE 0x8012 24#define CMD_RET_802_11_ASSOCIATE 0x8012
23 25
24/* Command codes */ 26/* Command codes */
25#define CMD_GET_HW_SPEC 0x0003 27#define CMD_GET_HW_SPEC 0x0003
26#define CMD_EEPROM_UPDATE 0x0004 28#define CMD_EEPROM_UPDATE 0x0004
27#define CMD_802_11_RESET 0x0005 29#define CMD_802_11_RESET 0x0005
28#define CMD_802_11_SCAN 0x0006 30#define CMD_802_11_SCAN 0x0006
29#define CMD_802_11_GET_LOG 0x000b 31#define CMD_802_11_GET_LOG 0x000b
30#define CMD_MAC_MULTICAST_ADR 0x0010 32#define CMD_MAC_MULTICAST_ADR 0x0010
31#define CMD_802_11_AUTHENTICATE 0x0011 33#define CMD_802_11_AUTHENTICATE 0x0011
32#define CMD_802_11_EEPROM_ACCESS 0x0059 34#define CMD_802_11_EEPROM_ACCESS 0x0059
33#define CMD_802_11_ASSOCIATE 0x0050 35#define CMD_802_11_ASSOCIATE 0x0050
34#define CMD_802_11_SET_WEP 0x0013 36#define CMD_802_11_SET_WEP 0x0013
35#define CMD_802_11_GET_STAT 0x0014 37#define CMD_802_11_GET_STAT 0x0014
36#define CMD_802_3_GET_STAT 0x0015 38#define CMD_802_3_GET_STAT 0x0015
37#define CMD_802_11_SNMP_MIB 0x0016 39#define CMD_802_11_SNMP_MIB 0x0016
38#define CMD_MAC_REG_MAP 0x0017 40#define CMD_MAC_REG_MAP 0x0017
39#define CMD_BBP_REG_MAP 0x0018 41#define CMD_BBP_REG_MAP 0x0018
40#define CMD_MAC_REG_ACCESS 0x0019 42#define CMD_MAC_REG_ACCESS 0x0019
41#define CMD_BBP_REG_ACCESS 0x001a 43#define CMD_BBP_REG_ACCESS 0x001a
42#define CMD_RF_REG_ACCESS 0x001b 44#define CMD_RF_REG_ACCESS 0x001b
43#define CMD_802_11_RADIO_CONTROL 0x001c 45#define CMD_802_11_RADIO_CONTROL 0x001c
44#define CMD_802_11_RF_CHANNEL 0x001d 46#define CMD_802_11_RF_CHANNEL 0x001d
45#define CMD_802_11_RF_TX_POWER 0x001e 47#define CMD_802_11_RF_TX_POWER 0x001e
46#define CMD_802_11_RSSI 0x001f 48#define CMD_802_11_RSSI 0x001f
47#define CMD_802_11_RF_ANTENNA 0x0020 49#define CMD_802_11_RF_ANTENNA 0x0020
48#define CMD_802_11_PS_MODE 0x0021 50#define CMD_802_11_PS_MODE 0x0021
49#define CMD_802_11_DATA_RATE 0x0022 51#define CMD_802_11_DATA_RATE 0x0022
50#define CMD_RF_REG_MAP 0x0023 52#define CMD_RF_REG_MAP 0x0023
51#define CMD_802_11_DEAUTHENTICATE 0x0024 53#define CMD_802_11_DEAUTHENTICATE 0x0024
52#define CMD_802_11_REASSOCIATE 0x0025 54#define CMD_802_11_REASSOCIATE 0x0025
53#define CMD_MAC_CONTROL 0x0028 55#define CMD_MAC_CONTROL 0x0028
54#define CMD_802_11_AD_HOC_START 0x002b 56#define CMD_802_11_AD_HOC_START 0x002b
55#define CMD_802_11_AD_HOC_JOIN 0x002c 57#define CMD_802_11_AD_HOC_JOIN 0x002c
56#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e 58#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e
57#define CMD_802_11_ENABLE_RSN 0x002f 59#define CMD_802_11_ENABLE_RSN 0x002f
58#define CMD_802_11_SET_AFC 0x003c 60#define CMD_802_11_SET_AFC 0x003c
59#define CMD_802_11_GET_AFC 0x003d 61#define CMD_802_11_GET_AFC 0x003d
60#define CMD_802_11_DEEP_SLEEP 0x003e 62#define CMD_802_11_DEEP_SLEEP 0x003e
61#define CMD_802_11_AD_HOC_STOP 0x0040 63#define CMD_802_11_AD_HOC_STOP 0x0040
62#define CMD_802_11_HOST_SLEEP_CFG 0x0043 64#define CMD_802_11_HOST_SLEEP_CFG 0x0043
63#define CMD_802_11_WAKEUP_CONFIRM 0x0044 65#define CMD_802_11_WAKEUP_CONFIRM 0x0044
64#define CMD_802_11_HOST_SLEEP_ACTIVATE 0x0045 66#define CMD_802_11_HOST_SLEEP_ACTIVATE 0x0045
65#define CMD_802_11_BEACON_STOP 0x0049 67#define CMD_802_11_BEACON_STOP 0x0049
66#define CMD_802_11_MAC_ADDRESS 0x004d 68#define CMD_802_11_MAC_ADDRESS 0x004d
67#define CMD_802_11_LED_GPIO_CTRL 0x004e 69#define CMD_802_11_LED_GPIO_CTRL 0x004e
68#define CMD_802_11_EEPROM_ACCESS 0x0059 70#define CMD_802_11_EEPROM_ACCESS 0x0059
69#define CMD_802_11_BAND_CONFIG 0x0058 71#define CMD_802_11_BAND_CONFIG 0x0058
70#define CMD_GSPI_BUS_CONFIG 0x005a 72#define CMD_GSPI_BUS_CONFIG 0x005a
71#define CMD_802_11D_DOMAIN_INFO 0x005b 73#define CMD_802_11D_DOMAIN_INFO 0x005b
72#define CMD_802_11_KEY_MATERIAL 0x005e 74#define CMD_802_11_KEY_MATERIAL 0x005e
73#define CMD_802_11_SLEEP_PARAMS 0x0066 75#define CMD_802_11_SLEEP_PARAMS 0x0066
74#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067 76#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067
75#define CMD_802_11_SLEEP_PERIOD 0x0068 77#define CMD_802_11_SLEEP_PERIOD 0x0068
76#define CMD_802_11_TPC_CFG 0x0072 78#define CMD_802_11_TPC_CFG 0x0072
77#define CMD_802_11_PA_CFG 0x0073 79#define CMD_802_11_PA_CFG 0x0073
78#define CMD_802_11_FW_WAKE_METHOD 0x0074 80#define CMD_802_11_FW_WAKE_METHOD 0x0074
79#define CMD_802_11_SUBSCRIBE_EVENT 0x0075 81#define CMD_802_11_SUBSCRIBE_EVENT 0x0075
80#define CMD_802_11_RATE_ADAPT_RATESET 0x0076 82#define CMD_802_11_RATE_ADAPT_RATESET 0x0076
81#define CMD_802_11_TX_RATE_QUERY 0x007f 83#define CMD_802_11_TX_RATE_QUERY 0x007f
82#define CMD_GET_TSF 0x0080 84#define CMD_GET_TSF 0x0080
83#define CMD_BT_ACCESS 0x0087 85#define CMD_BT_ACCESS 0x0087
84#define CMD_FWT_ACCESS 0x0095 86#define CMD_FWT_ACCESS 0x0095
85#define CMD_802_11_MONITOR_MODE 0x0098 87#define CMD_802_11_MONITOR_MODE 0x0098
86#define CMD_MESH_ACCESS 0x009b 88#define CMD_MESH_ACCESS 0x009b
87#define CMD_MESH_CONFIG_OLD 0x00a3 89#define CMD_MESH_CONFIG_OLD 0x00a3
88#define CMD_MESH_CONFIG 0x00ac 90#define CMD_MESH_CONFIG 0x00ac
89#define CMD_SET_BOOT2_VER 0x00a5 91#define CMD_SET_BOOT2_VER 0x00a5
90#define CMD_FUNC_INIT 0x00a9 92#define CMD_FUNC_INIT 0x00a9
91#define CMD_FUNC_SHUTDOWN 0x00aa 93#define CMD_FUNC_SHUTDOWN 0x00aa
92#define CMD_802_11_BEACON_CTRL 0x00b0 94#define CMD_802_11_BEACON_CTRL 0x00b0
93 95
94/* For the IEEE Power Save */ 96/* For the IEEE Power Save */
95#define CMD_SUBCMD_ENTER_PS 0x0030 97#define CMD_SUBCMD_ENTER_PS 0x0030
96#define CMD_SUBCMD_EXIT_PS 0x0031 98#define CMD_SUBCMD_EXIT_PS 0x0031
97#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034 99#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034
98#define CMD_SUBCMD_FULL_POWERDOWN 0x0035 100#define CMD_SUBCMD_FULL_POWERDOWN 0x0035
99#define CMD_SUBCMD_FULL_POWERUP 0x0036 101#define CMD_SUBCMD_FULL_POWERUP 0x0036
100 102
101#define CMD_ENABLE_RSN 0x0001 103#define CMD_ENABLE_RSN 0x0001
102#define CMD_DISABLE_RSN 0x0000 104#define CMD_DISABLE_RSN 0x0000
103 105
104#define CMD_ACT_GET 0x0000 106#define CMD_ACT_GET 0x0000
105#define CMD_ACT_SET 0x0001 107#define CMD_ACT_SET 0x0001
106#define CMD_ACT_GET_AES 0x0002
107#define CMD_ACT_SET_AES 0x0003
108#define CMD_ACT_REMOVE_AES 0x0004
109 108
110/* Define action or option for CMD_802_11_SET_WEP */ 109/* Define action or option for CMD_802_11_SET_WEP */
111#define CMD_ACT_ADD 0x0002 110#define CMD_ACT_ADD 0x0002
112#define CMD_ACT_REMOVE 0x0004 111#define CMD_ACT_REMOVE 0x0004
113#define CMD_ACT_USE_DEFAULT 0x0008
114
115#define CMD_TYPE_WEP_40_BIT 0x01
116#define CMD_TYPE_WEP_104_BIT 0x02
117 112
118#define CMD_NUM_OF_WEP_KEYS 4 113#define CMD_TYPE_WEP_40_BIT 0x01
114#define CMD_TYPE_WEP_104_BIT 0x02
119 115
120#define CMD_WEP_KEY_INDEX_MASK 0x3fff 116#define CMD_NUM_OF_WEP_KEYS 4
121 117
122/* Define action or option for CMD_802_11_RESET */ 118#define CMD_WEP_KEY_INDEX_MASK 0x3fff
123#define CMD_ACT_HALT 0x0003
124 119
125/* Define action or option for CMD_802_11_SCAN */ 120/* Define action or option for CMD_802_11_SCAN */
126#define CMD_BSS_TYPE_BSS 0x0001 121#define CMD_BSS_TYPE_BSS 0x0001
127#define CMD_BSS_TYPE_IBSS 0x0002 122#define CMD_BSS_TYPE_IBSS 0x0002
128#define CMD_BSS_TYPE_ANY 0x0003 123#define CMD_BSS_TYPE_ANY 0x0003
129 124
130/* Define action or option for CMD_802_11_SCAN */ 125/* Define action or option for CMD_802_11_SCAN */
131#define CMD_SCAN_TYPE_ACTIVE 0x0000 126#define CMD_SCAN_TYPE_ACTIVE 0x0000
132#define CMD_SCAN_TYPE_PASSIVE 0x0001 127#define CMD_SCAN_TYPE_PASSIVE 0x0001
133 128
134#define CMD_SCAN_RADIO_TYPE_BG 0 129#define CMD_SCAN_RADIO_TYPE_BG 0
135 130
136#define CMD_SCAN_PROBE_DELAY_TIME 0 131#define CMD_SCAN_PROBE_DELAY_TIME 0
137 132
138/* Define action or option for CMD_MAC_CONTROL */ 133/* Define action or option for CMD_MAC_CONTROL */
139#define CMD_ACT_MAC_RX_ON 0x0001 134#define CMD_ACT_MAC_RX_ON 0x0001
140#define CMD_ACT_MAC_TX_ON 0x0002 135#define CMD_ACT_MAC_TX_ON 0x0002
141#define CMD_ACT_MAC_LOOPBACK_ON 0x0004 136#define CMD_ACT_MAC_LOOPBACK_ON 0x0004
142#define CMD_ACT_MAC_WEP_ENABLE 0x0008 137#define CMD_ACT_MAC_WEP_ENABLE 0x0008
143#define CMD_ACT_MAC_INT_ENABLE 0x0010 138#define CMD_ACT_MAC_INT_ENABLE 0x0010
144#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020 139#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020
145#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040 140#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040
146#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080 141#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080
147#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 142#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
148#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400 143#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400
149 144
150/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */ 145/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */
151#define CMD_SUBSCRIBE_RSSI_LOW 0x0001 146#define CMD_SUBSCRIBE_RSSI_LOW 0x0001
152#define CMD_SUBSCRIBE_SNR_LOW 0x0002 147#define CMD_SUBSCRIBE_SNR_LOW 0x0002
153#define CMD_SUBSCRIBE_FAILCOUNT 0x0004 148#define CMD_SUBSCRIBE_FAILCOUNT 0x0004
154#define CMD_SUBSCRIBE_BCNMISS 0x0008 149#define CMD_SUBSCRIBE_BCNMISS 0x0008
155#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010 150#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010
156#define CMD_SUBSCRIBE_SNR_HIGH 0x0020 151#define CMD_SUBSCRIBE_SNR_HIGH 0x0020
157 152
158#define RADIO_PREAMBLE_LONG 0x00 153#define RADIO_PREAMBLE_LONG 0x00
159#define RADIO_PREAMBLE_SHORT 0x02 154#define RADIO_PREAMBLE_SHORT 0x02
160#define RADIO_PREAMBLE_AUTO 0x04 155#define RADIO_PREAMBLE_AUTO 0x04
161 156
162/* Define action or option for CMD_802_11_RF_CHANNEL */ 157/* Define action or option for CMD_802_11_RF_CHANNEL */
163#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00 158#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00
164#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01 159#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01
165 160
166/* Define action or option for CMD_802_11_DATA_RATE */ 161/* Define action or option for CMD_802_11_DATA_RATE */
167#define CMD_ACT_SET_TX_AUTO 0x0000 162#define CMD_ACT_SET_TX_AUTO 0x0000
168#define CMD_ACT_SET_TX_FIX_RATE 0x0001 163#define CMD_ACT_SET_TX_FIX_RATE 0x0001
169#define CMD_ACT_GET_TX_RATE 0x0002 164#define CMD_ACT_GET_TX_RATE 0x0002
170
171#define CMD_ACT_SET_RX 0x0001
172#define CMD_ACT_SET_TX 0x0002
173#define CMD_ACT_SET_BOTH 0x0003
174#define CMD_ACT_GET_RX 0x0004
175#define CMD_ACT_GET_TX 0x0008
176#define CMD_ACT_GET_BOTH 0x000c
177 165
178/* Define action or option for CMD_802_11_PS_MODE */ 166/* Define action or option for CMD_802_11_PS_MODE */
179#define CMD_TYPE_CAM 0x0000 167#define CMD_TYPE_CAM 0x0000
180#define CMD_TYPE_MAX_PSP 0x0001 168#define CMD_TYPE_MAX_PSP 0x0001
181#define CMD_TYPE_FAST_PSP 0x0002 169#define CMD_TYPE_FAST_PSP 0x0002
182 170
183/* Options for CMD_802_11_FW_WAKE_METHOD */ 171/* Options for CMD_802_11_FW_WAKE_METHOD */
184#define CMD_WAKE_METHOD_UNCHANGED 0x0000 172#define CMD_WAKE_METHOD_UNCHANGED 0x0000
185#define CMD_WAKE_METHOD_COMMAND_INT 0x0001 173#define CMD_WAKE_METHOD_COMMAND_INT 0x0001
186#define CMD_WAKE_METHOD_GPIO 0x0002 174#define CMD_WAKE_METHOD_GPIO 0x0002
187 175
188/* Object IDs for CMD_802_11_SNMP_MIB */ 176/* Object IDs for CMD_802_11_SNMP_MIB */
189#define SNMP_MIB_OID_BSS_TYPE 0x0000 177#define SNMP_MIB_OID_BSS_TYPE 0x0000
190#define SNMP_MIB_OID_OP_RATE_SET 0x0001 178#define SNMP_MIB_OID_OP_RATE_SET 0x0001
191#define SNMP_MIB_OID_BEACON_PERIOD 0x0002 /* Reserved on v9+ */ 179#define SNMP_MIB_OID_BEACON_PERIOD 0x0002 /* Reserved on v9+ */
192#define SNMP_MIB_OID_DTIM_PERIOD 0x0003 /* Reserved on v9+ */ 180#define SNMP_MIB_OID_DTIM_PERIOD 0x0003 /* Reserved on v9+ */
193#define SNMP_MIB_OID_ASSOC_TIMEOUT 0x0004 /* Reserved on v9+ */ 181#define SNMP_MIB_OID_ASSOC_TIMEOUT 0x0004 /* Reserved on v9+ */
194#define SNMP_MIB_OID_RTS_THRESHOLD 0x0005 182#define SNMP_MIB_OID_RTS_THRESHOLD 0x0005
195#define SNMP_MIB_OID_SHORT_RETRY_LIMIT 0x0006 183#define SNMP_MIB_OID_SHORT_RETRY_LIMIT 0x0006
196#define SNMP_MIB_OID_LONG_RETRY_LIMIT 0x0007 184#define SNMP_MIB_OID_LONG_RETRY_LIMIT 0x0007
197#define SNMP_MIB_OID_FRAG_THRESHOLD 0x0008 185#define SNMP_MIB_OID_FRAG_THRESHOLD 0x0008
198#define SNMP_MIB_OID_11D_ENABLE 0x0009 186#define SNMP_MIB_OID_11D_ENABLE 0x0009
199#define SNMP_MIB_OID_11H_ENABLE 0x000A 187#define SNMP_MIB_OID_11H_ENABLE 0x000A
200 188
201/* Define action or option for CMD_BT_ACCESS */ 189/* Define action or option for CMD_BT_ACCESS */
202enum cmd_bt_access_opts { 190enum cmd_bt_access_opts {
@@ -303,4 +291,672 @@ enum cmd_mesh_config_types {
303#define MACREG_INT_CODE_MESH_AUTO_STARTED 35 291#define MACREG_INT_CODE_MESH_AUTO_STARTED 35
304#define MACREG_INT_CODE_FIRMWARE_READY 48 292#define MACREG_INT_CODE_FIRMWARE_READY 48
305 293
294
295/* 802.11-related definitions */
296
297/* TxPD descriptor */
298struct txpd {
299 /* union to cope up with later FW revisions */
300 union {
301 /* Current Tx packet status */
302 __le32 tx_status;
303 struct {
304 /* BSS type: client, AP, etc. */
305 u8 bss_type;
306 /* BSS number */
307 u8 bss_num;
308 /* Reserved */
309 __le16 reserved;
310 } bss;
311 } u;
312 /* Tx control */
313 __le32 tx_control;
314 __le32 tx_packet_location;
315 /* Tx packet length */
316 __le16 tx_packet_length;
317 /* First 2 byte of destination MAC address */
318 u8 tx_dest_addr_high[2];
319 /* Last 4 byte of destination MAC address */
320 u8 tx_dest_addr_low[4];
321 /* Pkt Priority */
322 u8 priority;
323 /* Pkt Trasnit Power control */
324 u8 powermgmt;
325 /* Amount of time the packet has been queued (units = 2ms) */
326 u8 pktdelay_2ms;
327 /* reserved */
328 u8 reserved1;
329} __attribute__ ((packed));
330
331/* RxPD Descriptor */
332struct rxpd {
333 /* union to cope up with later FW revisions */
334 union {
335 /* Current Rx packet status */
336 __le16 status;
337 struct {
338 /* BSS type: client, AP, etc. */
339 u8 bss_type;
340 /* BSS number */
341 u8 bss_num;
342 } __attribute__ ((packed)) bss;
343 } __attribute__ ((packed)) u;
344
345 /* SNR */
346 u8 snr;
347
348 /* Tx control */
349 u8 rx_control;
350
351 /* Pkt length */
352 __le16 pkt_len;
353
354 /* Noise Floor */
355 u8 nf;
356
357 /* Rx Packet Rate */
358 u8 rx_rate;
359
360 /* Pkt addr */
361 __le32 pkt_ptr;
362
363 /* Next Rx RxPD addr */
364 __le32 next_rxpd_ptr;
365
366 /* Pkt Priority */
367 u8 priority;
368 u8 reserved[3];
369} __attribute__ ((packed));
370
371struct cmd_header {
372 __le16 command;
373 __le16 size;
374 __le16 seqnum;
375 __le16 result;
376} __attribute__ ((packed));
377
378/* Generic structure to hold all key types. */
379struct enc_key {
380 u16 len;
381 u16 flags; /* KEY_INFO_* from defs.h */
382 u16 type; /* KEY_TYPE_* from defs.h */
383 u8 key[32];
384};
385
386/* lbs_offset_value */
387struct lbs_offset_value {
388 u32 offset;
389 u32 value;
390} __attribute__ ((packed));
391
392/*
393 * Define data structure for CMD_GET_HW_SPEC
394 * This structure defines the response for the GET_HW_SPEC command
395 */
396struct cmd_ds_get_hw_spec {
397 struct cmd_header hdr;
398
399 /* HW Interface version number */
400 __le16 hwifversion;
401 /* HW version number */
402 __le16 version;
403 /* Max number of TxPD FW can handle */
404 __le16 nr_txpd;
405 /* Max no of Multicast address */
406 __le16 nr_mcast_adr;
407 /* MAC address */
408 u8 permanentaddr[6];
409
410 /* region Code */
411 __le16 regioncode;
412
413 /* Number of antenna used */
414 __le16 nr_antenna;
415
416 /* FW release number, example 0x01030304 = 2.3.4p1 */
417 __le32 fwrelease;
418
419 /* Base Address of TxPD queue */
420 __le32 wcb_base;
421 /* Read Pointer of RxPd queue */
422 __le32 rxpd_rdptr;
423
424 /* Write Pointer of RxPd queue */
425 __le32 rxpd_wrptr;
426
427 /*FW/HW capability */
428 __le32 fwcapinfo;
429} __attribute__ ((packed));
430
431struct cmd_ds_802_11_subscribe_event {
432 struct cmd_header hdr;
433
434 __le16 action;
435 __le16 events;
436
437 /* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a
438 * number of TLVs. From the v5.1 manual, those TLVs would add up to
439 * 40 bytes. However, future firmware might add additional TLVs, so I
440 * bump this up a bit.
441 */
442 uint8_t tlv[128];
443} __attribute__ ((packed));
444
445/*
446 * This scan handle Country Information IE(802.11d compliant)
447 * Define data structure for CMD_802_11_SCAN
448 */
449struct cmd_ds_802_11_scan {
450 struct cmd_header hdr;
451
452 uint8_t bsstype;
453 uint8_t bssid[ETH_ALEN];
454 uint8_t tlvbuffer[0];
455} __attribute__ ((packed));
456
457struct cmd_ds_802_11_scan_rsp {
458 struct cmd_header hdr;
459
460 __le16 bssdescriptsize;
461 uint8_t nr_sets;
462 uint8_t bssdesc_and_tlvbuffer[0];
463} __attribute__ ((packed));
464
465struct cmd_ds_802_11_get_log {
466 struct cmd_header hdr;
467
468 __le32 mcasttxframe;
469 __le32 failed;
470 __le32 retry;
471 __le32 multiretry;
472 __le32 framedup;
473 __le32 rtssuccess;
474 __le32 rtsfailure;
475 __le32 ackfailure;
476 __le32 rxfrag;
477 __le32 mcastrxframe;
478 __le32 fcserror;
479 __le32 txframe;
480 __le32 wepundecryptable;
481} __attribute__ ((packed));
482
483struct cmd_ds_mac_control {
484 struct cmd_header hdr;
485 __le16 action;
486 u16 reserved;
487} __attribute__ ((packed));
488
489struct cmd_ds_mac_multicast_adr {
490 struct cmd_header hdr;
491 __le16 action;
492 __le16 nr_of_adrs;
493 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
494} __attribute__ ((packed));
495
496struct cmd_ds_802_11_authenticate {
497 struct cmd_header hdr;
498
499 u8 bssid[ETH_ALEN];
500 u8 authtype;
501 u8 reserved[10];
502} __attribute__ ((packed));
503
504struct cmd_ds_802_11_deauthenticate {
505 struct cmd_header hdr;
506
507 u8 macaddr[ETH_ALEN];
508 __le16 reasoncode;
509} __attribute__ ((packed));
510
511struct cmd_ds_802_11_associate {
512 struct cmd_header hdr;
513
514 u8 bssid[6];
515 __le16 capability;
516 __le16 listeninterval;
517 __le16 bcnperiod;
518 u8 dtimperiod;
519 u8 iebuf[512]; /* Enough for required and most optional IEs */
520} __attribute__ ((packed));
521
522struct cmd_ds_802_11_associate_response {
523 struct cmd_header hdr;
524
525 __le16 capability;
526 __le16 statuscode;
527 __le16 aid;
528 u8 iebuf[512];
529} __attribute__ ((packed));
530
531struct cmd_ds_802_11_set_wep {
532 struct cmd_header hdr;
533
534 /* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
535 __le16 action;
536
537 /* key Index selected for Tx */
538 __le16 keyindex;
539
540 /* 40, 128bit or TXWEP */
541 uint8_t keytype[4];
542 uint8_t keymaterial[4][16];
543} __attribute__ ((packed));
544
545struct cmd_ds_802_11_snmp_mib {
546 struct cmd_header hdr;
547
548 __le16 action;
549 __le16 oid;
550 __le16 bufsize;
551 u8 value[128];
552} __attribute__ ((packed));
553
554struct cmd_ds_mac_reg_access {
555 __le16 action;
556 __le16 offset;
557 __le32 value;
558} __attribute__ ((packed));
559
560struct cmd_ds_bbp_reg_access {
561 __le16 action;
562 __le16 offset;
563 u8 value;
564 u8 reserved[3];
565} __attribute__ ((packed));
566
567struct cmd_ds_rf_reg_access {
568 __le16 action;
569 __le16 offset;
570 u8 value;
571 u8 reserved[3];
572} __attribute__ ((packed));
573
574struct cmd_ds_802_11_radio_control {
575 struct cmd_header hdr;
576
577 __le16 action;
578 __le16 control;
579} __attribute__ ((packed));
580
581struct cmd_ds_802_11_beacon_control {
582 __le16 action;
583 __le16 beacon_enable;
584 __le16 beacon_period;
585} __attribute__ ((packed));
586
587struct cmd_ds_802_11_sleep_params {
588 struct cmd_header hdr;
589
590 /* ACT_GET/ACT_SET */
591 __le16 action;
592
593 /* Sleep clock error in ppm */
594 __le16 error;
595
596 /* Wakeup offset in usec */
597 __le16 offset;
598
599 /* Clock stabilization time in usec */
600 __le16 stabletime;
601
602 /* control periodic calibration */
603 uint8_t calcontrol;
604
605 /* control the use of external sleep clock */
606 uint8_t externalsleepclk;
607
608 /* reserved field, should be set to zero */
609 __le16 reserved;
610} __attribute__ ((packed));
611
612struct cmd_ds_802_11_rf_channel {
613 struct cmd_header hdr;
614
615 __le16 action;
616 __le16 channel;
617 __le16 rftype; /* unused */
618 __le16 reserved; /* unused */
619 u8 channellist[32]; /* unused */
620} __attribute__ ((packed));
621
622struct cmd_ds_802_11_rssi {
623 /* weighting factor */
624 __le16 N;
625
626 __le16 reserved_0;
627 __le16 reserved_1;
628 __le16 reserved_2;
629} __attribute__ ((packed));
630
631struct cmd_ds_802_11_rssi_rsp {
632 __le16 SNR;
633 __le16 noisefloor;
634 __le16 avgSNR;
635 __le16 avgnoisefloor;
636} __attribute__ ((packed));
637
638struct cmd_ds_802_11_mac_address {
639 struct cmd_header hdr;
640
641 __le16 action;
642 u8 macadd[ETH_ALEN];
643} __attribute__ ((packed));
644
645struct cmd_ds_802_11_rf_tx_power {
646 struct cmd_header hdr;
647
648 __le16 action;
649 __le16 curlevel;
650 s8 maxlevel;
651 s8 minlevel;
652} __attribute__ ((packed));
653
654struct cmd_ds_802_11_monitor_mode {
655 __le16 action;
656 __le16 mode;
657} __attribute__ ((packed));
658
659struct cmd_ds_set_boot2_ver {
660 struct cmd_header hdr;
661
662 __le16 action;
663 __le16 version;
664} __attribute__ ((packed));
665
666struct cmd_ds_802_11_fw_wake_method {
667 struct cmd_header hdr;
668
669 __le16 action;
670 __le16 method;
671} __attribute__ ((packed));
672
673struct cmd_ds_802_11_ps_mode {
674 __le16 action;
675 __le16 nullpktinterval;
676 __le16 multipledtim;
677 __le16 reserved;
678 __le16 locallisteninterval;
679} __attribute__ ((packed));
680
681struct cmd_confirm_sleep {
682 struct cmd_header hdr;
683
684 __le16 action;
685 __le16 nullpktinterval;
686 __le16 multipledtim;
687 __le16 reserved;
688 __le16 locallisteninterval;
689} __attribute__ ((packed));
690
691struct cmd_ds_802_11_data_rate {
692 struct cmd_header hdr;
693
694 __le16 action;
695 __le16 reserved;
696 u8 rates[MAX_RATES];
697} __attribute__ ((packed));
698
699struct cmd_ds_802_11_rate_adapt_rateset {
700 struct cmd_header hdr;
701 __le16 action;
702 __le16 enablehwauto;
703 __le16 bitmap;
704} __attribute__ ((packed));
705
706struct cmd_ds_802_11_ad_hoc_start {
707 struct cmd_header hdr;
708
709 u8 ssid[IEEE80211_MAX_SSID_LEN];
710 u8 bsstype;
711 __le16 beaconperiod;
712 u8 dtimperiod; /* Reserved on v9 and later */
713 struct ieee_ie_ibss_param_set ibss;
714 u8 reserved1[4];
715 struct ieee_ie_ds_param_set ds;
716 u8 reserved2[4];
717 __le16 probedelay; /* Reserved on v9 and later */
718 __le16 capability;
719 u8 rates[MAX_RATES];
720 u8 tlv_memory_size_pad[100];
721} __attribute__ ((packed));
722
723struct cmd_ds_802_11_ad_hoc_result {
724 struct cmd_header hdr;
725
726 u8 pad[3];
727 u8 bssid[ETH_ALEN];
728} __attribute__ ((packed));
729
730struct adhoc_bssdesc {
731 u8 bssid[ETH_ALEN];
732 u8 ssid[IEEE80211_MAX_SSID_LEN];
733 u8 type;
734 __le16 beaconperiod;
735 u8 dtimperiod;
736 __le64 timestamp;
737 __le64 localtime;
738 struct ieee_ie_ds_param_set ds;
739 u8 reserved1[4];
740 struct ieee_ie_ibss_param_set ibss;
741 u8 reserved2[4];
742 __le16 capability;
743 u8 rates[MAX_RATES];
744
745 /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
746 * Adhoc join command and will cause a binary layout mismatch with
747 * the firmware
748 */
749} __attribute__ ((packed));
750
751struct cmd_ds_802_11_ad_hoc_join {
752 struct cmd_header hdr;
753
754 struct adhoc_bssdesc bss;
755 __le16 failtimeout; /* Reserved on v9 and later */
756 __le16 probedelay; /* Reserved on v9 and later */
757} __attribute__ ((packed));
758
759struct cmd_ds_802_11_ad_hoc_stop {
760 struct cmd_header hdr;
761} __attribute__ ((packed));
762
763struct cmd_ds_802_11_enable_rsn {
764 struct cmd_header hdr;
765
766 __le16 action;
767 __le16 enable;
768} __attribute__ ((packed));
769
770struct MrvlIEtype_keyParamSet {
771 /* type ID */
772 __le16 type;
773
774 /* length of Payload */
775 __le16 length;
776
777 /* type of key: WEP=0, TKIP=1, AES=2 */
778 __le16 keytypeid;
779
780 /* key control Info specific to a keytypeid */
781 __le16 keyinfo;
782
783 /* length of key */
784 __le16 keylen;
785
786 /* key material of size keylen */
787 u8 key[32];
788} __attribute__ ((packed));
789
790#define MAX_WOL_RULES 16
791
792struct host_wol_rule {
793 uint8_t rule_no;
794 uint8_t rule_ops;
795 __le16 sig_offset;
796 __le16 sig_length;
797 __le16 reserve;
798 __be32 sig_mask;
799 __be32 signature;
800} __attribute__ ((packed));
801
802struct wol_config {
803 uint8_t action;
804 uint8_t pattern;
805 uint8_t no_rules_in_cmd;
806 uint8_t result;
807 struct host_wol_rule rule[MAX_WOL_RULES];
808} __attribute__ ((packed));
809
810struct cmd_ds_host_sleep {
811 struct cmd_header hdr;
812 __le32 criteria;
813 uint8_t gpio;
814 uint16_t gap;
815 struct wol_config wol_conf;
816} __attribute__ ((packed));
817
818
819
820struct cmd_ds_802_11_key_material {
821 struct cmd_header hdr;
822
823 __le16 action;
824 struct MrvlIEtype_keyParamSet keyParamSet[2];
825} __attribute__ ((packed));
826
827struct cmd_ds_802_11_eeprom_access {
828 struct cmd_header hdr;
829 __le16 action;
830 __le16 offset;
831 __le16 len;
832 /* firmware says it returns a maximum of 20 bytes */
833#define LBS_EEPROM_READ_LEN 20
834 u8 value[LBS_EEPROM_READ_LEN];
835} __attribute__ ((packed));
836
837struct cmd_ds_802_11_tpc_cfg {
838 struct cmd_header hdr;
839
840 __le16 action;
841 uint8_t enable;
842 int8_t P0;
843 int8_t P1;
844 int8_t P2;
845 uint8_t usesnr;
846} __attribute__ ((packed));
847
848
849struct cmd_ds_802_11_pa_cfg {
850 struct cmd_header hdr;
851
852 __le16 action;
853 uint8_t enable;
854 int8_t P0;
855 int8_t P1;
856 int8_t P2;
857} __attribute__ ((packed));
858
859
860struct cmd_ds_802_11_led_ctrl {
861 __le16 action;
862 __le16 numled;
863 u8 data[256];
864} __attribute__ ((packed));
865
866struct cmd_ds_802_11_afc {
867 __le16 afc_auto;
868 union {
869 struct {
870 __le16 threshold;
871 __le16 period;
872 };
873 struct {
874 __le16 timing_offset; /* signed */
875 __le16 carrier_offset; /* signed */
876 };
877 };
878} __attribute__ ((packed));
879
880struct cmd_tx_rate_query {
881 __le16 txrate;
882} __attribute__ ((packed));
883
884struct cmd_ds_get_tsf {
885 __le64 tsfvalue;
886} __attribute__ ((packed));
887
888struct cmd_ds_bt_access {
889 __le16 action;
890 __le32 id;
891 u8 addr1[ETH_ALEN];
892 u8 addr2[ETH_ALEN];
893} __attribute__ ((packed));
894
895struct cmd_ds_fwt_access {
896 __le16 action;
897 __le32 id;
898 u8 valid;
899 u8 da[ETH_ALEN];
900 u8 dir;
901 u8 ra[ETH_ALEN];
902 __le32 ssn;
903 __le32 dsn;
904 __le32 metric;
905 u8 rate;
906 u8 hopcount;
907 u8 ttl;
908 __le32 expiration;
909 u8 sleepmode;
910 __le32 snr;
911 __le32 references;
912 u8 prec[ETH_ALEN];
913} __attribute__ ((packed));
914
915struct cmd_ds_mesh_config {
916 struct cmd_header hdr;
917
918 __le16 action;
919 __le16 channel;
920 __le16 type;
921 __le16 length;
922 u8 data[128]; /* last position reserved */
923} __attribute__ ((packed));
924
925struct cmd_ds_mesh_access {
926 struct cmd_header hdr;
927
928 __le16 action;
929 __le32 data[32]; /* last position reserved */
930} __attribute__ ((packed));
931
932/* Number of stats counters returned by the firmware */
933#define MESH_STATS_NUM 8
934
935struct cmd_ds_command {
936 /* command header */
937 __le16 command;
938 __le16 size;
939 __le16 seqnum;
940 __le16 result;
941
942 /* command Body */
943 union {
944 struct cmd_ds_802_11_ps_mode psmode;
945 struct cmd_ds_802_11_monitor_mode monitor;
946 struct cmd_ds_802_11_rssi rssi;
947 struct cmd_ds_802_11_rssi_rsp rssirsp;
948 struct cmd_ds_mac_reg_access macreg;
949 struct cmd_ds_bbp_reg_access bbpreg;
950 struct cmd_ds_rf_reg_access rfreg;
951
952 struct cmd_ds_802_11_tpc_cfg tpccfg;
953 struct cmd_ds_802_11_afc afc;
954 struct cmd_ds_802_11_led_ctrl ledgpio;
955
956 struct cmd_ds_bt_access bt;
957 struct cmd_ds_fwt_access fwt;
958 struct cmd_ds_802_11_beacon_control bcn_ctrl;
959 } params;
960} __attribute__ ((packed));
961
306#endif 962#endif
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
deleted file mode 100644
index c8a1998d4744..000000000000
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ /dev/null
@@ -1,800 +0,0 @@
1/*
2 * This file contains the function prototypes, data structure
3 * and defines for all the host/station commands
4 */
5#ifndef _LBS_HOSTCMD_H
6#define _LBS_HOSTCMD_H
7
8#include <linux/wireless.h>
9#include "11d.h"
10#include "types.h"
11
12/* 802.11-related definitions */
13
14/* TxPD descriptor */
15struct txpd {
16 /* union to cope up with later FW revisions */
17 union {
18 /* Current Tx packet status */
19 __le32 tx_status;
20 struct {
21 /* BSS type: client, AP, etc. */
22 u8 bss_type;
23 /* BSS number */
24 u8 bss_num;
25 /* Reserved */
26 __le16 reserved;
27 } bss;
28 } u;
29 /* Tx control */
30 __le32 tx_control;
31 __le32 tx_packet_location;
32 /* Tx packet length */
33 __le16 tx_packet_length;
34 /* First 2 byte of destination MAC address */
35 u8 tx_dest_addr_high[2];
36 /* Last 4 byte of destination MAC address */
37 u8 tx_dest_addr_low[4];
38 /* Pkt Priority */
39 u8 priority;
40 /* Pkt Trasnit Power control */
41 u8 powermgmt;
42 /* Amount of time the packet has been queued in the driver (units = 2ms) */
43 u8 pktdelay_2ms;
44 /* reserved */
45 u8 reserved1;
46} __attribute__ ((packed));
47
48/* RxPD Descriptor */
49struct rxpd {
50 /* union to cope up with later FW revisions */
51 union {
52 /* Current Rx packet status */
53 __le16 status;
54 struct {
55 /* BSS type: client, AP, etc. */
56 u8 bss_type;
57 /* BSS number */
58 u8 bss_num;
59 } __attribute__ ((packed)) bss;
60 } __attribute__ ((packed)) u;
61
62 /* SNR */
63 u8 snr;
64
65 /* Tx control */
66 u8 rx_control;
67
68 /* Pkt length */
69 __le16 pkt_len;
70
71 /* Noise Floor */
72 u8 nf;
73
74 /* Rx Packet Rate */
75 u8 rx_rate;
76
77 /* Pkt addr */
78 __le32 pkt_ptr;
79
80 /* Next Rx RxPD addr */
81 __le32 next_rxpd_ptr;
82
83 /* Pkt Priority */
84 u8 priority;
85 u8 reserved[3];
86} __attribute__ ((packed));
87
88struct cmd_header {
89 __le16 command;
90 __le16 size;
91 __le16 seqnum;
92 __le16 result;
93} __attribute__ ((packed));
94
95struct cmd_ctrl_node {
96 struct list_head list;
97 int result;
98 /* command response */
99 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *);
100 unsigned long callback_arg;
101 /* command data */
102 struct cmd_header *cmdbuf;
103 /* wait queue */
104 u16 cmdwaitqwoken;
105 wait_queue_head_t cmdwait_q;
106};
107
108/* Generic structure to hold all key types. */
109struct enc_key {
110 u16 len;
111 u16 flags; /* KEY_INFO_* from defs.h */
112 u16 type; /* KEY_TYPE_* from defs.h */
113 u8 key[32];
114};
115
116/* lbs_offset_value */
117struct lbs_offset_value {
118 u32 offset;
119 u32 value;
120} __attribute__ ((packed));
121
122/* Define general data structure */
123/* cmd_DS_GEN */
124struct cmd_ds_gen {
125 __le16 command;
126 __le16 size;
127 __le16 seqnum;
128 __le16 result;
129 void *cmdresp[0];
130} __attribute__ ((packed));
131
132#define S_DS_GEN sizeof(struct cmd_ds_gen)
133
134
135/*
136 * Define data structure for CMD_GET_HW_SPEC
137 * This structure defines the response for the GET_HW_SPEC command
138 */
139struct cmd_ds_get_hw_spec {
140 struct cmd_header hdr;
141
142 /* HW Interface version number */
143 __le16 hwifversion;
144 /* HW version number */
145 __le16 version;
146 /* Max number of TxPD FW can handle */
147 __le16 nr_txpd;
148 /* Max no of Multicast address */
149 __le16 nr_mcast_adr;
150 /* MAC address */
151 u8 permanentaddr[6];
152
153 /* region Code */
154 __le16 regioncode;
155
156 /* Number of antenna used */
157 __le16 nr_antenna;
158
159 /* FW release number, example 0x01030304 = 2.3.4p1 */
160 __le32 fwrelease;
161
162 /* Base Address of TxPD queue */
163 __le32 wcb_base;
164 /* Read Pointer of RxPd queue */
165 __le32 rxpd_rdptr;
166
167 /* Write Pointer of RxPd queue */
168 __le32 rxpd_wrptr;
169
170 /*FW/HW capability */
171 __le32 fwcapinfo;
172} __attribute__ ((packed));
173
174struct cmd_ds_802_11_subscribe_event {
175 struct cmd_header hdr;
176
177 __le16 action;
178 __le16 events;
179
180 /* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a
181 * number of TLVs. From the v5.1 manual, those TLVs would add up to
182 * 40 bytes. However, future firmware might add additional TLVs, so I
183 * bump this up a bit.
184 */
185 uint8_t tlv[128];
186} __attribute__ ((packed));
187
188/*
189 * This scan handle Country Information IE(802.11d compliant)
190 * Define data structure for CMD_802_11_SCAN
191 */
192struct cmd_ds_802_11_scan {
193 struct cmd_header hdr;
194
195 uint8_t bsstype;
196 uint8_t bssid[ETH_ALEN];
197 uint8_t tlvbuffer[0];
198#if 0
199 mrvlietypes_ssidparamset_t ssidParamSet;
200 mrvlietypes_chanlistparamset_t ChanListParamSet;
201 mrvlietypes_ratesparamset_t OpRateSet;
202#endif
203} __attribute__ ((packed));
204
205struct cmd_ds_802_11_scan_rsp {
206 struct cmd_header hdr;
207
208 __le16 bssdescriptsize;
209 uint8_t nr_sets;
210 uint8_t bssdesc_and_tlvbuffer[0];
211} __attribute__ ((packed));
212
213struct cmd_ds_802_11_get_log {
214 struct cmd_header hdr;
215
216 __le32 mcasttxframe;
217 __le32 failed;
218 __le32 retry;
219 __le32 multiretry;
220 __le32 framedup;
221 __le32 rtssuccess;
222 __le32 rtsfailure;
223 __le32 ackfailure;
224 __le32 rxfrag;
225 __le32 mcastrxframe;
226 __le32 fcserror;
227 __le32 txframe;
228 __le32 wepundecryptable;
229} __attribute__ ((packed));
230
231struct cmd_ds_mac_control {
232 struct cmd_header hdr;
233 __le16 action;
234 u16 reserved;
235} __attribute__ ((packed));
236
237struct cmd_ds_mac_multicast_adr {
238 struct cmd_header hdr;
239 __le16 action;
240 __le16 nr_of_adrs;
241 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
242} __attribute__ ((packed));
243
244struct cmd_ds_gspi_bus_config {
245 struct cmd_header hdr;
246 __le16 action;
247 __le16 bus_delay_mode;
248 __le16 host_time_delay_to_read_port;
249 __le16 host_time_delay_to_read_register;
250} __attribute__ ((packed));
251
252struct cmd_ds_802_11_authenticate {
253 struct cmd_header hdr;
254
255 u8 bssid[ETH_ALEN];
256 u8 authtype;
257 u8 reserved[10];
258} __attribute__ ((packed));
259
260struct cmd_ds_802_11_deauthenticate {
261 struct cmd_header hdr;
262
263 u8 macaddr[ETH_ALEN];
264 __le16 reasoncode;
265} __attribute__ ((packed));
266
267struct cmd_ds_802_11_associate {
268 struct cmd_header hdr;
269
270 u8 bssid[6];
271 __le16 capability;
272 __le16 listeninterval;
273 __le16 bcnperiod;
274 u8 dtimperiod;
275 u8 iebuf[512]; /* Enough for required and most optional IEs */
276} __attribute__ ((packed));
277
278struct cmd_ds_802_11_associate_response {
279 struct cmd_header hdr;
280
281 __le16 capability;
282 __le16 statuscode;
283 __le16 aid;
284 u8 iebuf[512];
285} __attribute__ ((packed));
286
287struct cmd_ds_802_11_set_wep {
288 struct cmd_header hdr;
289
290 /* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
291 __le16 action;
292
293 /* key Index selected for Tx */
294 __le16 keyindex;
295
296 /* 40, 128bit or TXWEP */
297 uint8_t keytype[4];
298 uint8_t keymaterial[4][16];
299} __attribute__ ((packed));
300
301struct cmd_ds_802_3_get_stat {
302 __le32 xmitok;
303 __le32 rcvok;
304 __le32 xmiterror;
305 __le32 rcverror;
306 __le32 rcvnobuffer;
307 __le32 rcvcrcerror;
308} __attribute__ ((packed));
309
310struct cmd_ds_802_11_get_stat {
311 __le32 txfragmentcnt;
312 __le32 mcasttxframecnt;
313 __le32 failedcnt;
314 __le32 retrycnt;
315 __le32 Multipleretrycnt;
316 __le32 rtssuccesscnt;
317 __le32 rtsfailurecnt;
318 __le32 ackfailurecnt;
319 __le32 frameduplicatecnt;
320 __le32 rxfragmentcnt;
321 __le32 mcastrxframecnt;
322 __le32 fcserrorcnt;
323 __le32 bcasttxframecnt;
324 __le32 bcastrxframecnt;
325 __le32 txbeacon;
326 __le32 rxbeacon;
327 __le32 wepundecryptable;
328} __attribute__ ((packed));
329
330struct cmd_ds_802_11_snmp_mib {
331 struct cmd_header hdr;
332
333 __le16 action;
334 __le16 oid;
335 __le16 bufsize;
336 u8 value[128];
337} __attribute__ ((packed));
338
339struct cmd_ds_mac_reg_map {
340 __le16 buffersize;
341 u8 regmap[128];
342 __le16 reserved;
343} __attribute__ ((packed));
344
345struct cmd_ds_bbp_reg_map {
346 __le16 buffersize;
347 u8 regmap[128];
348 __le16 reserved;
349} __attribute__ ((packed));
350
351struct cmd_ds_rf_reg_map {
352 __le16 buffersize;
353 u8 regmap[64];
354 __le16 reserved;
355} __attribute__ ((packed));
356
357struct cmd_ds_mac_reg_access {
358 __le16 action;
359 __le16 offset;
360 __le32 value;
361} __attribute__ ((packed));
362
363struct cmd_ds_bbp_reg_access {
364 __le16 action;
365 __le16 offset;
366 u8 value;
367 u8 reserved[3];
368} __attribute__ ((packed));
369
370struct cmd_ds_rf_reg_access {
371 __le16 action;
372 __le16 offset;
373 u8 value;
374 u8 reserved[3];
375} __attribute__ ((packed));
376
377struct cmd_ds_802_11_radio_control {
378 struct cmd_header hdr;
379
380 __le16 action;
381 __le16 control;
382} __attribute__ ((packed));
383
384struct cmd_ds_802_11_beacon_control {
385 __le16 action;
386 __le16 beacon_enable;
387 __le16 beacon_period;
388} __attribute__ ((packed));
389
390struct cmd_ds_802_11_sleep_params {
391 struct cmd_header hdr;
392
393 /* ACT_GET/ACT_SET */
394 __le16 action;
395
396 /* Sleep clock error in ppm */
397 __le16 error;
398
399 /* Wakeup offset in usec */
400 __le16 offset;
401
402 /* Clock stabilization time in usec */
403 __le16 stabletime;
404
405 /* control periodic calibration */
406 uint8_t calcontrol;
407
408 /* control the use of external sleep clock */
409 uint8_t externalsleepclk;
410
411 /* reserved field, should be set to zero */
412 __le16 reserved;
413} __attribute__ ((packed));
414
415struct cmd_ds_802_11_inactivity_timeout {
416 struct cmd_header hdr;
417
418 /* ACT_GET/ACT_SET */
419 __le16 action;
420
421 /* Inactivity timeout in msec */
422 __le16 timeout;
423} __attribute__ ((packed));
424
425struct cmd_ds_802_11_rf_channel {
426 struct cmd_header hdr;
427
428 __le16 action;
429 __le16 channel;
430 __le16 rftype; /* unused */
431 __le16 reserved; /* unused */
432 u8 channellist[32]; /* unused */
433} __attribute__ ((packed));
434
435struct cmd_ds_802_11_rssi {
436 /* weighting factor */
437 __le16 N;
438
439 __le16 reserved_0;
440 __le16 reserved_1;
441 __le16 reserved_2;
442} __attribute__ ((packed));
443
444struct cmd_ds_802_11_rssi_rsp {
445 __le16 SNR;
446 __le16 noisefloor;
447 __le16 avgSNR;
448 __le16 avgnoisefloor;
449} __attribute__ ((packed));
450
451struct cmd_ds_802_11_mac_address {
452 struct cmd_header hdr;
453
454 __le16 action;
455 u8 macadd[ETH_ALEN];
456} __attribute__ ((packed));
457
458struct cmd_ds_802_11_rf_tx_power {
459 struct cmd_header hdr;
460
461 __le16 action;
462 __le16 curlevel;
463 s8 maxlevel;
464 s8 minlevel;
465} __attribute__ ((packed));
466
467struct cmd_ds_802_11_rf_antenna {
468 __le16 action;
469
470 /* Number of antennas or 0xffff(diversity) */
471 __le16 antennamode;
472
473} __attribute__ ((packed));
474
475struct cmd_ds_802_11_monitor_mode {
476 __le16 action;
477 __le16 mode;
478} __attribute__ ((packed));
479
480struct cmd_ds_set_boot2_ver {
481 struct cmd_header hdr;
482
483 __le16 action;
484 __le16 version;
485} __attribute__ ((packed));
486
487struct cmd_ds_802_11_fw_wake_method {
488 struct cmd_header hdr;
489
490 __le16 action;
491 __le16 method;
492} __attribute__ ((packed));
493
494struct cmd_ds_802_11_sleep_period {
495 struct cmd_header hdr;
496
497 __le16 action;
498 __le16 period;
499} __attribute__ ((packed));
500
501struct cmd_ds_802_11_ps_mode {
502 __le16 action;
503 __le16 nullpktinterval;
504 __le16 multipledtim;
505 __le16 reserved;
506 __le16 locallisteninterval;
507} __attribute__ ((packed));
508
509struct cmd_confirm_sleep {
510 struct cmd_header hdr;
511
512 __le16 action;
513 __le16 nullpktinterval;
514 __le16 multipledtim;
515 __le16 reserved;
516 __le16 locallisteninterval;
517} __attribute__ ((packed));
518
519struct cmd_ds_802_11_data_rate {
520 struct cmd_header hdr;
521
522 __le16 action;
523 __le16 reserved;
524 u8 rates[MAX_RATES];
525} __attribute__ ((packed));
526
527struct cmd_ds_802_11_rate_adapt_rateset {
528 struct cmd_header hdr;
529 __le16 action;
530 __le16 enablehwauto;
531 __le16 bitmap;
532} __attribute__ ((packed));
533
534struct cmd_ds_802_11_ad_hoc_start {
535 struct cmd_header hdr;
536
537 u8 ssid[IW_ESSID_MAX_SIZE];
538 u8 bsstype;
539 __le16 beaconperiod;
540 u8 dtimperiod; /* Reserved on v9 and later */
541 struct ieee_ie_ibss_param_set ibss;
542 u8 reserved1[4];
543 struct ieee_ie_ds_param_set ds;
544 u8 reserved2[4];
545 __le16 probedelay; /* Reserved on v9 and later */
546 __le16 capability;
547 u8 rates[MAX_RATES];
548 u8 tlv_memory_size_pad[100];
549} __attribute__ ((packed));
550
551struct cmd_ds_802_11_ad_hoc_result {
552 struct cmd_header hdr;
553
554 u8 pad[3];
555 u8 bssid[ETH_ALEN];
556} __attribute__ ((packed));
557
558struct adhoc_bssdesc {
559 u8 bssid[ETH_ALEN];
560 u8 ssid[IW_ESSID_MAX_SIZE];
561 u8 type;
562 __le16 beaconperiod;
563 u8 dtimperiod;
564 __le64 timestamp;
565 __le64 localtime;
566 struct ieee_ie_ds_param_set ds;
567 u8 reserved1[4];
568 struct ieee_ie_ibss_param_set ibss;
569 u8 reserved2[4];
570 __le16 capability;
571 u8 rates[MAX_RATES];
572
573 /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
574 * Adhoc join command and will cause a binary layout mismatch with
575 * the firmware
576 */
577} __attribute__ ((packed));
578
579struct cmd_ds_802_11_ad_hoc_join {
580 struct cmd_header hdr;
581
582 struct adhoc_bssdesc bss;
583 __le16 failtimeout; /* Reserved on v9 and later */
584 __le16 probedelay; /* Reserved on v9 and later */
585} __attribute__ ((packed));
586
587struct cmd_ds_802_11_ad_hoc_stop {
588 struct cmd_header hdr;
589} __attribute__ ((packed));
590
591struct cmd_ds_802_11_enable_rsn {
592 struct cmd_header hdr;
593
594 __le16 action;
595 __le16 enable;
596} __attribute__ ((packed));
597
598struct MrvlIEtype_keyParamSet {
599 /* type ID */
600 __le16 type;
601
602 /* length of Payload */
603 __le16 length;
604
605 /* type of key: WEP=0, TKIP=1, AES=2 */
606 __le16 keytypeid;
607
608 /* key control Info specific to a keytypeid */
609 __le16 keyinfo;
610
611 /* length of key */
612 __le16 keylen;
613
614 /* key material of size keylen */
615 u8 key[32];
616} __attribute__ ((packed));
617
618#define MAX_WOL_RULES 16
619
620struct host_wol_rule {
621 uint8_t rule_no;
622 uint8_t rule_ops;
623 __le16 sig_offset;
624 __le16 sig_length;
625 __le16 reserve;
626 __be32 sig_mask;
627 __be32 signature;
628} __attribute__ ((packed));
629
630struct wol_config {
631 uint8_t action;
632 uint8_t pattern;
633 uint8_t no_rules_in_cmd;
634 uint8_t result;
635 struct host_wol_rule rule[MAX_WOL_RULES];
636} __attribute__ ((packed));
637
638struct cmd_ds_host_sleep {
639 struct cmd_header hdr;
640 __le32 criteria;
641 uint8_t gpio;
642 uint16_t gap;
643 struct wol_config wol_conf;
644} __attribute__ ((packed));
645
646
647
648struct cmd_ds_802_11_key_material {
649 struct cmd_header hdr;
650
651 __le16 action;
652 struct MrvlIEtype_keyParamSet keyParamSet[2];
653} __attribute__ ((packed));
654
655struct cmd_ds_802_11_eeprom_access {
656 struct cmd_header hdr;
657 __le16 action;
658 __le16 offset;
659 __le16 len;
660 /* firmware says it returns a maximum of 20 bytes */
661#define LBS_EEPROM_READ_LEN 20
662 u8 value[LBS_EEPROM_READ_LEN];
663} __attribute__ ((packed));
664
665struct cmd_ds_802_11_tpc_cfg {
666 struct cmd_header hdr;
667
668 __le16 action;
669 uint8_t enable;
670 int8_t P0;
671 int8_t P1;
672 int8_t P2;
673 uint8_t usesnr;
674} __attribute__ ((packed));
675
676
677struct cmd_ds_802_11_pa_cfg {
678 struct cmd_header hdr;
679
680 __le16 action;
681 uint8_t enable;
682 int8_t P0;
683 int8_t P1;
684 int8_t P2;
685} __attribute__ ((packed));
686
687
688struct cmd_ds_802_11_led_ctrl {
689 __le16 action;
690 __le16 numled;
691 u8 data[256];
692} __attribute__ ((packed));
693
694struct cmd_ds_802_11_afc {
695 __le16 afc_auto;
696 union {
697 struct {
698 __le16 threshold;
699 __le16 period;
700 };
701 struct {
702 __le16 timing_offset; /* signed */
703 __le16 carrier_offset; /* signed */
704 };
705 };
706} __attribute__ ((packed));
707
708struct cmd_tx_rate_query {
709 __le16 txrate;
710} __attribute__ ((packed));
711
712struct cmd_ds_get_tsf {
713 __le64 tsfvalue;
714} __attribute__ ((packed));
715
716struct cmd_ds_bt_access {
717 __le16 action;
718 __le32 id;
719 u8 addr1[ETH_ALEN];
720 u8 addr2[ETH_ALEN];
721} __attribute__ ((packed));
722
723struct cmd_ds_fwt_access {
724 __le16 action;
725 __le32 id;
726 u8 valid;
727 u8 da[ETH_ALEN];
728 u8 dir;
729 u8 ra[ETH_ALEN];
730 __le32 ssn;
731 __le32 dsn;
732 __le32 metric;
733 u8 rate;
734 u8 hopcount;
735 u8 ttl;
736 __le32 expiration;
737 u8 sleepmode;
738 __le32 snr;
739 __le32 references;
740 u8 prec[ETH_ALEN];
741} __attribute__ ((packed));
742
743
744struct cmd_ds_mesh_config {
745 struct cmd_header hdr;
746
747 __le16 action;
748 __le16 channel;
749 __le16 type;
750 __le16 length;
751 u8 data[128]; /* last position reserved */
752} __attribute__ ((packed));
753
754
755struct cmd_ds_mesh_access {
756 struct cmd_header hdr;
757
758 __le16 action;
759 __le32 data[32]; /* last position reserved */
760} __attribute__ ((packed));
761
762/* Number of stats counters returned by the firmware */
763#define MESH_STATS_NUM 8
764
765struct cmd_ds_command {
766 /* command header */
767 __le16 command;
768 __le16 size;
769 __le16 seqnum;
770 __le16 result;
771
772 /* command Body */
773 union {
774 struct cmd_ds_802_11_ps_mode psmode;
775 struct cmd_ds_802_11_get_stat gstat;
776 struct cmd_ds_802_3_get_stat gstat_8023;
777 struct cmd_ds_802_11_rf_antenna rant;
778 struct cmd_ds_802_11_monitor_mode monitor;
779 struct cmd_ds_802_11_rssi rssi;
780 struct cmd_ds_802_11_rssi_rsp rssirsp;
781 struct cmd_ds_mac_reg_access macreg;
782 struct cmd_ds_bbp_reg_access bbpreg;
783 struct cmd_ds_rf_reg_access rfreg;
784
785 struct cmd_ds_802_11d_domain_info domaininfo;
786 struct cmd_ds_802_11d_domain_info domaininforesp;
787
788 struct cmd_ds_802_11_tpc_cfg tpccfg;
789 struct cmd_ds_802_11_afc afc;
790 struct cmd_ds_802_11_led_ctrl ledgpio;
791
792 struct cmd_tx_rate_query txrate;
793 struct cmd_ds_bt_access bt;
794 struct cmd_ds_fwt_access fwt;
795 struct cmd_ds_get_tsf gettsf;
796 struct cmd_ds_802_11_beacon_control bcn_ctrl;
797 } params;
798} __attribute__ ((packed));
799
800#endif
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 06df2e174b50..30d9d0ea28eb 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -134,7 +134,7 @@ static void spu_transaction_finish(struct if_spi_card *card)
134static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len) 134static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
135{ 135{
136 int err = 0; 136 int err = 0;
137 u16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK); 137 __le16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK);
138 struct spi_message m; 138 struct spi_message m;
139 struct spi_transfer reg_trans; 139 struct spi_transfer reg_trans;
140 struct spi_transfer data_trans; 140 struct spi_transfer data_trans;
@@ -166,7 +166,7 @@ static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
166 166
167static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val) 167static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val)
168{ 168{
169 u16 buff; 169 __le16 buff;
170 170
171 buff = cpu_to_le16(val); 171 buff = cpu_to_le16(val);
172 return spu_write(card, reg, (u8 *)&buff, sizeof(u16)); 172 return spu_write(card, reg, (u8 *)&buff, sizeof(u16));
@@ -188,7 +188,7 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
188{ 188{
189 unsigned int delay; 189 unsigned int delay;
190 int err = 0; 190 int err = 0;
191 u16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK); 191 __le16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK);
192 struct spi_message m; 192 struct spi_message m;
193 struct spi_transfer reg_trans; 193 struct spi_transfer reg_trans;
194 struct spi_transfer dummy_trans; 194 struct spi_transfer dummy_trans;
@@ -235,7 +235,7 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
235/* Read 16 bits from an SPI register */ 235/* Read 16 bits from an SPI register */
236static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val) 236static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
237{ 237{
238 u16 buf; 238 __le16 buf;
239 int ret; 239 int ret;
240 240
241 ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); 241 ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
@@ -248,7 +248,7 @@ static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
248 * The low 16 bits are read first. */ 248 * The low 16 bits are read first. */
249static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val) 249static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
250{ 250{
251 u32 buf; 251 __le32 buf;
252 int err; 252 int err;
253 253
254 err = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); 254 err = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index a8262dea9b1f..f12d667ba100 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -511,7 +511,7 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
511 /* Fill the receive configuration URB and initialise the Rx call back */ 511 /* Fill the receive configuration URB and initialise the Rx call back */
512 usb_fill_bulk_urb(cardp->rx_urb, cardp->udev, 512 usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
513 usb_rcvbulkpipe(cardp->udev, cardp->ep_in), 513 usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
514 (void *) (skb->tail + (size_t) IPFIELD_ALIGN_OFFSET), 514 skb->data + IPFIELD_ALIGN_OFFSET,
515 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, 515 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
516 cardp); 516 cardp);
517 517
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 87bfd17b9c8c..01f46cf288d7 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -45,119 +45,6 @@ module_param_named(libertas_debug, lbs_debug, int, 0644);
45struct cmd_confirm_sleep confirm_sleep; 45struct cmd_confirm_sleep confirm_sleep;
46 46
47 47
48#define LBS_TX_PWR_DEFAULT 20 /*100mW */
49#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */
50#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */
51#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */
52#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */
53
54/* Format { channel, frequency (MHz), maxtxpower } */
55/* band: 'B/G', region: USA FCC/Canada IC */
56static struct chan_freq_power channel_freq_power_US_BG[] = {
57 {1, 2412, LBS_TX_PWR_US_DEFAULT},
58 {2, 2417, LBS_TX_PWR_US_DEFAULT},
59 {3, 2422, LBS_TX_PWR_US_DEFAULT},
60 {4, 2427, LBS_TX_PWR_US_DEFAULT},
61 {5, 2432, LBS_TX_PWR_US_DEFAULT},
62 {6, 2437, LBS_TX_PWR_US_DEFAULT},
63 {7, 2442, LBS_TX_PWR_US_DEFAULT},
64 {8, 2447, LBS_TX_PWR_US_DEFAULT},
65 {9, 2452, LBS_TX_PWR_US_DEFAULT},
66 {10, 2457, LBS_TX_PWR_US_DEFAULT},
67 {11, 2462, LBS_TX_PWR_US_DEFAULT}
68};
69
70/* band: 'B/G', region: Europe ETSI */
71static struct chan_freq_power channel_freq_power_EU_BG[] = {
72 {1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
73 {2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
74 {3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
75 {4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
76 {5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
77 {6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
78 {7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
79 {8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
80 {9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
81 {10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
82 {11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
83 {12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
84 {13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
85};
86
87/* band: 'B/G', region: Spain */
88static struct chan_freq_power channel_freq_power_SPN_BG[] = {
89 {10, 2457, LBS_TX_PWR_DEFAULT},
90 {11, 2462, LBS_TX_PWR_DEFAULT}
91};
92
93/* band: 'B/G', region: France */
94static struct chan_freq_power channel_freq_power_FR_BG[] = {
95 {10, 2457, LBS_TX_PWR_FR_DEFAULT},
96 {11, 2462, LBS_TX_PWR_FR_DEFAULT},
97 {12, 2467, LBS_TX_PWR_FR_DEFAULT},
98 {13, 2472, LBS_TX_PWR_FR_DEFAULT}
99};
100
101/* band: 'B/G', region: Japan */
102static struct chan_freq_power channel_freq_power_JPN_BG[] = {
103 {1, 2412, LBS_TX_PWR_JP_DEFAULT},
104 {2, 2417, LBS_TX_PWR_JP_DEFAULT},
105 {3, 2422, LBS_TX_PWR_JP_DEFAULT},
106 {4, 2427, LBS_TX_PWR_JP_DEFAULT},
107 {5, 2432, LBS_TX_PWR_JP_DEFAULT},
108 {6, 2437, LBS_TX_PWR_JP_DEFAULT},
109 {7, 2442, LBS_TX_PWR_JP_DEFAULT},
110 {8, 2447, LBS_TX_PWR_JP_DEFAULT},
111 {9, 2452, LBS_TX_PWR_JP_DEFAULT},
112 {10, 2457, LBS_TX_PWR_JP_DEFAULT},
113 {11, 2462, LBS_TX_PWR_JP_DEFAULT},
114 {12, 2467, LBS_TX_PWR_JP_DEFAULT},
115 {13, 2472, LBS_TX_PWR_JP_DEFAULT},
116 {14, 2484, LBS_TX_PWR_JP_DEFAULT}
117};
118
119/**
120 * the structure for channel, frequency and power
121 */
122struct region_cfp_table {
123 u8 region;
124 struct chan_freq_power *cfp_BG;
125 int cfp_no_BG;
126};
127
128/**
129 * the structure for the mapping between region and CFP
130 */
131static struct region_cfp_table region_cfp_table[] = {
132 {0x10, /*US FCC */
133 channel_freq_power_US_BG,
134 ARRAY_SIZE(channel_freq_power_US_BG),
135 }
136 ,
137 {0x20, /*CANADA IC */
138 channel_freq_power_US_BG,
139 ARRAY_SIZE(channel_freq_power_US_BG),
140 }
141 ,
142 {0x30, /*EU*/ channel_freq_power_EU_BG,
143 ARRAY_SIZE(channel_freq_power_EU_BG),
144 }
145 ,
146 {0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
147 ARRAY_SIZE(channel_freq_power_SPN_BG),
148 }
149 ,
150 {0x32, /*FRANCE*/ channel_freq_power_FR_BG,
151 ARRAY_SIZE(channel_freq_power_FR_BG),
152 }
153 ,
154 {0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
155 ARRAY_SIZE(channel_freq_power_JPN_BG),
156 }
157 ,
158/*Add new region here */
159};
160
161/** 48/**
162 * the table to keep region code 49 * the table to keep region code
163 */ 50 */
@@ -165,13 +52,6 @@ u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE] =
165 { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; 52 { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 };
166 53
167/** 54/**
168 * 802.11b/g supported bitrates (in 500Kb/s units)
169 */
170u8 lbs_bg_rates[MAX_RATES] =
171 { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
1720x00, 0x00 };
173
174/**
175 * FW rate table. FW refers to rates by their index in this table, not by the 55 * FW rate table. FW refers to rates by their index in this table, not by the
176 * rate value itself. Values of 0x00 are 56 * rate value itself. Values of 0x00 are
177 * reserved positions. 57 * reserved positions.
@@ -405,7 +285,7 @@ static ssize_t lbs_mesh_set(struct device *dev,
405 return count; 285 return count;
406 if (enable) 286 if (enable)
407 action = CMD_ACT_MESH_CONFIG_START; 287 action = CMD_ACT_MESH_CONFIG_START;
408 ret = lbs_mesh_config(priv, action, priv->curbssparams.channel); 288 ret = lbs_mesh_config(priv, action, priv->channel);
409 if (ret) 289 if (ret)
410 return ret; 290 return ret;
411 291
@@ -1089,6 +969,8 @@ static void auto_deepsleep_timer_fn(unsigned long data)
1089 ret = lbs_prepare_and_send_command(priv, 969 ret = lbs_prepare_and_send_command(priv,
1090 CMD_802_11_DEEP_SLEEP, 0, 970 CMD_802_11_DEEP_SLEEP, 0,
1091 0, 0, NULL); 971 0, 0, NULL);
972 if (ret)
973 lbs_pr_err("Enter Deep Sleep command failed\n");
1092 } 974 }
1093 } 975 }
1094 mod_timer(&priv->auto_deepsleep_timer , jiffies + 976 mod_timer(&priv->auto_deepsleep_timer , jiffies +
@@ -1164,7 +1046,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
1164 priv->mesh_connect_status = LBS_DISCONNECTED; 1046 priv->mesh_connect_status = LBS_DISCONNECTED;
1165 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 1047 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1166 priv->mode = IW_MODE_INFRA; 1048 priv->mode = IW_MODE_INFRA;
1167 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; 1049 priv->channel = DEFAULT_AD_HOC_CHANNEL;
1168 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 1050 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
1169 priv->radio_on = 1; 1051 priv->radio_on = 1;
1170 priv->enablehwauto = 1; 1052 priv->enablehwauto = 1;
@@ -1345,7 +1227,6 @@ EXPORT_SYMBOL_GPL(lbs_add_card);
1345void lbs_remove_card(struct lbs_private *priv) 1227void lbs_remove_card(struct lbs_private *priv)
1346{ 1228{
1347 struct net_device *dev = priv->dev; 1229 struct net_device *dev = priv->dev;
1348 union iwreq_data wrqu;
1349 1230
1350 lbs_deb_enter(LBS_DEB_MAIN); 1231 lbs_deb_enter(LBS_DEB_MAIN);
1351 1232
@@ -1370,9 +1251,7 @@ void lbs_remove_card(struct lbs_private *priv)
1370 lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); 1251 lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
1371 } 1252 }
1372 1253
1373 memset(wrqu.ap_addr.sa_data, 0xaa, ETH_ALEN); 1254 lbs_send_disconnect_notification(priv);
1374 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1375 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
1376 1255
1377 if (priv->is_deep_sleep) { 1256 if (priv->is_deep_sleep) {
1378 priv->is_deep_sleep = 0; 1257 priv->is_deep_sleep = 0;
@@ -1406,9 +1285,6 @@ int lbs_start_card(struct lbs_private *priv)
1406 if (ret) 1285 if (ret)
1407 goto done; 1286 goto done;
1408 1287
1409 /* init 802.11d */
1410 lbs_init_11d(priv);
1411
1412 if (lbs_cfg_register(priv)) { 1288 if (lbs_cfg_register(priv)) {
1413 lbs_pr_err("cannot register device\n"); 1289 lbs_pr_err("cannot register device\n");
1414 goto done; 1290 goto done;
@@ -1435,10 +1311,10 @@ int lbs_start_card(struct lbs_private *priv)
1435 1311
1436 priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID; 1312 priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
1437 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1313 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1438 priv->curbssparams.channel)) { 1314 priv->channel)) {
1439 priv->mesh_tlv = TLV_TYPE_MESH_ID; 1315 priv->mesh_tlv = TLV_TYPE_MESH_ID;
1440 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1316 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1441 priv->curbssparams.channel)) 1317 priv->channel))
1442 priv->mesh_tlv = 0; 1318 priv->mesh_tlv = 0;
1443 } 1319 }
1444 } else if (priv->mesh_fw_ver == MESH_FW_NEW) { 1320 } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
@@ -1447,7 +1323,7 @@ int lbs_start_card(struct lbs_private *priv)
1447 */ 1323 */
1448 priv->mesh_tlv = TLV_TYPE_MESH_ID; 1324 priv->mesh_tlv = TLV_TYPE_MESH_ID;
1449 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1325 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1450 priv->curbssparams.channel)) 1326 priv->channel))
1451 priv->mesh_tlv = 0; 1327 priv->mesh_tlv = 0;
1452 } 1328 }
1453 if (priv->mesh_tlv) { 1329 if (priv->mesh_tlv) {
@@ -1618,68 +1494,6 @@ static void lbs_remove_mesh(struct lbs_private *priv)
1618 lbs_deb_leave(LBS_DEB_MESH); 1494 lbs_deb_leave(LBS_DEB_MESH);
1619} 1495}
1620 1496
1621/**
1622 * @brief This function finds the CFP in
1623 * region_cfp_table based on region and band parameter.
1624 *
1625 * @param region The region code
1626 * @param band The band
1627 * @param cfp_no A pointer to CFP number
1628 * @return A pointer to CFP
1629 */
1630struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
1631{
1632 int i, end;
1633
1634 lbs_deb_enter(LBS_DEB_MAIN);
1635
1636 end = ARRAY_SIZE(region_cfp_table);
1637
1638 for (i = 0; i < end ; i++) {
1639 lbs_deb_main("region_cfp_table[i].region=%d\n",
1640 region_cfp_table[i].region);
1641 if (region_cfp_table[i].region == region) {
1642 *cfp_no = region_cfp_table[i].cfp_no_BG;
1643 lbs_deb_leave(LBS_DEB_MAIN);
1644 return region_cfp_table[i].cfp_BG;
1645 }
1646 }
1647
1648 lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
1649 return NULL;
1650}
1651
1652int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
1653{
1654 int ret = 0;
1655 int i = 0;
1656
1657 struct chan_freq_power *cfp;
1658 int cfp_no;
1659
1660 lbs_deb_enter(LBS_DEB_MAIN);
1661
1662 memset(priv->region_channel, 0, sizeof(priv->region_channel));
1663
1664 cfp = lbs_get_region_cfp_table(region, &cfp_no);
1665 if (cfp != NULL) {
1666 priv->region_channel[i].nrcfp = cfp_no;
1667 priv->region_channel[i].CFP = cfp;
1668 } else {
1669 lbs_deb_main("wrong region code %#x in band B/G\n",
1670 region);
1671 ret = -1;
1672 goto out;
1673 }
1674 priv->region_channel[i].valid = 1;
1675 priv->region_channel[i].region = region;
1676 priv->region_channel[i].band = band;
1677 i++;
1678out:
1679 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
1680 return ret;
1681}
1682
1683void lbs_queue_event(struct lbs_private *priv, u32 event) 1497void lbs_queue_event(struct lbs_private *priv, u32 event)
1684{ 1498{
1685 unsigned long flags; 1499 unsigned long flags;
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
index 18fe29faf99b..871f914a75fc 100644
--- a/drivers/net/wireless/libertas/persistcfg.c
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -187,9 +187,9 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
187 if (ret) 187 if (ret)
188 return ret; 188 return ret;
189 189
190 if (defs.meshie.val.mesh_id_len > IW_ESSID_MAX_SIZE) { 190 if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) {
191 lbs_pr_err("inconsistent mesh ID length"); 191 lbs_pr_err("inconsistent mesh ID length");
192 defs.meshie.val.mesh_id_len = IW_ESSID_MAX_SIZE; 192 defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
193 } 193 }
194 194
195 /* SSID not null terminated: reserve room for \0 + \n */ 195 /* SSID not null terminated: reserve room for \0 + \n */
@@ -214,7 +214,7 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
214 int len; 214 int len;
215 int ret; 215 int ret;
216 216
217 if (count < 2 || count > IW_ESSID_MAX_SIZE + 1) 217 if (count < 2 || count > IEEE80211_MAX_SSID_LEN + 1)
218 return -EINVAL; 218 return -EINVAL;
219 219
220 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config)); 220 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
@@ -233,7 +233,7 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
233 /* SSID len */ 233 /* SSID len */
234 ie->val.mesh_id_len = len; 234 ie->val.mesh_id_len = len;
235 /* IE len */ 235 /* IE len */
236 ie->len = sizeof(struct mrvl_meshie_val) - IW_ESSID_MAX_SIZE + len; 236 ie->len = sizeof(struct mrvl_meshie_val) - IEEE80211_MAX_SSID_LEN + len;
237 237
238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, 238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
239 CMD_TYPE_MESH_SET_MESH_IE); 239 CMD_TYPE_MESH_SET_MESH_IE);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 65f02cc6752f..9f18a19cc49d 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -4,7 +4,7 @@
4#include <linux/etherdevice.h> 4#include <linux/etherdevice.h>
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7#include "hostcmd.h" 7#include "host.h"
8#include "radiotap.h" 8#include "radiotap.h"
9#include "decl.h" 9#include "decl.h"
10#include "dev.h" 10#include "dev.h"
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 6c95af3023cc..c6a6c042b82f 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -12,18 +12,19 @@
12#include <net/lib80211.h> 12#include <net/lib80211.h>
13 13
14#include "host.h" 14#include "host.h"
15#include "decl.h"
16#include "dev.h" 15#include "dev.h"
17#include "scan.h" 16#include "scan.h"
17#include "assoc.h"
18#include "wext.h"
18#include "cmd.h" 19#include "cmd.h"
19 20
20//! Approximate amount of data needed to pass a scan result back to iwlist 21//! Approximate amount of data needed to pass a scan result back to iwlist
21#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \ 22#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \
22 + IW_ESSID_MAX_SIZE \ 23 + IEEE80211_MAX_SSID_LEN \
23 + IW_EV_UINT_LEN \ 24 + IW_EV_UINT_LEN \
24 + IW_EV_FREQ_LEN \ 25 + IW_EV_FREQ_LEN \
25 + IW_EV_QUAL_LEN \ 26 + IW_EV_QUAL_LEN \
26 + IW_ESSID_MAX_SIZE \ 27 + IEEE80211_MAX_SSID_LEN \
27 + IW_EV_PARAM_LEN \ 28 + IW_EV_PARAM_LEN \
28 + 40) /* 40 for WPAIE */ 29 + 40) /* 40 for WPAIE */
29 30
@@ -121,6 +122,189 @@ static inline int is_same_network(struct bss_descriptor *src,
121 122
122 123
123 124
125/*********************************************************************/
126/* */
127/* Region channel support */
128/* */
129/*********************************************************************/
130
131#define LBS_TX_PWR_DEFAULT 20 /*100mW */
132#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */
133#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */
134#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */
135#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */
136
137/* Format { channel, frequency (MHz), maxtxpower } */
138/* band: 'B/G', region: USA FCC/Canada IC */
139static struct chan_freq_power channel_freq_power_US_BG[] = {
140 {1, 2412, LBS_TX_PWR_US_DEFAULT},
141 {2, 2417, LBS_TX_PWR_US_DEFAULT},
142 {3, 2422, LBS_TX_PWR_US_DEFAULT},
143 {4, 2427, LBS_TX_PWR_US_DEFAULT},
144 {5, 2432, LBS_TX_PWR_US_DEFAULT},
145 {6, 2437, LBS_TX_PWR_US_DEFAULT},
146 {7, 2442, LBS_TX_PWR_US_DEFAULT},
147 {8, 2447, LBS_TX_PWR_US_DEFAULT},
148 {9, 2452, LBS_TX_PWR_US_DEFAULT},
149 {10, 2457, LBS_TX_PWR_US_DEFAULT},
150 {11, 2462, LBS_TX_PWR_US_DEFAULT}
151};
152
153/* band: 'B/G', region: Europe ETSI */
154static struct chan_freq_power channel_freq_power_EU_BG[] = {
155 {1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
156 {2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
157 {3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
158 {4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
159 {5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
160 {6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
161 {7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
162 {8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
163 {9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
164 {10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
165 {11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
166 {12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
167 {13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
168};
169
170/* band: 'B/G', region: Spain */
171static struct chan_freq_power channel_freq_power_SPN_BG[] = {
172 {10, 2457, LBS_TX_PWR_DEFAULT},
173 {11, 2462, LBS_TX_PWR_DEFAULT}
174};
175
176/* band: 'B/G', region: France */
177static struct chan_freq_power channel_freq_power_FR_BG[] = {
178 {10, 2457, LBS_TX_PWR_FR_DEFAULT},
179 {11, 2462, LBS_TX_PWR_FR_DEFAULT},
180 {12, 2467, LBS_TX_PWR_FR_DEFAULT},
181 {13, 2472, LBS_TX_PWR_FR_DEFAULT}
182};
183
184/* band: 'B/G', region: Japan */
185static struct chan_freq_power channel_freq_power_JPN_BG[] = {
186 {1, 2412, LBS_TX_PWR_JP_DEFAULT},
187 {2, 2417, LBS_TX_PWR_JP_DEFAULT},
188 {3, 2422, LBS_TX_PWR_JP_DEFAULT},
189 {4, 2427, LBS_TX_PWR_JP_DEFAULT},
190 {5, 2432, LBS_TX_PWR_JP_DEFAULT},
191 {6, 2437, LBS_TX_PWR_JP_DEFAULT},
192 {7, 2442, LBS_TX_PWR_JP_DEFAULT},
193 {8, 2447, LBS_TX_PWR_JP_DEFAULT},
194 {9, 2452, LBS_TX_PWR_JP_DEFAULT},
195 {10, 2457, LBS_TX_PWR_JP_DEFAULT},
196 {11, 2462, LBS_TX_PWR_JP_DEFAULT},
197 {12, 2467, LBS_TX_PWR_JP_DEFAULT},
198 {13, 2472, LBS_TX_PWR_JP_DEFAULT},
199 {14, 2484, LBS_TX_PWR_JP_DEFAULT}
200};
201
202/**
203 * the structure for channel, frequency and power
204 */
205struct region_cfp_table {
206 u8 region;
207 struct chan_freq_power *cfp_BG;
208 int cfp_no_BG;
209};
210
211/**
212 * the structure for the mapping between region and CFP
213 */
214static struct region_cfp_table region_cfp_table[] = {
215 {0x10, /*US FCC */
216 channel_freq_power_US_BG,
217 ARRAY_SIZE(channel_freq_power_US_BG),
218 }
219 ,
220 {0x20, /*CANADA IC */
221 channel_freq_power_US_BG,
222 ARRAY_SIZE(channel_freq_power_US_BG),
223 }
224 ,
225 {0x30, /*EU*/ channel_freq_power_EU_BG,
226 ARRAY_SIZE(channel_freq_power_EU_BG),
227 }
228 ,
229 {0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
230 ARRAY_SIZE(channel_freq_power_SPN_BG),
231 }
232 ,
233 {0x32, /*FRANCE*/ channel_freq_power_FR_BG,
234 ARRAY_SIZE(channel_freq_power_FR_BG),
235 }
236 ,
237 {0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
238 ARRAY_SIZE(channel_freq_power_JPN_BG),
239 }
240 ,
241/*Add new region here */
242};
243
244/**
245 * @brief This function finds the CFP in
246 * region_cfp_table based on region and band parameter.
247 *
248 * @param region The region code
249 * @param band The band
250 * @param cfp_no A pointer to CFP number
251 * @return A pointer to CFP
252 */
253static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
254{
255 int i, end;
256
257 lbs_deb_enter(LBS_DEB_MAIN);
258
259 end = ARRAY_SIZE(region_cfp_table);
260
261 for (i = 0; i < end ; i++) {
262 lbs_deb_main("region_cfp_table[i].region=%d\n",
263 region_cfp_table[i].region);
264 if (region_cfp_table[i].region == region) {
265 *cfp_no = region_cfp_table[i].cfp_no_BG;
266 lbs_deb_leave(LBS_DEB_MAIN);
267 return region_cfp_table[i].cfp_BG;
268 }
269 }
270
271 lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
272 return NULL;
273}
274
275int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
276{
277 int ret = 0;
278 int i = 0;
279
280 struct chan_freq_power *cfp;
281 int cfp_no;
282
283 lbs_deb_enter(LBS_DEB_MAIN);
284
285 memset(priv->region_channel, 0, sizeof(priv->region_channel));
286
287 cfp = lbs_get_region_cfp_table(region, &cfp_no);
288 if (cfp != NULL) {
289 priv->region_channel[i].nrcfp = cfp_no;
290 priv->region_channel[i].CFP = cfp;
291 } else {
292 lbs_deb_main("wrong region code %#x in band B/G\n",
293 region);
294 ret = -1;
295 goto out;
296 }
297 priv->region_channel[i].valid = 1;
298 priv->region_channel[i].region = region;
299 priv->region_channel[i].band = band;
300 i++;
301out:
302 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
303 return ret;
304}
305
306
307
124 308
125/*********************************************************************/ 309/*********************************************************************/
126/* */ 310/* */
@@ -161,31 +345,15 @@ static int lbs_scan_create_channel_list(struct lbs_private *priv,
161 scantype = CMD_SCAN_TYPE_ACTIVE; 345 scantype = CMD_SCAN_TYPE_ACTIVE;
162 346
163 for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) { 347 for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) {
164 if (priv->enable11d && (priv->connect_status != LBS_CONNECTED) 348 if (!priv->region_channel[rgnidx].valid)
165 && (priv->mesh_connect_status != LBS_CONNECTED)) { 349 continue;
166 /* Scan all the supported chan for the first scan */ 350 scanregion = &priv->region_channel[rgnidx];
167 if (!priv->universal_channel[rgnidx].valid)
168 continue;
169 scanregion = &priv->universal_channel[rgnidx];
170
171 /* clear the parsed_region_chan for the first scan */
172 memset(&priv->parsed_region_chan, 0x00,
173 sizeof(priv->parsed_region_chan));
174 } else {
175 if (!priv->region_channel[rgnidx].valid)
176 continue;
177 scanregion = &priv->region_channel[rgnidx];
178 }
179 351
180 for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) { 352 for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) {
181 struct chanscanparamset *chan = &scanchanlist[chanidx]; 353 struct chanscanparamset *chan = &scanchanlist[chanidx];
182 354
183 cfp = scanregion->CFP + nextchan; 355 cfp = scanregion->CFP + nextchan;
184 356
185 if (priv->enable11d)
186 scantype = lbs_get_scan_type_11d(cfp->channel,
187 &priv->parsed_region_chan);
188
189 if (scanregion->band == BAND_B || scanregion->band == BAND_G) 357 if (scanregion->band == BAND_B || scanregion->band == BAND_G)
190 chan->radiotype = CMD_SCAN_RADIO_TYPE_BG; 358 chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
191 359
@@ -519,7 +687,6 @@ static int lbs_process_bss(struct bss_descriptor *bss,
519 struct ieee_ie_cf_param_set *cf; 687 struct ieee_ie_cf_param_set *cf;
520 struct ieee_ie_ibss_param_set *ibss; 688 struct ieee_ie_ibss_param_set *ibss;
521 DECLARE_SSID_BUF(ssid); 689 DECLARE_SSID_BUF(ssid);
522 struct ieee_ie_country_info_set *pcountryinfo;
523 uint8_t *pos, *end, *p; 690 uint8_t *pos, *end, *p;
524 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; 691 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
525 uint16_t beaconsize = 0; 692 uint16_t beaconsize = 0;
@@ -642,26 +809,6 @@ static int lbs_process_bss(struct bss_descriptor *bss,
642 lbs_deb_scan("got IBSS IE\n"); 809 lbs_deb_scan("got IBSS IE\n");
643 break; 810 break;
644 811
645 case WLAN_EID_COUNTRY:
646 pcountryinfo = (struct ieee_ie_country_info_set *) pos;
647 lbs_deb_scan("got COUNTRY IE\n");
648 if (pcountryinfo->header.len < sizeof(pcountryinfo->countrycode)
649 || pcountryinfo->header.len > 254) {
650 lbs_deb_scan("%s: 11D- Err CountryInfo len %d, min %zd, max 254\n",
651 __func__,
652 pcountryinfo->header.len,
653 sizeof(pcountryinfo->countrycode));
654 ret = -1;
655 goto done;
656 }
657
658 memcpy(&bss->countryinfo, pcountryinfo,
659 pcountryinfo->header.len + 2);
660 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo",
661 (uint8_t *) pcountryinfo,
662 (int) (pcountryinfo->header.len + 2));
663 break;
664
665 case WLAN_EID_EXT_SUPP_RATES: 812 case WLAN_EID_EXT_SUPP_RATES:
666 /* only process extended supported rate if data rate is 813 /* only process extended supported rate if data rate is
667 * already found. Data rate IE should come before 814 * already found. Data rate IE should come before
@@ -812,7 +959,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
812 /* SSID */ 959 /* SSID */
813 iwe.cmd = SIOCGIWESSID; 960 iwe.cmd = SIOCGIWESSID;
814 iwe.u.data.flags = 1; 961 iwe.u.data.flags = 1;
815 iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IW_ESSID_MAX_SIZE); 962 iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN);
816 start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid); 963 start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);
817 964
818 /* Mode */ 965 /* Mode */
@@ -1022,9 +1169,12 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1022 return -EAGAIN; 1169 return -EAGAIN;
1023 1170
1024 /* Update RSSI if current BSS is a locally created ad-hoc BSS */ 1171 /* Update RSSI if current BSS is a locally created ad-hoc BSS */
1025 if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) 1172 if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) {
1026 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 1173 err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
1027 CMD_OPTION_WAITFORRSP, 0, NULL); 1174 CMD_OPTION_WAITFORRSP, 0, NULL);
1175 if (err)
1176 goto out;
1177 }
1028 1178
1029 mutex_lock(&priv->lock); 1179 mutex_lock(&priv->lock);
1030 list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) { 1180 list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
@@ -1058,7 +1208,7 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1058 1208
1059 dwrq->length = (ev - extra); 1209 dwrq->length = (ev - extra);
1060 dwrq->flags = 0; 1210 dwrq->flags = 0;
1061 1211out:
1062 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err); 1212 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err);
1063 return err; 1213 return err;
1064} 1214}
@@ -1141,11 +1291,11 @@ static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
1141 /* The size of the TLV buffer is equal to the entire command response 1291 /* The size of the TLV buffer is equal to the entire command response
1142 * size (scanrespsize) minus the fixed fields (sizeof()'s), the 1292 * size (scanrespsize) minus the fixed fields (sizeof()'s), the
1143 * BSS Descriptions (bssdescriptsize as bytesLef) and the command 1293 * BSS Descriptions (bssdescriptsize as bytesLef) and the command
1144 * response header (S_DS_GEN) 1294 * response header (sizeof(struct cmd_header))
1145 */ 1295 */
1146 tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize) 1296 tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize)
1147 + sizeof(scanresp->nr_sets) 1297 + sizeof(scanresp->nr_sets)
1148 + S_DS_GEN); 1298 + sizeof(struct cmd_header));
1149 1299
1150 /* 1300 /*
1151 * Process each scan response returned (scanresp->nr_sets). Save 1301 * Process each scan response returned (scanresp->nr_sets). Save
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
index fab7d5d097fc..8fb1706d7526 100644
--- a/drivers/net/wireless/libertas/scan.h
+++ b/drivers/net/wireless/libertas/scan.h
@@ -9,8 +9,36 @@
9 9
10#include <net/iw_handler.h> 10#include <net/iw_handler.h>
11 11
12struct lbs_private;
13
12#define MAX_NETWORK_COUNT 128 14#define MAX_NETWORK_COUNT 128
13 15
16/** Chan-freq-TxPower mapping table*/
17struct chan_freq_power {
18 /** channel Number */
19 u16 channel;
20 /** frequency of this channel */
21 u32 freq;
22 /** Max allowed Tx power level */
23 u16 maxtxpower;
24 /** TRUE:channel unsupported; FLASE:supported*/
25 u8 unsupported;
26};
27
28/** region-band mapping table*/
29struct region_channel {
30 /** TRUE if this entry is valid */
31 u8 valid;
32 /** region code for US, Japan ... */
33 u8 region;
34 /** band B/G/A, used for BAND_CONFIG cmd */
35 u8 band;
36 /** Actual No. of elements in the array below */
37 u8 nrcfp;
38 /** chan-freq-txpower mapping table*/
39 struct chan_freq_power *CFP;
40};
41
14/** 42/**
15 * @brief Maximum number of channels that can be sent in a setuserscan ioctl 43 * @brief Maximum number of channels that can be sent in a setuserscan ioctl
16 */ 44 */
@@ -18,6 +46,8 @@
18 46
19int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); 47int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len);
20 48
49int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
50
21int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid, 51int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
22 u8 ssid_len); 52 u8 ssid_len);
23 53
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 4c018f7a0a8d..5d7c011fe296 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -4,7 +4,7 @@
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
6 6
7#include "hostcmd.h" 7#include "host.h"
8#include "radiotap.h" 8#include "radiotap.h"
9#include "decl.h" 9#include "decl.h"
10#include "defs.h" 10#include "defs.h"
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 99905df65b25..3e72c86ceca8 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -5,8 +5,8 @@
5#define _LBS_TYPES_H_ 5#define _LBS_TYPES_H_
6 6
7#include <linux/if_ether.h> 7#include <linux/if_ether.h>
8#include <linux/ieee80211.h>
8#include <asm/byteorder.h> 9#include <asm/byteorder.h>
9#include <linux/wireless.h>
10 10
11struct ieee_ie_header { 11struct ieee_ie_header {
12 u8 id; 12 u8 id;
@@ -247,7 +247,7 @@ struct mrvl_meshie_val {
247 uint8_t active_metric_id; 247 uint8_t active_metric_id;
248 uint8_t mesh_capability; 248 uint8_t mesh_capability;
249 uint8_t mesh_id_len; 249 uint8_t mesh_id_len;
250 uint8_t mesh_id[IW_ESSID_MAX_SIZE]; 250 uint8_t mesh_id[IEEE80211_MAX_SSID_LEN];
251} __attribute__ ((packed)); 251} __attribute__ ((packed));
252 252
253struct mrvl_meshie { 253struct mrvl_meshie {
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 69dd19bf9558..a8eb9e1fcf36 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -45,6 +45,64 @@ static inline void lbs_cancel_association_work(struct lbs_private *priv)
45 priv->pending_assoc_req = NULL; 45 priv->pending_assoc_req = NULL;
46} 46}
47 47
48void lbs_send_disconnect_notification(struct lbs_private *priv)
49{
50 union iwreq_data wrqu;
51
52 memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
53 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
54 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
55}
56
57static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
58{
59 union iwreq_data iwrq;
60 u8 buf[50];
61
62 lbs_deb_enter(LBS_DEB_WEXT);
63
64 memset(&iwrq, 0, sizeof(union iwreq_data));
65 memset(buf, 0, sizeof(buf));
66
67 snprintf(buf, sizeof(buf) - 1, "%s", str);
68
69 iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
70
71 /* Send Event to upper layer */
72 lbs_deb_wext("event indication string %s\n", (char *)buf);
73 lbs_deb_wext("event indication length %d\n", iwrq.data.length);
74 lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
75
76 wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
77
78 lbs_deb_leave(LBS_DEB_WEXT);
79}
80
81/**
82 * @brief This function handles MIC failure event.
83 *
84 * @param priv A pointer to struct lbs_private structure
85 * @para event the event id
86 * @return n/a
87 */
88void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
89{
90 char buf[50];
91
92 lbs_deb_enter(LBS_DEB_CMD);
93 memset(buf, 0, sizeof(buf));
94
95 sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
96
97 if (event == MACREG_INT_CODE_MIC_ERR_UNICAST)
98 strcat(buf, "unicast ");
99 else
100 strcat(buf, "multicast ");
101
102 lbs_send_iwevcustom_event(priv, buf);
103 lbs_deb_leave(LBS_DEB_CMD);
104}
105
48/** 106/**
49 * @brief Find the channel frequency power info with specific channel 107 * @brief Find the channel frequency power info with specific channel
50 * 108 *
@@ -65,8 +123,6 @@ struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
65 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { 123 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
66 rc = &priv->region_channel[j]; 124 rc = &priv->region_channel[j];
67 125
68 if (priv->enable11d)
69 rc = &priv->universal_channel[j];
70 if (!rc->valid || !rc->CFP) 126 if (!rc->valid || !rc->CFP)
71 continue; 127 continue;
72 if (rc->band != band) 128 if (rc->band != band)
@@ -106,8 +162,6 @@ static struct chan_freq_power *find_cfp_by_band_and_freq(
106 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { 162 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
107 rc = &priv->region_channel[j]; 163 rc = &priv->region_channel[j];
108 164
109 if (priv->enable11d)
110 rc = &priv->universal_channel[j];
111 if (!rc->valid || !rc->CFP) 165 if (!rc->valid || !rc->CFP)
112 continue; 166 continue;
113 if (rc->band != band) 167 if (rc->band != band)
@@ -168,12 +222,12 @@ static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
168 lbs_deb_enter(LBS_DEB_WEXT); 222 lbs_deb_enter(LBS_DEB_WEXT);
169 223
170 cfp = lbs_find_cfp_by_band_and_channel(priv, 0, 224 cfp = lbs_find_cfp_by_band_and_channel(priv, 0,
171 priv->curbssparams.channel); 225 priv->channel);
172 226
173 if (!cfp) { 227 if (!cfp) {
174 if (priv->curbssparams.channel) 228 if (priv->channel)
175 lbs_deb_wext("invalid channel %d\n", 229 lbs_deb_wext("invalid channel %d\n",
176 priv->curbssparams.channel); 230 priv->channel);
177 return -EINVAL; 231 return -EINVAL;
178 } 232 }
179 233
@@ -546,8 +600,6 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
546 struct chan_freq_power *cfp; 600 struct chan_freq_power *cfp;
547 u8 rates[MAX_RATES + 1]; 601 u8 rates[MAX_RATES + 1];
548 602
549 u8 flag = 0;
550
551 lbs_deb_enter(LBS_DEB_WEXT); 603 lbs_deb_enter(LBS_DEB_WEXT);
552 604
553 dwrq->length = sizeof(struct iw_range); 605 dwrq->length = sizeof(struct iw_range);
@@ -569,52 +621,21 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
569 621
570 range->scan_capa = IW_SCAN_CAPA_ESSID; 622 range->scan_capa = IW_SCAN_CAPA_ESSID;
571 623
572 if (priv->enable11d && 624 for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
573 (priv->connect_status == LBS_CONNECTED || 625 && (j < ARRAY_SIZE(priv->region_channel)); j++) {
574 priv->mesh_connect_status == LBS_CONNECTED)) { 626 cfp = priv->region_channel[j].CFP;
575 u8 chan_no;
576 u8 band;
577
578 struct parsed_region_chan_11d *parsed_region_chan =
579 &priv->parsed_region_chan;
580
581 if (parsed_region_chan == NULL) {
582 lbs_deb_wext("11d: parsed_region_chan is NULL\n");
583 goto out;
584 }
585 band = parsed_region_chan->band;
586 lbs_deb_wext("band %d, nr_char %d\n", band,
587 parsed_region_chan->nr_chan);
588
589 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES) 627 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
590 && (i < parsed_region_chan->nr_chan); i++) { 628 && priv->region_channel[j].valid
591 chan_no = parsed_region_chan->chanpwr[i].chan; 629 && cfp
592 lbs_deb_wext("chan_no %d\n", chan_no); 630 && (i < priv->region_channel[j].nrcfp); i++) {
593 range->freq[range->num_frequency].i = (long)chan_no; 631 range->freq[range->num_frequency].i =
632 (long)cfp->channel;
594 range->freq[range->num_frequency].m = 633 range->freq[range->num_frequency].m =
595 (long)lbs_chan_2_freq(chan_no) * 100000; 634 (long)cfp->freq * 100000;
596 range->freq[range->num_frequency].e = 1; 635 range->freq[range->num_frequency].e = 1;
636 cfp++;
597 range->num_frequency++; 637 range->num_frequency++;
598 } 638 }
599 flag = 1;
600 }
601 if (!flag) {
602 for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
603 && (j < ARRAY_SIZE(priv->region_channel)); j++) {
604 cfp = priv->region_channel[j].CFP;
605 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
606 && priv->region_channel[j].valid
607 && cfp
608 && (i < priv->region_channel[j].nrcfp); i++) {
609 range->freq[range->num_frequency].i =
610 (long)cfp->channel;
611 range->freq[range->num_frequency].m =
612 (long)cfp->freq * 100000;
613 range->freq[range->num_frequency].e = 1;
614 cfp++;
615 range->num_frequency++;
616 }
617 }
618 } 639 }
619 640
620 lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n", 641 lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n",
@@ -699,7 +720,6 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
699 | IW_ENC_CAPA_CIPHER_CCMP; 720 | IW_ENC_CAPA_CIPHER_CCMP;
700 } 721 }
701 722
702out:
703 lbs_deb_leave(LBS_DEB_WEXT); 723 lbs_deb_leave(LBS_DEB_WEXT);
704 return 0; 724 return 0;
705} 725}
@@ -832,7 +852,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
832 u32 rssi_qual; 852 u32 rssi_qual;
833 u32 tx_qual; 853 u32 tx_qual;
834 u32 quality = 0; 854 u32 quality = 0;
835 int stats_valid = 0; 855 int ret, stats_valid = 0;
836 u8 rssi; 856 u8 rssi;
837 u32 tx_retries; 857 u32 tx_retries;
838 struct cmd_ds_802_11_get_log log; 858 struct cmd_ds_802_11_get_log log;
@@ -881,7 +901,9 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
881 901
882 memset(&log, 0, sizeof(log)); 902 memset(&log, 0, sizeof(log));
883 log.hdr.size = cpu_to_le16(sizeof(log)); 903 log.hdr.size = cpu_to_le16(sizeof(log));
884 lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log); 904 ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
905 if (ret)
906 goto out;
885 907
886 tx_retries = le32_to_cpu(log.retry); 908 tx_retries = le32_to_cpu(log.retry);
887 909
@@ -909,8 +931,10 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
909 stats_valid = 1; 931 stats_valid = 1;
910 932
911 /* update stats asynchronously for future calls */ 933 /* update stats asynchronously for future calls */
912 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 934 ret = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
913 0, 0, NULL); 935 0, 0, NULL);
936 if (ret)
937 lbs_pr_err("RSSI command failed\n");
914out: 938out:
915 if (!stats_valid) { 939 if (!stats_valid) {
916 priv->wstats.miss.beacon = 0; 940 priv->wstats.miss.beacon = 0;
@@ -1020,7 +1044,7 @@ static int lbs_mesh_set_freq(struct net_device *dev,
1020 goto out; 1044 goto out;
1021 } 1045 }
1022 1046
1023 if (fwrq->m != priv->curbssparams.channel) { 1047 if (fwrq->m != priv->channel) {
1024 lbs_deb_wext("mesh channel change forces eth disconnect\n"); 1048 lbs_deb_wext("mesh channel change forces eth disconnect\n");
1025 if (priv->mode == IW_MODE_INFRA) 1049 if (priv->mode == IW_MODE_INFRA)
1026 lbs_cmd_80211_deauthenticate(priv, 1050 lbs_cmd_80211_deauthenticate(priv,
@@ -2023,7 +2047,7 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
2023{ 2047{
2024 struct lbs_private *priv = dev->ml_priv; 2048 struct lbs_private *priv = dev->ml_priv;
2025 int ret = 0; 2049 int ret = 0;
2026 u8 ssid[IW_ESSID_MAX_SIZE]; 2050 u8 ssid[IEEE80211_MAX_SSID_LEN];
2027 u8 ssid_len = 0; 2051 u8 ssid_len = 0;
2028 struct assoc_request * assoc_req; 2052 struct assoc_request * assoc_req;
2029 int in_ssid_len = dwrq->length; 2053 int in_ssid_len = dwrq->length;
@@ -2037,7 +2061,7 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
2037 } 2061 }
2038 2062
2039 /* Check the size of the string */ 2063 /* Check the size of the string */
2040 if (in_ssid_len > IW_ESSID_MAX_SIZE) { 2064 if (in_ssid_len > IEEE80211_MAX_SSID_LEN) {
2041 ret = -E2BIG; 2065 ret = -E2BIG;
2042 goto out; 2066 goto out;
2043 } 2067 }
@@ -2068,7 +2092,7 @@ out:
2068 ret = -ENOMEM; 2092 ret = -ENOMEM;
2069 } else { 2093 } else {
2070 /* Copy the SSID to the association request */ 2094 /* Copy the SSID to the association request */
2071 memcpy(&assoc_req->ssid, &ssid, IW_ESSID_MAX_SIZE); 2095 memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN);
2072 assoc_req->ssid_len = ssid_len; 2096 assoc_req->ssid_len = ssid_len;
2073 set_bit(ASSOC_FLAG_SSID, &assoc_req->flags); 2097 set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
2074 lbs_postpone_association_work(priv); 2098 lbs_postpone_association_work(priv);
@@ -2119,7 +2143,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2119 } 2143 }
2120 2144
2121 /* Check the size of the string */ 2145 /* Check the size of the string */
2122 if (dwrq->length > IW_ESSID_MAX_SIZE) { 2146 if (dwrq->length > IEEE80211_MAX_SSID_LEN) {
2123 ret = -E2BIG; 2147 ret = -E2BIG;
2124 goto out; 2148 goto out;
2125 } 2149 }
@@ -2134,7 +2158,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2134 } 2158 }
2135 2159
2136 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 2160 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
2137 priv->curbssparams.channel); 2161 priv->channel);
2138 out: 2162 out:
2139 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 2163 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
2140 return ret; 2164 return ret;
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
index 4c08db497606..7863baf7d234 100644
--- a/drivers/net/wireless/libertas/wext.h
+++ b/drivers/net/wireless/libertas/wext.h
@@ -4,7 +4,15 @@
4#ifndef _LBS_WEXT_H_ 4#ifndef _LBS_WEXT_H_
5#define _LBS_WEXT_H_ 5#define _LBS_WEXT_H_
6 6
7void lbs_send_disconnect_notification(struct lbs_private *priv);
8void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
9
7extern struct iw_handler_def lbs_handler_def; 10extern struct iw_handler_def lbs_handler_def;
8extern struct iw_handler_def mesh_handler_def; 11extern struct iw_handler_def mesh_handler_def;
9 12
13struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
14 struct lbs_private *priv,
15 u8 band,
16 u16 channel);
17
10#endif 18#endif
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 359652d35e63..404830f47ab2 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -60,8 +60,15 @@ static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
60/* Set priv->firmware type, determine firmware properties 60/* Set priv->firmware type, determine firmware properties
61 * This function can be called before we have registerred with netdev, 61 * This function can be called before we have registerred with netdev,
62 * so all errors go out with dev_* rather than printk 62 * so all errors go out with dev_* rather than printk
63 *
64 * If non-NULL stores a firmware description in fw_name.
65 * If non-NULL stores a HW version in hw_ver
66 *
67 * These are output via generic cfg80211 ethtool support.
63 */ 68 */
64int determine_fw_capabilities(struct orinoco_private *priv) 69int determine_fw_capabilities(struct orinoco_private *priv,
70 char *fw_name, size_t fw_name_len,
71 u32 *hw_ver)
65{ 72{
66 struct device *dev = priv->dev; 73 struct device *dev = priv->dev;
67 hermes_t *hw = &priv->hw; 74 hermes_t *hw = &priv->hw;
@@ -85,6 +92,12 @@ int determine_fw_capabilities(struct orinoco_private *priv)
85 dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n", 92 dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n",
86 nic_id.id, nic_id.variant, nic_id.major, nic_id.minor); 93 nic_id.id, nic_id.variant, nic_id.major, nic_id.minor);
87 94
95 if (hw_ver)
96 *hw_ver = (((nic_id.id & 0xff) << 24) |
97 ((nic_id.variant & 0xff) << 16) |
98 ((nic_id.major & 0xff) << 8) |
99 (nic_id.minor & 0xff));
100
88 priv->firmware_type = determine_firmware_type(&nic_id); 101 priv->firmware_type = determine_firmware_type(&nic_id);
89 102
90 /* Get the firmware version */ 103 /* Get the firmware version */
@@ -135,8 +148,9 @@ int determine_fw_capabilities(struct orinoco_private *priv)
135 case FIRMWARE_TYPE_AGERE: 148 case FIRMWARE_TYPE_AGERE:
136 /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout, 149 /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
137 ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */ 150 ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
138 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1, 151 if (fw_name)
139 "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor); 152 snprintf(fw_name, fw_name_len, "Lucent/Agere %d.%02d",
153 sta_id.major, sta_id.minor);
140 154
141 firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor; 155 firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
142 156
@@ -185,8 +199,8 @@ int determine_fw_capabilities(struct orinoco_private *priv)
185 tmp[SYMBOL_MAX_VER_LEN] = '\0'; 199 tmp[SYMBOL_MAX_VER_LEN] = '\0';
186 } 200 }
187 201
188 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1, 202 if (fw_name)
189 "Symbol %s", tmp); 203 snprintf(fw_name, fw_name_len, "Symbol %s", tmp);
190 204
191 priv->has_ibss = (firmver >= 0x20000); 205 priv->has_ibss = (firmver >= 0x20000);
192 priv->has_wep = (firmver >= 0x15012); 206 priv->has_wep = (firmver >= 0x15012);
@@ -224,9 +238,9 @@ int determine_fw_capabilities(struct orinoco_private *priv)
224 * different and less well tested */ 238 * different and less well tested */
225 /* D-Link MAC : 00:40:05:* */ 239 /* D-Link MAC : 00:40:05:* */
226 /* Addtron MAC : 00:90:D1:* */ 240 /* Addtron MAC : 00:90:D1:* */
227 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1, 241 if (fw_name)
228 "Intersil %d.%d.%d", sta_id.major, sta_id.minor, 242 snprintf(fw_name, fw_name_len, "Intersil %d.%d.%d",
229 sta_id.variant); 243 sta_id.major, sta_id.minor, sta_id.variant);
230 244
231 firmver = ((unsigned long)sta_id.major << 16) | 245 firmver = ((unsigned long)sta_id.major << 16) |
232 ((unsigned long)sta_id.minor << 8) | sta_id.variant; 246 ((unsigned long)sta_id.minor << 8) | sta_id.variant;
@@ -245,7 +259,8 @@ int determine_fw_capabilities(struct orinoco_private *priv)
245 } 259 }
246 break; 260 break;
247 } 261 }
248 dev_info(dev, "Firmware determined as %s\n", priv->fw_name); 262 if (fw_name)
263 dev_info(dev, "Firmware determined as %s\n", fw_name);
249 264
250 return 0; 265 return 0;
251} 266}
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 8df6e8752be6..e2f7fdc4d45a 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -24,7 +24,8 @@
24struct orinoco_private; 24struct orinoco_private;
25struct dev_addr_list; 25struct dev_addr_list;
26 26
27int determine_fw_capabilities(struct orinoco_private *priv); 27int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
28 size_t fw_name_len, u32 *hw_ver);
28int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr); 29int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr);
29int orinoco_hw_allocate_fid(struct orinoco_private *priv); 30int orinoco_hw_allocate_fid(struct orinoco_private *priv);
30int orinoco_get_bitratemode(int bitrate, int automatic); 31int orinoco_get_bitratemode(int bitrate, int automatic);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 5fdc59c594f2..753a1804eee7 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -83,7 +83,6 @@
83#include <linux/device.h> 83#include <linux/device.h>
84#include <linux/netdevice.h> 84#include <linux/netdevice.h>
85#include <linux/etherdevice.h> 85#include <linux/etherdevice.h>
86#include <linux/ethtool.h>
87#include <linux/suspend.h> 86#include <linux/suspend.h>
88#include <linux/if_arp.h> 87#include <linux/if_arp.h>
89#include <linux/wireless.h> 88#include <linux/wireless.h>
@@ -162,8 +161,6 @@ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
162 | HERMES_EV_WTERR | HERMES_EV_INFO \ 161 | HERMES_EV_WTERR | HERMES_EV_INFO \
163 | HERMES_EV_INFDROP) 162 | HERMES_EV_INFDROP)
164 163
165static const struct ethtool_ops orinoco_ethtool_ops;
166
167/********************************************************************/ 164/********************************************************************/
168/* Data types */ 165/* Data types */
169/********************************************************************/ 166/********************************************************************/
@@ -1994,7 +1991,9 @@ int orinoco_init(struct orinoco_private *priv)
1994 goto out; 1991 goto out;
1995 } 1992 }
1996 1993
1997 err = determine_fw_capabilities(priv); 1994 err = determine_fw_capabilities(priv, wiphy->fw_version,
1995 sizeof(wiphy->fw_version),
1996 &wiphy->hw_version);
1998 if (err != 0) { 1997 if (err != 0) {
1999 dev_err(dev, "Incompatible firmware, aborting\n"); 1998 dev_err(dev, "Incompatible firmware, aborting\n");
2000 goto out; 1999 goto out;
@@ -2010,7 +2009,9 @@ int orinoco_init(struct orinoco_private *priv)
2010 priv->do_fw_download = 0; 2009 priv->do_fw_download = 0;
2011 2010
2012 /* Check firmware version again */ 2011 /* Check firmware version again */
2013 err = determine_fw_capabilities(priv); 2012 err = determine_fw_capabilities(priv, wiphy->fw_version,
2013 sizeof(wiphy->fw_version),
2014 &wiphy->hw_version);
2014 if (err != 0) { 2015 if (err != 0) {
2015 dev_err(dev, "Incompatible firmware, aborting\n"); 2016 dev_err(dev, "Incompatible firmware, aborting\n");
2016 goto out; 2017 goto out;
@@ -2212,7 +2213,6 @@ int orinoco_if_add(struct orinoco_private *priv,
2212 dev->ieee80211_ptr = wdev; 2213 dev->ieee80211_ptr = wdev;
2213 dev->netdev_ops = &orinoco_netdev_ops; 2214 dev->netdev_ops = &orinoco_netdev_ops;
2214 dev->watchdog_timeo = HZ; /* 1 second timeout */ 2215 dev->watchdog_timeo = HZ; /* 1 second timeout */
2215 dev->ethtool_ops = &orinoco_ethtool_ops;
2216 dev->wireless_handlers = &orinoco_handler_def; 2216 dev->wireless_handlers = &orinoco_handler_def;
2217#ifdef WIRELESS_SPY 2217#ifdef WIRELESS_SPY
2218 dev->wireless_data = &priv->wireless_data; 2218 dev->wireless_data = &priv->wireless_data;
@@ -2349,27 +2349,6 @@ void orinoco_down(struct orinoco_private *priv)
2349} 2349}
2350EXPORT_SYMBOL(orinoco_down); 2350EXPORT_SYMBOL(orinoco_down);
2351 2351
2352static void orinoco_get_drvinfo(struct net_device *dev,
2353 struct ethtool_drvinfo *info)
2354{
2355 struct orinoco_private *priv = ndev_priv(dev);
2356
2357 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
2358 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
2359 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
2360 if (dev->dev.parent)
2361 strncpy(info->bus_info, dev_name(dev->dev.parent),
2362 sizeof(info->bus_info) - 1);
2363 else
2364 snprintf(info->bus_info, sizeof(info->bus_info) - 1,
2365 "PCMCIA %p", priv->hw.iobase);
2366}
2367
2368static const struct ethtool_ops orinoco_ethtool_ops = {
2369 .get_drvinfo = orinoco_get_drvinfo,
2370 .get_link = ethtool_op_get_link,
2371};
2372
2373/********************************************************************/ 2352/********************************************************************/
2374/* Module initialization */ 2353/* Module initialization */
2375/********************************************************************/ 2354/********************************************************************/
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 9ac6f1dda4b0..665ef56f8382 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -93,7 +93,6 @@ struct orinoco_private {
93 93
94 /* Capabilities of the hardware/firmware */ 94 /* Capabilities of the hardware/firmware */
95 fwtype_t firmware_type; 95 fwtype_t firmware_type;
96 char fw_name[32];
97 int ibss_port; 96 int ibss_port;
98 int nicbuf_size; 97 int nicbuf_size;
99 u16 channel_mask; 98 u16 channel_mask;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ed1f997e3521..390c0c7b3ac2 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -53,6 +53,32 @@ config RT61PCI
53 53
54 When compiled as a module, this driver will be called rt61pci. 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2800PCI_PCI
57 tristate
58 depends on PCI
59 default y
60
61config RT2800PCI_SOC
62 tristate
63 depends on RALINK_RT288X || RALINK_RT305X
64 default y
65
66config RT2800PCI
67 tristate "Ralink rt2800 (PCI/PCMCIA) support"
68 depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL
69 select RT2X00_LIB_PCI if RT2800PCI_PCI
70 select RT2X00_LIB_SOC if RT2800PCI_SOC
71 select RT2X00_LIB_HT
72 select RT2X00_LIB_FIRMWARE
73 select RT2X00_LIB_CRYPTO
74 select CRC_CCITT
75 select EEPROM_93CX6
76 ---help---
77 This adds support for rt2800 wireless chipset family.
78 Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052
79
80 When compiled as a module, this driver will be called "rt2800pci.ko".
81
56config RT2500USB 82config RT2500USB
57 tristate "Ralink rt2500 (USB) support" 83 tristate "Ralink rt2500 (USB) support"
58 depends on USB 84 depends on USB
@@ -95,6 +121,10 @@ config RT2X00_LIB_PCI
95 tristate 121 tristate
96 select RT2X00_LIB 122 select RT2X00_LIB
97 123
124config RT2X00_LIB_SOC
125 tristate
126 select RT2X00_LIB
127
98config RT2X00_LIB_USB 128config RT2X00_LIB_USB
99 tristate 129 tristate
100 select RT2X00_LIB 130 select RT2X00_LIB
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 13043ea97667..912f5f67e159 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -11,10 +11,12 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_HT) += rt2x00ht.o
11 11
12obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o 12obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
13obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o 13obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
14obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
14obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o 15obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
15obj-$(CONFIG_RT2400PCI) += rt2400pci.o 16obj-$(CONFIG_RT2400PCI) += rt2400pci.o
16obj-$(CONFIG_RT2500PCI) += rt2500pci.o 17obj-$(CONFIG_RT2500PCI) += rt2500pci.o
17obj-$(CONFIG_RT61PCI) += rt61pci.o 18obj-$(CONFIG_RT61PCI) += rt61pci.o
19obj-$(CONFIG_RT2800PCI) += rt2800pci.o
18obj-$(CONFIG_RT2500USB) += rt2500usb.o 20obj-$(CONFIG_RT2500USB) += rt2500usb.o
19obj-$(CONFIG_RT73USB) += rt73usb.o 21obj-$(CONFIG_RT73USB) += rt73usb.o
20obj-$(CONFIG_RT2800USB) += rt2800usb.o 22obj-$(CONFIG_RT2800USB) += rt2800usb.o
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
new file mode 100644
index 000000000000..be81788b80c7
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -0,0 +1,3323 @@
1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2800pci
23 Abstract: rt2800pci device specific routines.
24 Supported chipsets: RT2800E & RT2800ED.
25 */
26
27#include <linux/crc-ccitt.h>
28#include <linux/delay.h>
29#include <linux/etherdevice.h>
30#include <linux/init.h>
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/platform_device.h>
35#include <linux/eeprom_93cx6.h>
36
37#include "rt2x00.h"
38#include "rt2x00pci.h"
39#include "rt2x00soc.h"
40#include "rt2800pci.h"
41
42#ifdef CONFIG_RT2800PCI_PCI_MODULE
43#define CONFIG_RT2800PCI_PCI
44#endif
45
46#ifdef CONFIG_RT2800PCI_WISOC_MODULE
47#define CONFIG_RT2800PCI_WISOC
48#endif
49
50/*
51 * Allow hardware encryption to be disabled.
52 */
53static int modparam_nohwcrypt = 1;
54module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
55MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
56
57/*
58 * Register access.
59 * BBP and RF register require indirect register access,
60 * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this.
61 * These indirect registers work with busy bits,
62 * and we will try maximal REGISTER_BUSY_COUNT times to access
63 * the register while taking a REGISTER_BUSY_DELAY us delay
64 * between each attampt. When the busy bit is still set at that time,
65 * the access attempt is considered to have failed,
66 * and we will print an error.
67 */
68#define WAIT_FOR_BBP(__dev, __reg) \
69 rt2x00pci_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
70#define WAIT_FOR_RFCSR(__dev, __reg) \
71 rt2x00pci_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
72#define WAIT_FOR_RF(__dev, __reg) \
73 rt2x00pci_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
74#define WAIT_FOR_MCU(__dev, __reg) \
75 rt2x00pci_regbusy_read((__dev), H2M_MAILBOX_CSR, \
76 H2M_MAILBOX_CSR_OWNER, (__reg))
77
78static void rt2800pci_bbp_write(struct rt2x00_dev *rt2x00dev,
79 const unsigned int word, const u8 value)
80{
81 u32 reg;
82
83 mutex_lock(&rt2x00dev->csr_mutex);
84
85 /*
86 * Wait until the BBP becomes available, afterwards we
87 * can safely write the new data into the register.
88 */
89 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
90 reg = 0;
91 rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
92 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
93 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
94 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
95 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
96
97 rt2x00pci_register_write(rt2x00dev, BBP_CSR_CFG, reg);
98 }
99
100 mutex_unlock(&rt2x00dev->csr_mutex);
101}
102
103static void rt2800pci_bbp_read(struct rt2x00_dev *rt2x00dev,
104 const unsigned int word, u8 *value)
105{
106 u32 reg;
107
108 mutex_lock(&rt2x00dev->csr_mutex);
109
110 /*
111 * Wait until the BBP becomes available, afterwards we
112 * can safely write the read request into the register.
113 * After the data has been written, we wait until hardware
114 * returns the correct value, if at any time the register
115 * doesn't become available in time, reg will be 0xffffffff
116 * which means we return 0xff to the caller.
117 */
118 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
119 reg = 0;
120 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
121 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
122 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
123 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
124
125 rt2x00pci_register_write(rt2x00dev, BBP_CSR_CFG, reg);
126
127 WAIT_FOR_BBP(rt2x00dev, &reg);
128 }
129
130 *value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);
131
132 mutex_unlock(&rt2x00dev->csr_mutex);
133}
134
135static void rt2800pci_rfcsr_write(struct rt2x00_dev *rt2x00dev,
136 const unsigned int word, const u8 value)
137{
138 u32 reg;
139
140 mutex_lock(&rt2x00dev->csr_mutex);
141
142 /*
143 * Wait until the RFCSR becomes available, afterwards we
144 * can safely write the new data into the register.
145 */
146 if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
147 reg = 0;
148 rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
149 rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
150 rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
151 rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
152
153 rt2x00pci_register_write(rt2x00dev, RF_CSR_CFG, reg);
154 }
155
156 mutex_unlock(&rt2x00dev->csr_mutex);
157}
158
159static void rt2800pci_rfcsr_read(struct rt2x00_dev *rt2x00dev,
160 const unsigned int word, u8 *value)
161{
162 u32 reg;
163
164 mutex_lock(&rt2x00dev->csr_mutex);
165
166 /*
167 * Wait until the RFCSR becomes available, afterwards we
168 * can safely write the read request into the register.
169 * After the data has been written, we wait until hardware
170 * returns the correct value, if at any time the register
171 * doesn't become available in time, reg will be 0xffffffff
172 * which means we return 0xff to the caller.
173 */
174 if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
175 reg = 0;
176 rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
177 rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
178 rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
179
180 rt2x00pci_register_write(rt2x00dev, RF_CSR_CFG, reg);
181
182 WAIT_FOR_RFCSR(rt2x00dev, &reg);
183 }
184
185 *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
186
187 mutex_unlock(&rt2x00dev->csr_mutex);
188}
189
190static void rt2800pci_rf_write(struct rt2x00_dev *rt2x00dev,
191 const unsigned int word, const u32 value)
192{
193 u32 reg;
194
195 mutex_lock(&rt2x00dev->csr_mutex);
196
197 /*
198 * Wait until the RF becomes available, afterwards we
199 * can safely write the new data into the register.
200 */
201 if (WAIT_FOR_RF(rt2x00dev, &reg)) {
202 reg = 0;
203 rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
204 rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
205 rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
206 rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);
207
208 rt2x00pci_register_write(rt2x00dev, RF_CSR_CFG0, reg);
209 rt2x00_rf_write(rt2x00dev, word, value);
210 }
211
212 mutex_unlock(&rt2x00dev->csr_mutex);
213}
214
215static void rt2800pci_mcu_request(struct rt2x00_dev *rt2x00dev,
216 const u8 command, const u8 token,
217 const u8 arg0, const u8 arg1)
218{
219 u32 reg;
220
221 /*
222 * RT2880 and RT3052 don't support MCU requests.
223 */
224 if (rt2x00_rt(&rt2x00dev->chip, RT2880) ||
225 rt2x00_rt(&rt2x00dev->chip, RT3052))
226 return;
227
228 mutex_lock(&rt2x00dev->csr_mutex);
229
230 /*
231 * Wait until the MCU becomes available, afterwards we
232 * can safely write the new data into the register.
233 */
234 if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
235 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
236 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
237 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
238 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
239 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg);
240
241 reg = 0;
242 rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
243 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg);
244 }
245
246 mutex_unlock(&rt2x00dev->csr_mutex);
247}
248
249static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
250{
251 unsigned int i;
252 u32 reg;
253
254 for (i = 0; i < 200; i++) {
255 rt2x00pci_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
256
257 if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) ||
258 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) ||
259 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD2) == token) ||
260 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD3) == token))
261 break;
262
263 udelay(REGISTER_BUSY_DELAY);
264 }
265
266 if (i == 200)
267 ERROR(rt2x00dev, "MCU request failed, no response from hardware\n");
268
269 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
270 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
271}
272
273#ifdef CONFIG_RT2800PCI_WISOC
274static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
275{
276 u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
277
278 memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
279}
280#else
281static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
282{
283}
284#endif /* CONFIG_RT2800PCI_WISOC */
285
286#ifdef CONFIG_RT2800PCI_PCI
287static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
288{
289 struct rt2x00_dev *rt2x00dev = eeprom->data;
290 u32 reg;
291
292 rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg);
293
294 eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
295 eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
296 eeprom->reg_data_clock =
297 !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK);
298 eeprom->reg_chip_select =
299 !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT);
300}
301
302static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
303{
304 struct rt2x00_dev *rt2x00dev = eeprom->data;
305 u32 reg = 0;
306
307 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in);
308 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out);
309 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK,
310 !!eeprom->reg_data_clock);
311 rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT,
312 !!eeprom->reg_chip_select);
313
314 rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg);
315}
316
317static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
318{
319 struct eeprom_93cx6 eeprom;
320 u32 reg;
321
322 rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg);
323
324 eeprom.data = rt2x00dev;
325 eeprom.register_read = rt2800pci_eepromregister_read;
326 eeprom.register_write = rt2800pci_eepromregister_write;
327 eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ?
328 PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66;
329 eeprom.reg_data_in = 0;
330 eeprom.reg_data_out = 0;
331 eeprom.reg_data_clock = 0;
332 eeprom.reg_chip_select = 0;
333
334 eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom,
335 EEPROM_SIZE / sizeof(u16));
336}
337
338static void rt2800pci_efuse_read(struct rt2x00_dev *rt2x00dev,
339 unsigned int i)
340{
341 u32 reg;
342
343 rt2x00pci_register_read(rt2x00dev, EFUSE_CTRL, &reg);
344 rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
345 rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
346 rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
347 rt2x00pci_register_write(rt2x00dev, EFUSE_CTRL, reg);
348
349 /* Wait until the EEPROM has been loaded */
350 rt2x00pci_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
351
352 /* Apparently the data is read from end to start */
353 rt2x00pci_register_read(rt2x00dev, EFUSE_DATA3,
354 (u32 *)&rt2x00dev->eeprom[i]);
355 rt2x00pci_register_read(rt2x00dev, EFUSE_DATA2,
356 (u32 *)&rt2x00dev->eeprom[i + 2]);
357 rt2x00pci_register_read(rt2x00dev, EFUSE_DATA1,
358 (u32 *)&rt2x00dev->eeprom[i + 4]);
359 rt2x00pci_register_read(rt2x00dev, EFUSE_DATA0,
360 (u32 *)&rt2x00dev->eeprom[i + 6]);
361}
362
363static void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
364{
365 unsigned int i;
366
367 for (i = 0; i < EEPROM_SIZE / sizeof(u16); i += 8)
368 rt2800pci_efuse_read(rt2x00dev, i);
369}
370#else
371static inline void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
372{
373}
374
375static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
376{
377}
378#endif /* CONFIG_RT2800PCI_PCI */
379
380#ifdef CONFIG_RT2X00_LIB_DEBUGFS
381static const struct rt2x00debug rt2800pci_rt2x00debug = {
382 .owner = THIS_MODULE,
383 .csr = {
384 .read = rt2x00pci_register_read,
385 .write = rt2x00pci_register_write,
386 .flags = RT2X00DEBUGFS_OFFSET,
387 .word_base = CSR_REG_BASE,
388 .word_size = sizeof(u32),
389 .word_count = CSR_REG_SIZE / sizeof(u32),
390 },
391 .eeprom = {
392 .read = rt2x00_eeprom_read,
393 .write = rt2x00_eeprom_write,
394 .word_base = EEPROM_BASE,
395 .word_size = sizeof(u16),
396 .word_count = EEPROM_SIZE / sizeof(u16),
397 },
398 .bbp = {
399 .read = rt2800pci_bbp_read,
400 .write = rt2800pci_bbp_write,
401 .word_base = BBP_BASE,
402 .word_size = sizeof(u8),
403 .word_count = BBP_SIZE / sizeof(u8),
404 },
405 .rf = {
406 .read = rt2x00_rf_read,
407 .write = rt2800pci_rf_write,
408 .word_base = RF_BASE,
409 .word_size = sizeof(u32),
410 .word_count = RF_SIZE / sizeof(u32),
411 },
412};
413#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
414
415static int rt2800pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
416{
417 u32 reg;
418
419 rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
420 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
421}
422
423#ifdef CONFIG_RT2X00_LIB_LEDS
424static void rt2800pci_brightness_set(struct led_classdev *led_cdev,
425 enum led_brightness brightness)
426{
427 struct rt2x00_led *led =
428 container_of(led_cdev, struct rt2x00_led, led_dev);
429 unsigned int enabled = brightness != LED_OFF;
430 unsigned int bg_mode =
431 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
432 unsigned int polarity =
433 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
434 EEPROM_FREQ_LED_POLARITY);
435 unsigned int ledmode =
436 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
437 EEPROM_FREQ_LED_MODE);
438
439 if (led->type == LED_TYPE_RADIO) {
440 rt2800pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
441 enabled ? 0x20 : 0);
442 } else if (led->type == LED_TYPE_ASSOC) {
443 rt2800pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
444 enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
445 } else if (led->type == LED_TYPE_QUALITY) {
446 /*
447 * The brightness is divided into 6 levels (0 - 5),
448 * The specs tell us the following levels:
449 * 0, 1 ,3, 7, 15, 31
450 * to determine the level in a simple way we can simply
451 * work with bitshifting:
452 * (1 << level) - 1
453 */
454 rt2800pci_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
455 (1 << brightness / (LED_FULL / 6)) - 1,
456 polarity);
457 }
458}
459
460static int rt2800pci_blink_set(struct led_classdev *led_cdev,
461 unsigned long *delay_on,
462 unsigned long *delay_off)
463{
464 struct rt2x00_led *led =
465 container_of(led_cdev, struct rt2x00_led, led_dev);
466 u32 reg;
467
468 rt2x00pci_register_read(led->rt2x00dev, LED_CFG, &reg);
469 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
470 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
471 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
472 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
473 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
474 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
475 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
476 rt2x00pci_register_write(led->rt2x00dev, LED_CFG, reg);
477
478 return 0;
479}
480
481static void rt2800pci_init_led(struct rt2x00_dev *rt2x00dev,
482 struct rt2x00_led *led,
483 enum led_type type)
484{
485 led->rt2x00dev = rt2x00dev;
486 led->type = type;
487 led->led_dev.brightness_set = rt2800pci_brightness_set;
488 led->led_dev.blink_set = rt2800pci_blink_set;
489 led->flags = LED_INITIALIZED;
490}
491#endif /* CONFIG_RT2X00_LIB_LEDS */
492
493/*
494 * Configuration handlers.
495 */
496static void rt2800pci_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
497 struct rt2x00lib_crypto *crypto,
498 struct ieee80211_key_conf *key)
499{
500 struct mac_wcid_entry wcid_entry;
501 struct mac_iveiv_entry iveiv_entry;
502 u32 offset;
503 u32 reg;
504
505 offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
506
507 rt2x00pci_register_read(rt2x00dev, offset, &reg);
508 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
509 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
510 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
511 (crypto->cmd == SET_KEY) * crypto->cipher);
512 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
513 (crypto->cmd == SET_KEY) * crypto->bssidx);
514 rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
515 rt2x00pci_register_write(rt2x00dev, offset, reg);
516
517 offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
518
519 memset(&iveiv_entry, 0, sizeof(iveiv_entry));
520 if ((crypto->cipher == CIPHER_TKIP) ||
521 (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
522 (crypto->cipher == CIPHER_AES))
523 iveiv_entry.iv[3] |= 0x20;
524 iveiv_entry.iv[3] |= key->keyidx << 6;
525 rt2x00pci_register_multiwrite(rt2x00dev, offset,
526 &iveiv_entry, sizeof(iveiv_entry));
527
528 offset = MAC_WCID_ENTRY(key->hw_key_idx);
529
530 memset(&wcid_entry, 0, sizeof(wcid_entry));
531 if (crypto->cmd == SET_KEY)
532 memcpy(&wcid_entry, crypto->address, ETH_ALEN);
533 rt2x00pci_register_multiwrite(rt2x00dev, offset,
534 &wcid_entry, sizeof(wcid_entry));
535}
536
537static int rt2800pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
538 struct rt2x00lib_crypto *crypto,
539 struct ieee80211_key_conf *key)
540{
541 struct hw_key_entry key_entry;
542 struct rt2x00_field32 field;
543 u32 offset;
544 u32 reg;
545
546 if (crypto->cmd == SET_KEY) {
547 key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;
548
549 memcpy(key_entry.key, crypto->key,
550 sizeof(key_entry.key));
551 memcpy(key_entry.tx_mic, crypto->tx_mic,
552 sizeof(key_entry.tx_mic));
553 memcpy(key_entry.rx_mic, crypto->rx_mic,
554 sizeof(key_entry.rx_mic));
555
556 offset = SHARED_KEY_ENTRY(key->hw_key_idx);
557 rt2x00pci_register_multiwrite(rt2x00dev, offset,
558 &key_entry, sizeof(key_entry));
559 }
560
561 /*
562 * The cipher types are stored over multiple registers
563 * starting with SHARED_KEY_MODE_BASE each word will have
564 * 32 bits and contains the cipher types for 2 bssidx each.
565 * Using the correct defines correctly will cause overhead,
566 * so just calculate the correct offset.
567 */
568 field.bit_offset = 4 * (key->hw_key_idx % 8);
569 field.bit_mask = 0x7 << field.bit_offset;
570
571 offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);
572
573 rt2x00pci_register_read(rt2x00dev, offset, &reg);
574 rt2x00_set_field32(&reg, field,
575 (crypto->cmd == SET_KEY) * crypto->cipher);
576 rt2x00pci_register_write(rt2x00dev, offset, reg);
577
578 /*
579 * Update WCID information
580 */
581 rt2800pci_config_wcid_attr(rt2x00dev, crypto, key);
582
583 return 0;
584}
585
586static int rt2800pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
587 struct rt2x00lib_crypto *crypto,
588 struct ieee80211_key_conf *key)
589{
590 struct hw_key_entry key_entry;
591 u32 offset;
592
593 if (crypto->cmd == SET_KEY) {
594 /*
595 * 1 pairwise key is possible per AID, this means that the AID
596 * equals our hw_key_idx. Make sure the WCID starts _after_ the
597 * last possible shared key entry.
598 */
599 if (crypto->aid > (256 - 32))
600 return -ENOSPC;
601
602 key->hw_key_idx = 32 + crypto->aid;
603
604
605 memcpy(key_entry.key, crypto->key,
606 sizeof(key_entry.key));
607 memcpy(key_entry.tx_mic, crypto->tx_mic,
608 sizeof(key_entry.tx_mic));
609 memcpy(key_entry.rx_mic, crypto->rx_mic,
610 sizeof(key_entry.rx_mic));
611
612 offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
613 rt2x00pci_register_multiwrite(rt2x00dev, offset,
614 &key_entry, sizeof(key_entry));
615 }
616
617 /*
618 * Update WCID information
619 */
620 rt2800pci_config_wcid_attr(rt2x00dev, crypto, key);
621
622 return 0;
623}
624
625static void rt2800pci_config_filter(struct rt2x00_dev *rt2x00dev,
626 const unsigned int filter_flags)
627{
628 u32 reg;
629
630 /*
631 * Start configuration steps.
632 * Note that the version error will always be dropped
633 * and broadcast frames will always be accepted since
634 * there is no filter for it at this time.
635 */
636 rt2x00pci_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
637 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
638 !(filter_flags & FIF_FCSFAIL));
639 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
640 !(filter_flags & FIF_PLCPFAIL));
641 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
642 !(filter_flags & FIF_PROMISC_IN_BSS));
643 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
644 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
645 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
646 !(filter_flags & FIF_ALLMULTI));
647 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0);
648 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1);
649 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK,
650 !(filter_flags & FIF_CONTROL));
651 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END,
652 !(filter_flags & FIF_CONTROL));
653 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK,
654 !(filter_flags & FIF_CONTROL));
655 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS,
656 !(filter_flags & FIF_CONTROL));
657 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS,
658 !(filter_flags & FIF_CONTROL));
659 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
660 !(filter_flags & FIF_PSPOLL));
661 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
662 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
663 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
664 !(filter_flags & FIF_CONTROL));
665 rt2x00pci_register_write(rt2x00dev, RX_FILTER_CFG, reg);
666}
667
668static void rt2800pci_config_intf(struct rt2x00_dev *rt2x00dev,
669 struct rt2x00_intf *intf,
670 struct rt2x00intf_conf *conf,
671 const unsigned int flags)
672{
673 unsigned int beacon_base;
674 u32 reg;
675
676 if (flags & CONFIG_UPDATE_TYPE) {
677 /*
678 * Clear current synchronisation setup.
679 * For the Beacon base registers we only need to clear
680 * the first byte since that byte contains the VALID and OWNER
681 * bits which (when set to 0) will invalidate the entire beacon.
682 */
683 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
684 rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
685
686 /*
687 * Enable synchronisation.
688 */
689 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
690 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
691 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
692 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
693 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
694 }
695
696 if (flags & CONFIG_UPDATE_MAC) {
697 reg = le32_to_cpu(conf->mac[1]);
698 rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
699 conf->mac[1] = cpu_to_le32(reg);
700
701 rt2x00pci_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
702 conf->mac, sizeof(conf->mac));
703 }
704
705 if (flags & CONFIG_UPDATE_BSSID) {
706 reg = le32_to_cpu(conf->bssid[1]);
707 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
708 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
709 conf->bssid[1] = cpu_to_le32(reg);
710
711 rt2x00pci_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
712 conf->bssid, sizeof(conf->bssid));
713 }
714}
715
716static void rt2800pci_config_erp(struct rt2x00_dev *rt2x00dev,
717 struct rt2x00lib_erp *erp)
718{
719 u32 reg;
720
721 rt2x00pci_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
722 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
723 rt2x00pci_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
724
725 rt2x00pci_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
726 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
727 !!erp->short_preamble);
728 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
729 !!erp->short_preamble);
730 rt2x00pci_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
731
732 rt2x00pci_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
733 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
734 erp->cts_protection ? 2 : 0);
735 rt2x00pci_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
736
737 rt2x00pci_register_write(rt2x00dev, LEGACY_BASIC_RATE,
738 erp->basic_rates);
739 rt2x00pci_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
740
741 rt2x00pci_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
742 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
743 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
744 rt2x00pci_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
745
746 rt2x00pci_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
747 rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
748 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
749 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
750 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
751 rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
752 rt2x00pci_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
753
754 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
755 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
756 erp->beacon_int * 16);
757 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
758}
759
760static void rt2800pci_config_ant(struct rt2x00_dev *rt2x00dev,
761 struct antenna_setup *ant)
762{
763 u8 r1;
764 u8 r3;
765
766 rt2800pci_bbp_read(rt2x00dev, 1, &r1);
767 rt2800pci_bbp_read(rt2x00dev, 3, &r3);
768
769 /*
770 * Configure the TX antenna.
771 */
772 switch ((int)ant->tx) {
773 case 1:
774 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
775 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
776 break;
777 case 2:
778 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
779 break;
780 case 3:
781 /* Do nothing */
782 break;
783 }
784
785 /*
786 * Configure the RX antenna.
787 */
788 switch ((int)ant->rx) {
789 case 1:
790 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
791 break;
792 case 2:
793 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
794 break;
795 case 3:
796 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
797 break;
798 }
799
800 rt2800pci_bbp_write(rt2x00dev, 3, r3);
801 rt2800pci_bbp_write(rt2x00dev, 1, r1);
802}
803
804static void rt2800pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
805 struct rt2x00lib_conf *libconf)
806{
807 u16 eeprom;
808 short lna_gain;
809
810 if (libconf->rf.channel <= 14) {
811 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
812 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
813 } else if (libconf->rf.channel <= 64) {
814 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
815 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
816 } else if (libconf->rf.channel <= 128) {
817 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
818 lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
819 } else {
820 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
821 lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
822 }
823
824 rt2x00dev->lna_gain = lna_gain;
825}
826
827static void rt2800pci_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
828 struct ieee80211_conf *conf,
829 struct rf_channel *rf,
830 struct channel_info *info)
831{
832 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
833
834 if (rt2x00dev->default_ant.tx == 1)
835 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
836
837 if (rt2x00dev->default_ant.rx == 1) {
838 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
839 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
840 } else if (rt2x00dev->default_ant.rx == 2)
841 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
842
843 if (rf->channel > 14) {
844 /*
845 * When TX power is below 0, we should increase it by 7 to
846 * make it a positive value (Minumum value is -7).
847 * However this means that values between 0 and 7 have
848 * double meaning, and we should set a 7DBm boost flag.
849 */
850 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
851 (info->tx_power1 >= 0));
852
853 if (info->tx_power1 < 0)
854 info->tx_power1 += 7;
855
856 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
857 TXPOWER_A_TO_DEV(info->tx_power1));
858
859 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
860 (info->tx_power2 >= 0));
861
862 if (info->tx_power2 < 0)
863 info->tx_power2 += 7;
864
865 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
866 TXPOWER_A_TO_DEV(info->tx_power2));
867 } else {
868 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
869 TXPOWER_G_TO_DEV(info->tx_power1));
870 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
871 TXPOWER_G_TO_DEV(info->tx_power2));
872 }
873
874 rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
875
876 rt2800pci_rf_write(rt2x00dev, 1, rf->rf1);
877 rt2800pci_rf_write(rt2x00dev, 2, rf->rf2);
878 rt2800pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
879 rt2800pci_rf_write(rt2x00dev, 4, rf->rf4);
880
881 udelay(200);
882
883 rt2800pci_rf_write(rt2x00dev, 1, rf->rf1);
884 rt2800pci_rf_write(rt2x00dev, 2, rf->rf2);
885 rt2800pci_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
886 rt2800pci_rf_write(rt2x00dev, 4, rf->rf4);
887
888 udelay(200);
889
890 rt2800pci_rf_write(rt2x00dev, 1, rf->rf1);
891 rt2800pci_rf_write(rt2x00dev, 2, rf->rf2);
892 rt2800pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
893 rt2800pci_rf_write(rt2x00dev, 4, rf->rf4);
894}
895
896static void rt2800pci_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
897 struct ieee80211_conf *conf,
898 struct rf_channel *rf,
899 struct channel_info *info)
900{
901 u8 rfcsr;
902
903 rt2800pci_rfcsr_write(rt2x00dev, 2, rf->rf1);
904 rt2800pci_rfcsr_write(rt2x00dev, 2, rf->rf3);
905
906 rt2800pci_rfcsr_read(rt2x00dev, 6, &rfcsr);
907 rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
908 rt2800pci_rfcsr_write(rt2x00dev, 6, rfcsr);
909
910 rt2800pci_rfcsr_read(rt2x00dev, 12, &rfcsr);
911 rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
912 TXPOWER_G_TO_DEV(info->tx_power1));
913 rt2800pci_rfcsr_write(rt2x00dev, 12, rfcsr);
914
915 rt2800pci_rfcsr_read(rt2x00dev, 23, &rfcsr);
916 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
917 rt2800pci_rfcsr_write(rt2x00dev, 23, rfcsr);
918
919 rt2800pci_rfcsr_write(rt2x00dev, 24,
920 rt2x00dev->calibration[conf_is_ht40(conf)]);
921
922 rt2800pci_rfcsr_read(rt2x00dev, 23, &rfcsr);
923 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
924 rt2800pci_rfcsr_write(rt2x00dev, 23, rfcsr);
925}
926
/*
 * Switch the radio to a new channel.
 *
 * Delegates the RF register programming to the chip-specific helper
 * (RT3070 uses a different RF chip than the older RT2xxx parts), then
 * updates the BBP gain/filter registers, the TX band configuration,
 * the TX pin enables and the channel-bandwidth settings.
 */
static void rt2800pci_config_channel(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_conf *conf,
				     struct rf_channel *rf,
				     struct channel_info *info)
{
	u32 reg;
	unsigned int tx_pin;
	u8 bbp;

	/* RT3070 has its own RF programming sequence. */
	if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
		rt2800pci_config_channel_rt2x(rt2x00dev, conf, rf, info);
	else
		rt2800pci_config_channel_rt3x(rt2x00dev, conf, rf, info);

	/*
	 * Change BBP settings
	 */
	/* RX chain AGC registers are offset by the current LNA gain. */
	rt2800pci_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
	rt2800pci_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
	rt2800pci_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
	rt2800pci_bbp_write(rt2x00dev, 86, 0);

	/*
	 * BBP 82/75 depend on the band and on whether an external LNA
	 * is fitted (per-band EEPROM flag).
	 */
	if (rf->channel <= 14) {
		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
			rt2800pci_bbp_write(rt2x00dev, 82, 0x62);
			rt2800pci_bbp_write(rt2x00dev, 75, 0x46);
		} else {
			rt2800pci_bbp_write(rt2x00dev, 82, 0x84);
			rt2800pci_bbp_write(rt2x00dev, 75, 0x50);
		}
	} else {
		rt2800pci_bbp_write(rt2x00dev, 82, 0xf2);

		if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
			rt2800pci_bbp_write(rt2x00dev, 75, 0x46);
		else
			rt2800pci_bbp_write(rt2x00dev, 75, 0x50);
	}

	/* Select the 2.4GHz or 5GHz TX band and the HT40 sideband. */
	rt2x00pci_register_read(rt2x00dev, TX_BAND_CFG, &reg);
	rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
	rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
	rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
	rt2x00pci_register_write(rt2x00dev, TX_BAND_CFG, reg);

	tx_pin = 0;

	/* Turn on unused PA or LNA when not using 1T or 1R */
	if (rt2x00dev->default_ant.tx != 1) {
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
	}

	/* Turn on unused PA or LNA when not using 1T or 1R */
	if (rt2x00dev->default_ant.rx != 1) {
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
	}

	/* Primary chain LNAs are always on; the PA enable follows the band. */
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);

	rt2x00pci_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);

	/* BBP4 bandwidth field: 0 for 20MHz, 2 for 40MHz. */
	rt2800pci_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
	rt2800pci_bbp_write(rt2x00dev, 4, bbp);

	rt2800pci_bbp_read(rt2x00dev, 3, &bbp);
	rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
	rt2800pci_bbp_write(rt2x00dev, 3, bbp);

	/* RT2860C needs different AGC values depending on bandwidth. */
	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
		if (conf_is_ht40(conf)) {
			rt2800pci_bbp_write(rt2x00dev, 69, 0x1a);
			rt2800pci_bbp_write(rt2x00dev, 70, 0x0a);
			rt2800pci_bbp_write(rt2x00dev, 73, 0x16);
		} else {
			rt2800pci_bbp_write(rt2x00dev, 69, 0x16);
			rt2800pci_bbp_write(rt2x00dev, 70, 0x08);
			rt2800pci_bbp_write(rt2x00dev, 73, 0x11);
		}
	}

	/* Give the RF/BBP time to settle on the new channel. */
	msleep(1);
}
1017
1018static void rt2800pci_config_txpower(struct rt2x00_dev *rt2x00dev,
1019 const int txpower)
1020{
1021 u32 reg;
1022 u32 value = TXPOWER_G_TO_DEV(txpower);
1023 u8 r1;
1024
1025 rt2800pci_bbp_read(rt2x00dev, 1, &r1);
1026 rt2x00_set_field8(&reg, BBP1_TX_POWER, 0);
1027 rt2800pci_bbp_write(rt2x00dev, 1, r1);
1028
1029 rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
1030 rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value);
1031 rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value);
1032 rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value);
1033 rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value);
1034 rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value);
1035 rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value);
1036 rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value);
1037 rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value);
1038 rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_0, reg);
1039
1040 rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
1041 rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value);
1042 rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value);
1043 rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value);
1044 rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value);
1045 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value);
1046 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value);
1047 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value);
1048 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value);
1049 rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_1, reg);
1050
1051 rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
1052 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value);
1053 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value);
1054 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value);
1055 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value);
1056 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value);
1057 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value);
1058 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value);
1059 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value);
1060 rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_2, reg);
1061
1062 rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
1063 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value);
1064 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value);
1065 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value);
1066 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value);
1067 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value);
1068 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value);
1069 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value);
1070 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value);
1071 rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_3, reg);
1072
1073 rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
1074 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value);
1075 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value);
1076 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value);
1077 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value);
1078 rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_4, reg);
1079}
1080
/*
 * Program the hardware TX retry limits from the mac80211 configuration.
 */
static void rt2800pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
					 struct rt2x00lib_conf *libconf)
{
	u32 reg;

	rt2x00pci_register_read(rt2x00dev, TX_RTY_CFG, &reg);
	/* Short/long retry counts come straight from mac80211. */
	rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
			   libconf->conf->short_frame_max_tx_count);
	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
			   libconf->conf->long_frame_max_tx_count);
	/* Frames larger than this threshold use the long retry limit. */
	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
	rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
	rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
	/* Let the hardware fall back to lower rates automatically. */
	rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
	rt2x00pci_register_write(rt2x00dev, TX_RTY_CFG, reg);
}
1097
/*
 * Enter or leave powersave mode based on the mac80211 PS flag.
 *
 * Note the deliberate ordering: when going to sleep the autowakeup
 * timer is programmed *before* putting the device to sleep; when
 * waking up the device is woken *before* the timer is cleared.
 */
static void rt2800pci_config_ps(struct rt2x00_dev *rt2x00dev,
				struct rt2x00lib_conf *libconf)
{
	enum dev_state state =
	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
		STATE_SLEEP : STATE_AWAKE;
	u32 reg;

	if (state == STATE_SLEEP) {
		/* Clear any stale autowakeup configuration first. */
		rt2x00pci_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);

		rt2x00pci_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
		/* Wake up one beacon interval before the listen interval ends. */
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
				   libconf->conf->listen_interval - 1);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
		rt2x00pci_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);

		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
	} else {
		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);

		/* Disable the autowakeup timer while awake. */
		rt2x00pci_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
		rt2x00pci_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
	}
}
1127
/*
 * Top-level configuration handler: dispatch each changed mac80211
 * configuration item to its specific handler. The channel is handled
 * before TX power so the power settings apply to the new channel.
 */
static void rt2800pci_config(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_conf *libconf,
			     const unsigned int flags)
{
	/* Always recalculate LNA gain before changing configuration */
	rt2800pci_config_lna_gain(rt2x00dev, libconf);

	if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
		rt2800pci_config_channel(rt2x00dev, libconf->conf,
					 &libconf->rf, &libconf->channel);
	if (flags & IEEE80211_CONF_CHANGE_POWER)
		rt2800pci_config_txpower(rt2x00dev, libconf->conf->power_level);
	if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
		rt2800pci_config_retry_limit(rt2x00dev, libconf);
	if (flags & IEEE80211_CONF_CHANGE_PS)
		rt2800pci_config_ps(rt2x00dev, libconf);
}
1145
1146/*
1147 * Link tuning
1148 */
/*
 * Collect link quality statistics for the link tuner.
 */
static void rt2800pci_link_stats(struct rt2x00_dev *rt2x00dev,
				 struct link_qual *qual)
{
	u32 reg;

	/*
	 * Update FCS error count from register.
	 * NOTE(review): RX_STA_CNT0 appears to be a clear-on-read counter
	 * (see the error-counter reset in rt2800pci_init_registers), so
	 * this read also resets it.
	 */
	rt2x00pci_register_read(rt2x00dev, RX_STA_CNT0, &reg);
	qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
}
1160
1161static u8 rt2800pci_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1162{
1163 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
1164 return 0x2e + rt2x00dev->lna_gain;
1165
1166 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
1167 return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
1168 else
1169 return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
1170}
1171
1172static inline void rt2800pci_set_vgc(struct rt2x00_dev *rt2x00dev,
1173 struct link_qual *qual, u8 vgc_level)
1174{
1175 if (qual->vgc_level != vgc_level) {
1176 rt2800pci_bbp_write(rt2x00dev, 66, vgc_level);
1177 qual->vgc_level = vgc_level;
1178 qual->vgc_level_reg = vgc_level;
1179 }
1180}
1181
/*
 * Reset the link tuner: restore the default VGC level.
 */
static void rt2800pci_reset_tuner(struct rt2x00_dev *rt2x00dev,
				  struct link_qual *qual)
{
	rt2800pci_set_vgc(rt2x00dev, qual,
			  rt2800pci_get_default_vgc(rt2x00dev));
}
1188
1189static void rt2800pci_link_tuner(struct rt2x00_dev *rt2x00dev,
1190 struct link_qual *qual, const u32 count)
1191{
1192 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
1193 return;
1194
1195 /*
1196 * When RSSI is better then -80 increase VGC level with 0x10
1197 */
1198 rt2800pci_set_vgc(rt2x00dev, qual,
1199 rt2800pci_get_default_vgc(rt2x00dev) +
1200 ((qual->rssi > -80) * 0x10));
1201}
1202
1203/*
1204 * Firmware functions
1205 */
/*
 * Return the firmware image name; all supported PCI chips use the
 * RT2860 image.
 */
static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
{
	return FIRMWARE_RT2860;
}
1210
1211static int rt2800pci_check_firmware(struct rt2x00_dev *rt2x00dev,
1212 const u8 *data, const size_t len)
1213{
1214 u16 fw_crc;
1215 u16 crc;
1216
1217 /*
1218 * Only support 8kb firmware files.
1219 */
1220 if (len != 8192)
1221 return FW_BAD_LENGTH;
1222
1223 /*
1224 * The last 2 bytes in the firmware array are the crc checksum itself,
1225 * this means that we should never pass those 2 bytes to the crc
1226 * algorithm.
1227 */
1228 fw_crc = (data[len - 2] << 8 | data[len - 1]);
1229
1230 /*
1231 * Use the crc ccitt algorithm.
1232 * This will return the same value as the legacy driver which
1233 * used bit ordering reversion on the both the firmware bytes
1234 * before input input as well as on the final output.
1235 * Obviously using crc ccitt directly is much more efficient.
1236 */
1237 crc = crc_ccitt(~0, data, len - 2);
1238
1239 /*
1240 * There is a small difference between the crc-itu-t + bitrev and
1241 * the crc-ccitt crc calculation. In the latter method the 2 bytes
1242 * will be swapped, use swab16 to convert the crc to the correct
1243 * value.
1244 */
1245 crc = swab16(crc);
1246
1247 return (fw_crc == crc) ? FW_OK : FW_BAD_CRC;
1248}
1249
/*
 * Upload the firmware image into the device's program RAM and wait for
 * the MCU to come up.
 *
 * Returns 0 on success, -EBUSY if the hardware never stabilizes or the
 * PBF system register never signals ready.
 */
static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev,
				   const u8 *data, const size_t len)
{
	unsigned int i;
	u32 reg;

	/*
	 * Wait for stable hardware.
	 * MAC_CSR0 reads as 0 or all-ones while the chip is not ready.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
		if (reg && reg != ~0)
			break;
		msleep(1);
	}

	if (i == REGISTER_BUSY_COUNT) {
		ERROR(rt2x00dev, "Unstable hardware.\n");
		return -EBUSY;
	}

	rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
	rt2x00pci_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);

	/*
	 * Disable DMA, will be reenabled later when enabling
	 * the radio.
	 */
	rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	/*
	 * enable Host program ram write selection
	 */
	reg = 0;
	rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1);
	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, reg);

	/*
	 * Write firmware to device.
	 */
	rt2x00pci_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
				      data, len);

	/* Release RAM write selection, then kick the MCU. */
	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000);
	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001);

	/*
	 * Wait for device to stabilize.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00pci_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
		if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
			break;
		msleep(1);
	}

	if (i == REGISTER_BUSY_COUNT) {
		ERROR(rt2x00dev, "PBF system register not ready.\n");
		return -EBUSY;
	}

	/*
	 * Disable interrupts
	 */
	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF);

	/*
	 * Initialize BBP R/W access agent
	 */
	rt2x00pci_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
	rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);

	return 0;
}
1330
1331/*
1332 * Initialization functions.
1333 */
1334static bool rt2800pci_get_entry_state(struct queue_entry *entry)
1335{
1336 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1337 u32 word;
1338
1339 if (entry->queue->qid == QID_RX) {
1340 rt2x00_desc_read(entry_priv->desc, 1, &word);
1341
1342 return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
1343 } else {
1344 rt2x00_desc_read(entry_priv->desc, 1, &word);
1345
1346 return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
1347 }
1348}
1349
/*
 * Reset a queue entry's descriptor so it can be reused.
 *
 * For RX entries the DMA address of the (fresh) skb is programmed and
 * the DMA-done flag cleared, handing the descriptor back to hardware.
 * For TX entries the DMA-done flag is set, marking the slot as free.
 */
static void rt2800pci_clear_entry(struct queue_entry *entry)
{
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	u32 word;

	if (entry->queue->qid == QID_RX) {
		rt2x00_desc_read(entry_priv->desc, 0, &word);
		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
		rt2x00_desc_write(entry_priv->desc, 0, word);

		rt2x00_desc_read(entry_priv->desc, 1, &word);
		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
		rt2x00_desc_write(entry_priv->desc, 1, word);
	} else {
		rt2x00_desc_read(entry_priv->desc, 1, &word);
		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
		rt2x00_desc_write(entry_priv->desc, 1, word);
	}
}
1370
/*
 * Program the DMA queue registers: reset all ring indices, then point
 * the hardware at the descriptor rings of the four TX queues and the
 * RX queue.
 *
 * Always returns 0.
 */
static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_pci *entry_priv;
	u32 reg;

	/* Reset all TX and RX DMA indices. */
	rt2x00pci_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2x00pci_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	/* Toggle the PBF reset bit. */
	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	/*
	 * Initialize registers.
	 * For each TX queue: ring base address, ring size, and both
	 * index registers cleared.
	 */
	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma);
	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT0, rt2x00dev->tx[0].limit);
	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX0, 0);
	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX0, 0);

	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma);
	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT1, rt2x00dev->tx[1].limit);
	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX1, 0);
	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX1, 0);

	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma);
	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT2, rt2x00dev->tx[2].limit);
	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX2, 0);
	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX2, 0);

	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma);
	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT3, rt2x00dev->tx[3].limit);
	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0);
	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0);

	/* RX ring: the CPU index starts at the last descriptor. */
	entry_priv = rt2x00dev->rx->entries[0].priv_data;
	rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma);
	rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT, rt2x00dev->rx[0].limit);
	rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX, rt2x00dev->rx[0].limit - 1);
	rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0);

	/*
	 * Enable global DMA configuration
	 * (TX/RX DMA themselves stay disabled until the radio is enabled).
	 */
	rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0);

	return 0;
}
1435
/*
 * Initialize all MAC registers to their power-on defaults.
 *
 * Resets the MAC and BBP, programs beacon offsets, rate and protection
 * configuration, clears key tables, WCID entries and beacon memory, and
 * resets the clear-on-read statistics counters.
 *
 * Always returns 0.
 */
static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	unsigned int i;

	rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/* Pulse the MAC and BBP reset bits, then release them. */
	rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	/* Beacon memory offsets for the 8 beacon slots. */
	rt2x00pci_register_read(rt2x00dev, BCN_OFFSET0, &reg);
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
	rt2x00pci_register_write(rt2x00dev, BCN_OFFSET0, reg);

	rt2x00pci_register_read(rt2x00dev, BCN_OFFSET1, &reg);
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
	rt2x00pci_register_write(rt2x00dev, BCN_OFFSET1, reg);

	rt2x00pci_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
	rt2x00pci_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);

	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	/* Disable all beacon timing until a mode is configured. */
	rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
	rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);

	rt2x00pci_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
	rt2x00pci_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);

	rt2x00pci_register_read(rt2x00dev, TX_LINK_CFG, &reg);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
	rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
	rt2x00pci_register_write(rt2x00dev, TX_LINK_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
	rt2x00pci_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);

	/* Max PSDU length depends on the chip revision. */
	rt2x00pci_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
	rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
	if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
	    rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
	else
		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
	rt2x00pci_register_write(rt2x00dev, MAX_LEN_CFG, reg);

	rt2x00pci_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);

	rt2x00pci_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
	rt2x00pci_register_write(rt2x00dev, AUTO_RSP_CFG, reg);

	/*
	 * Protection configuration for each frame class:
	 * CCK, OFDM, HT mixed-mode 20/40 and greenfield 20/40.
	 */
	rt2x00pci_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2x00pci_register_write(rt2x00dev, CCK_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2x00pci_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
	rt2x00pci_register_write(rt2x00dev, MM20_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2x00pci_register_write(rt2x00dev, MM40_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
	rt2x00pci_register_write(rt2x00dev, GF20_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2x00pci_register_write(rt2x00dev, GF40_PROT_CFG, reg);

	rt2x00pci_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
	rt2x00pci_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);

	/* RTS disabled by default: threshold at the maximum. */
	rt2x00pci_register_read(rt2x00dev, TX_RTS_CFG, &reg);
	rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
			   IEEE80211_MAX_RTS_THRESHOLD);
	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
	rt2x00pci_register_write(rt2x00dev, TX_RTS_CFG, reg);

	rt2x00pci_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
	rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/*
	 * ASIC will keep garbage value after boot, clear encryption keys.
	 */
	for (i = 0; i < 4; i++)
		rt2x00pci_register_write(rt2x00dev,
					 SHARED_KEY_MODE_ENTRY(i), 0);

	/* Invalidate all 256 WCID entries (broadcast-MAC pattern). */
	for (i = 0; i < 256; i++) {
		u32 wcid[2] = { 0xffffffff, 0x00ffffff };
		rt2x00pci_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
					      wcid, sizeof(wcid));

		rt2x00pci_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
		rt2x00pci_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
	}

	/*
	 * Clear all beacons
	 * For the Beacon base registers we only need to clear
	 * the first byte since that byte contains the VALID and OWNER
	 * bits which (when set to 0) will invalidate the entire beacon.
	 */
	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE7, 0);

	/* HT and legacy rate fallback tables. */
	rt2x00pci_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
	rt2x00pci_register_write(rt2x00dev, HT_FBK_CFG0, reg);

	rt2x00pci_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
	rt2x00pci_register_write(rt2x00dev, HT_FBK_CFG1, reg);

	rt2x00pci_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
	rt2x00pci_register_write(rt2x00dev, LG_FBK_CFG0, reg);

	/*
	 * NOTE(review): the CCK fallback fields are named LG_FBK_CFG0_*
	 * but are applied to the LG_FBK_CFG1 register here — presumably
	 * a naming quirk in the register header; verify against the
	 * field definitions in rt2800pci.h.
	 */
	rt2x00pci_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2);
	rt2x00pci_register_write(rt2x00dev, LG_FBK_CFG1, reg);

	/*
	 * We must clear the error counters.
	 * These registers are cleared on read,
	 * so we may pass a useless variable to store the value.
	 */
	rt2x00pci_register_read(rt2x00dev, RX_STA_CNT0, &reg);
	rt2x00pci_register_read(rt2x00dev, RX_STA_CNT1, &reg);
	rt2x00pci_register_read(rt2x00dev, RX_STA_CNT2, &reg);
	rt2x00pci_register_read(rt2x00dev, TX_STA_CNT0, &reg);
	rt2x00pci_register_read(rt2x00dev, TX_STA_CNT1, &reg);
	rt2x00pci_register_read(rt2x00dev, TX_STA_CNT2, &reg);

	return 0;
}
1688
1689static int rt2800pci_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
1690{
1691 unsigned int i;
1692 u32 reg;
1693
1694 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1695 rt2x00pci_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
1696 if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
1697 return 0;
1698
1699 udelay(REGISTER_BUSY_DELAY);
1700 }
1701
1702 ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n");
1703 return -EACCES;
1704}
1705
/*
 * Reactivate the BBP after firmware load and wait until it responds.
 *
 * Returns 0 when BBP register 0 reads back a plausible value,
 * -EACCES when the poll times out.
 */
static int rt2800pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u8 value;

	/*
	 * BBP was enabled after firmware was loaded,
	 * but we need to reactivate it now.
	 */
	rt2x00pci_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
	rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
	msleep(1);

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2800pci_bbp_read(rt2x00dev, 0, &value);
		/* 0x00 and 0xff indicate the BBP is not responding yet. */
		if ((value != 0xff) && (value != 0x00))
			return 0;
		udelay(REGISTER_BUSY_DELAY);
	}

	ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
	return -EACCES;
}
1729
/*
 * Initialize the BBP registers: wait for the BBP to become ready,
 * write the default register values, apply chip-revision-specific
 * overrides, then apply any per-register tweaks stored in the EEPROM.
 *
 * Returns 0 on success, -EACCES when the BBP never becomes ready.
 */
static int rt2800pci_init_bbp(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u16 eeprom;
	u8 reg_id;
	u8 value;

	if (unlikely(rt2800pci_wait_bbp_rf_ready(rt2x00dev) ||
		     rt2800pci_wait_bbp_ready(rt2x00dev)))
		return -EACCES;

	/* Default BBP values common to all supported chips. */
	rt2800pci_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800pci_bbp_write(rt2x00dev, 66, 0x38);
	rt2800pci_bbp_write(rt2x00dev, 69, 0x12);
	rt2800pci_bbp_write(rt2x00dev, 70, 0x0a);
	rt2800pci_bbp_write(rt2x00dev, 73, 0x10);
	rt2800pci_bbp_write(rt2x00dev, 81, 0x37);
	rt2800pci_bbp_write(rt2x00dev, 82, 0x62);
	rt2800pci_bbp_write(rt2x00dev, 83, 0x6a);
	rt2800pci_bbp_write(rt2x00dev, 84, 0x99);
	rt2800pci_bbp_write(rt2x00dev, 86, 0x00);
	rt2800pci_bbp_write(rt2x00dev, 91, 0x04);
	rt2800pci_bbp_write(rt2x00dev, 92, 0x00);
	rt2800pci_bbp_write(rt2x00dev, 103, 0x00);
	rt2800pci_bbp_write(rt2x00dev, 105, 0x05);

	/* Revision-specific overrides. */
	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
		rt2800pci_bbp_write(rt2x00dev, 69, 0x16);
		rt2800pci_bbp_write(rt2x00dev, 73, 0x12);
	}

	if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION)
		rt2800pci_bbp_write(rt2x00dev, 84, 0x19);

	if (rt2x00_rt(&rt2x00dev->chip, RT3052)) {
		rt2800pci_bbp_write(rt2x00dev, 31, 0x08);
		rt2800pci_bbp_write(rt2x00dev, 78, 0x0e);
		rt2800pci_bbp_write(rt2x00dev, 80, 0x08);
	}

	/*
	 * Apply vendor BBP tweaks from the EEPROM; entries of 0x0000 or
	 * 0xffff are unprogrammed and skipped.
	 */
	for (i = 0; i < EEPROM_BBP_SIZE; i++) {
		rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);

		if (eeprom != 0xffff && eeprom != 0x0000) {
			reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
			value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
			rt2800pci_bbp_write(rt2x00dev, reg_id, value);
		}
	}

	return 0;
}
1782
/*
 * Calibrate the RX filter tuning value (RFCSR 24).
 *
 * With the baseband placed in loopback mode, a passband and then a
 * stopband test tone are generated; RFCSR 24 is incremented, starting
 * from @rfcsr24, until the measured passband/stopband difference drops
 * below @filter_target.  @bw40 selects 40MHz bandwidth for the test.
 * Returns the tuned RFCSR 24 value (also left programmed in the RF).
 * NOTE(review): the exact tone/measurement semantics of BBP registers
 * 24/25/55 are undocumented -- presumably mirrors the vendor driver.
 */
static u8 rt2800pci_init_rx_filter(struct rt2x00_dev *rt2x00dev,
				   bool bw40, u8 rfcsr24, u8 filter_target)
{
	unsigned int i;
	u8 bbp;
	u8 rfcsr;
	u8 passband;
	u8 stopband;
	u8 overtuned = 0;

	rt2800pci_rfcsr_write(rt2x00dev, 24, rfcsr24);

	/* Select test bandwidth: BBP4_BANDWIDTH = 2 for 40MHz, 0 for 20MHz. */
	rt2800pci_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
	rt2800pci_bbp_write(rt2x00dev, 4, bbp);

	/* Enable baseband loopback so the test tone is received locally. */
	rt2800pci_rfcsr_read(rt2x00dev, 22, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
	rt2800pci_rfcsr_write(rt2x00dev, 22, rfcsr);

	/*
	 * Set power & frequency of passband test tone
	 */
	rt2800pci_bbp_write(rt2x00dev, 24, 0);

	/* Wait (up to 100 tries) for a non-zero passband measurement. */
	for (i = 0; i < 100; i++) {
		rt2800pci_bbp_write(rt2x00dev, 25, 0x90);
		msleep(1);

		rt2800pci_bbp_read(rt2x00dev, 55, &passband);
		if (passband)
			break;
	}

	/*
	 * Set power & frequency of stopband test tone
	 */
	rt2800pci_bbp_write(rt2x00dev, 24, 0x06);

	/*
	 * Walk RFCSR 24 upwards until the passband/stopband difference
	 * exceeds the target; count exact hits as "overtuned".
	 */
	for (i = 0; i < 100; i++) {
		rt2800pci_bbp_write(rt2x00dev, 25, 0x90);
		msleep(1);

		rt2800pci_bbp_read(rt2x00dev, 55, &stopband);

		if ((passband - stopband) <= filter_target) {
			rfcsr24++;
			overtuned += ((passband - stopband) == filter_target);
		} else
			break;

		rt2800pci_rfcsr_write(rt2x00dev, 24, rfcsr24);
	}

	/* Back off one step if we hit the target exactly at least once. */
	rfcsr24 -= !!overtuned;

	rt2800pci_rfcsr_write(rt2x00dev, 24, rfcsr24);
	return rfcsr24;
}
1842
/*
 * Initialize the RF CSR registers (RF3020/RF3021/RF3022 only).
 *
 * Triggers RF calibration, programs the default RF CSR values, runs
 * the RX filter calibration for 20MHz and 40MHz (results stored in
 * rt2x00dev->calibration[0..1]) and restores the pre-calibration
 * loopback/bandwidth state.  Always returns 0.
 */
static int rt2800pci_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
	u8 rfcsr;
	u8 bbp;

	/* Nothing to do for RF chipsets without programmable RF CSRs. */
	if (!rt2x00_rf(&rt2x00dev->chip, RF3020) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF3022))
		return 0;

	/*
	 * Init RF calibration.
	 */
	rt2800pci_rfcsr_read(rt2x00dev, 30, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
	rt2800pci_rfcsr_write(rt2x00dev, 30, rfcsr);
	msleep(1);
	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
	rt2800pci_rfcsr_write(rt2x00dev, 30, rfcsr);

	/*
	 * Default RF CSR values; meanings are undocumented.
	 * NOTE(review): presumably taken from the Ralink reference driver.
	 */
	rt2800pci_rfcsr_write(rt2x00dev, 0, 0x50);
	rt2800pci_rfcsr_write(rt2x00dev, 1, 0x01);
	rt2800pci_rfcsr_write(rt2x00dev, 2, 0xf7);
	rt2800pci_rfcsr_write(rt2x00dev, 3, 0x75);
	rt2800pci_rfcsr_write(rt2x00dev, 4, 0x40);
	rt2800pci_rfcsr_write(rt2x00dev, 5, 0x03);
	rt2800pci_rfcsr_write(rt2x00dev, 6, 0x02);
	rt2800pci_rfcsr_write(rt2x00dev, 7, 0x50);
	rt2800pci_rfcsr_write(rt2x00dev, 8, 0x39);
	rt2800pci_rfcsr_write(rt2x00dev, 9, 0x0f);
	rt2800pci_rfcsr_write(rt2x00dev, 10, 0x60);
	rt2800pci_rfcsr_write(rt2x00dev, 11, 0x21);
	rt2800pci_rfcsr_write(rt2x00dev, 12, 0x75);
	rt2800pci_rfcsr_write(rt2x00dev, 13, 0x75);
	rt2800pci_rfcsr_write(rt2x00dev, 14, 0x90);
	rt2800pci_rfcsr_write(rt2x00dev, 15, 0x58);
	rt2800pci_rfcsr_write(rt2x00dev, 16, 0xb3);
	rt2800pci_rfcsr_write(rt2x00dev, 17, 0x92);
	rt2800pci_rfcsr_write(rt2x00dev, 18, 0x2c);
	rt2800pci_rfcsr_write(rt2x00dev, 19, 0x02);
	rt2800pci_rfcsr_write(rt2x00dev, 20, 0xba);
	rt2800pci_rfcsr_write(rt2x00dev, 21, 0xdb);
	rt2800pci_rfcsr_write(rt2x00dev, 22, 0x00);
	rt2800pci_rfcsr_write(rt2x00dev, 23, 0x31);
	rt2800pci_rfcsr_write(rt2x00dev, 24, 0x08);
	rt2800pci_rfcsr_write(rt2x00dev, 25, 0x01);
	rt2800pci_rfcsr_write(rt2x00dev, 26, 0x25);
	rt2800pci_rfcsr_write(rt2x00dev, 27, 0x23);
	rt2800pci_rfcsr_write(rt2x00dev, 28, 0x13);
	rt2800pci_rfcsr_write(rt2x00dev, 29, 0x83);

	/*
	 * Set RX Filter calibration for 20MHz and 40MHz
	 */
	rt2x00dev->calibration[0] =
	    rt2800pci_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
	rt2x00dev->calibration[1] =
	    rt2800pci_init_rx_filter(rt2x00dev, true, 0x27, 0x19);

	/*
	 * Set back to initial state
	 */
	rt2800pci_bbp_write(rt2x00dev, 24, 0);

	/* Leave baseband loopback mode enabled by the filter calibration. */
	rt2800pci_rfcsr_read(rt2x00dev, 22, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
	rt2800pci_rfcsr_write(rt2x00dev, 22, rfcsr);

	/*
	 * set BBP back to BW20
	 */
	rt2800pci_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
	rt2800pci_bbp_write(rt2x00dev, 4, bbp);

	return 0;
}
1920
1921/*
1922 * Device state switch handlers.
1923 */
1924static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
1925 enum dev_state state)
1926{
1927 u32 reg;
1928
1929 rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1930 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
1931 (state == STATE_RADIO_RX_ON) ||
1932 (state == STATE_RADIO_RX_ON_LINK));
1933 rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1934}
1935
1936static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1937 enum dev_state state)
1938{
1939 int mask = (state == STATE_RADIO_IRQ_ON);
1940 u32 reg;
1941
1942 /*
1943 * When interrupts are being enabled, the interrupt registers
1944 * should clear the register to assure a clean state.
1945 */
1946 if (state == STATE_RADIO_IRQ_ON) {
1947 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
1948 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
1949 }
1950
1951 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
1952 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, mask);
1953 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, mask);
1954 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
1955 rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, mask);
1956 rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, mask);
1957 rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, mask);
1958 rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, mask);
1959 rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, mask);
1960 rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, mask);
1961 rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, mask);
1962 rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, mask);
1963 rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
1964 rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
1965 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
1966 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
1967 rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, mask);
1968 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, mask);
1969 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, mask);
1970 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
1971}
1972
1973static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
1974{
1975 unsigned int i;
1976 u32 reg;
1977
1978 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1979 rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1980 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
1981 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
1982 return 0;
1983
1984 msleep(1);
1985 }
1986
1987 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
1988 return -EACCES;
1989}
1990
/*
 * Bring the radio fully up: initialize queues, registers, BBP and RF,
 * signal the firmware, enable TX/RX DMA and program the LED behaviour
 * from the EEPROM.  Returns 0 on success, -EIO when any of the
 * initialization steps failed.
 */
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	u16 word;

	/*
	 * Initialize all registers.
	 */
	if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
		     rt2800pci_init_queues(rt2x00dev) ||
		     rt2800pci_init_registers(rt2x00dev) ||
		     rt2800pci_wait_wpdma_ready(rt2x00dev) ||
		     rt2800pci_init_bbp(rt2x00dev) ||
		     rt2800pci_init_rfcsr(rt2x00dev)))
		return -EIO;

	/*
	 * Send signal to firmware during boot time.
	 */
	rt2800pci_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);

	/*
	 * Enable RX.
	 * First bring up TX only, then the DMA engines, and finally
	 * TX+RX together.  NOTE(review): this three-step order mirrors
	 * what the code has always done; presumably required by the
	 * hardware -- do not reorder without confirmation.
	 */
	rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	/*
	 * Initialize LED control: each EEPROM LED word is split into a
	 * low and high byte and handed to the MCU.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
	rt2800pci_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
			      word & 0xff, (word >> 8) & 0xff);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
	rt2800pci_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
			      word & 0xff, (word >> 8) & 0xff);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
	rt2800pci_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
			      word & 0xff, (word >> 8) & 0xff);

	return 0;
}
2049
/*
 * Shut the radio down: stop the DMA engines, disable the MAC and
 * power/TX pins, reset all TX/RX DMA indices and wait for the DMA
 * engines to drain.
 */
static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/* Stop TX/RX DMA. */
	rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	/* Disable MAC TX/RX and the power/TX pin configuration. */
	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
	rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0);
	rt2x00pci_register_write(rt2x00dev, TX_PIN_CFG, 0);

	/* NOTE(review): magic PBF_SYS_CTRL values; meanings undocumented. */
	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);

	/* Reset all TX queue and the RX queue DMA indices. */
	rt2x00pci_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2x00pci_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	/* Wait for DMA, ignore error */
	rt2800pci_wait_wpdma_ready(rt2x00dev);
}
2084
2085static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
2086 enum dev_state state)
2087{
2088 /*
2089 * Always put the device to sleep (even when we intend to wakeup!)
2090 * if the device is booting and wasn't asleep it will return
2091 * failure when attempting to wakeup.
2092 */
2093 rt2800pci_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
2094
2095 if (state == STATE_AWAKE) {
2096 rt2800pci_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
2097 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
2098 }
2099
2100 return 0;
2101}
2102
2103static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
2104 enum dev_state state)
2105{
2106 int retval = 0;
2107
2108 switch (state) {
2109 case STATE_RADIO_ON:
2110 /*
2111 * Before the radio can be enabled, the device first has
2112 * to be woken up. After that it needs a bit of time
2113 * to be fully awake and then the radio can be enabled.
2114 */
2115 rt2800pci_set_state(rt2x00dev, STATE_AWAKE);
2116 msleep(1);
2117 retval = rt2800pci_enable_radio(rt2x00dev);
2118 break;
2119 case STATE_RADIO_OFF:
2120 /*
2121 * After the radio has been disabled, the device should
2122 * be put to sleep for powersaving.
2123 */
2124 rt2800pci_disable_radio(rt2x00dev);
2125 rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
2126 break;
2127 case STATE_RADIO_RX_ON:
2128 case STATE_RADIO_RX_ON_LINK:
2129 case STATE_RADIO_RX_OFF:
2130 case STATE_RADIO_RX_OFF_LINK:
2131 rt2800pci_toggle_rx(rt2x00dev, state);
2132 break;
2133 case STATE_RADIO_IRQ_ON:
2134 case STATE_RADIO_IRQ_OFF:
2135 rt2800pci_toggle_irq(rt2x00dev, state);
2136 break;
2137 case STATE_DEEP_SLEEP:
2138 case STATE_SLEEP:
2139 case STATE_STANDBY:
2140 case STATE_AWAKE:
2141 retval = rt2800pci_set_state(rt2x00dev, state);
2142 break;
2143 default:
2144 retval = -ENOTSUPP;
2145 break;
2146 }
2147
2148 if (unlikely(retval))
2149 ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n",
2150 state, retval);
2151
2152 return retval;
2153}
2154
2155/*
2156 * TX descriptor initialization
2157 */
/*
 * Fill in the TXWI (TX wireless information) structure located in the
 * skb headroom and the TX descriptor for this frame.  The descriptor
 * layout/ordering is hardware-defined; do not reorder the writes.
 */
static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
				    struct sk_buff *skb,
				    struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	__le32 *txd = skbdesc->desc;
	/* The TXWI lives in the extra TX headroom directly before the frame. */
	__le32 *txwi = (__le32 *)(skb->data - rt2x00dev->hw->extra_tx_headroom);
	u32 word;

	/*
	 * Initialize TX Info descriptor
	 */
	rt2x00_desc_read(txwi, 0, &word);
	rt2x00_set_field32(&word, TXWI_W0_FRAG,
			   test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
	rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
	rt2x00_set_field32(&word, TXWI_W0_TS,
			   test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_AMPDU,
			   test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
	rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
	rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
	rt2x00_set_field32(&word, TXWI_W0_BW,
			   test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
			   test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
	rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
	rt2x00_desc_write(txwi, 0, word);

	rt2x00_desc_read(txwi, 1, &word);
	rt2x00_set_field32(&word, TXWI_W1_ACK,
			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W1_NSEQ,
			   test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
	/* Encrypted frames carry the entry index as wireless client id. */
	rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
			   test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
			   (skbdesc->entry->entry_idx + 1) : 0xff);
	rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
			   skb->len - txdesc->l2pad);
	rt2x00_set_field32(&word, TXWI_W1_PACKETID,
			   skbdesc->entry->queue->qid + 1);
	rt2x00_desc_write(txwi, 1, word);

	/*
	 * Always write 0 to IV/EIV fields, hardware will insert the IV
	 * from the IVEIV register when ENTRY_TXD_ENCRYPT_IV is set to 0.
	 * When ENTRY_TXD_ENCRYPT_IV is set to 1 it will use the IV data
	 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
	 * crypto entry in the registers should be used to encrypt the frame.
	 */
	_rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
	_rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);

	/*
	 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
	 * must contain a TXWI structure + 802.11 header + padding + 802.11
	 * data. We choose to have SD_PTR0/SD_LEN0 only contain TXWI and
	 * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11
	 * data. It means that LAST_SEC0 is always 0.
	 */

	/*
	 * Initialize TX descriptor
	 */
	rt2x00_desc_read(txd, 0, &word);
	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 0, word);

	rt2x00_desc_read(txd, 1, &word);
	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, skb->len);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_BURST,
			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_SD_LEN0,
			   rt2x00dev->hw->extra_tx_headroom);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
	rt2x00_desc_write(txd, 1, word);

	rt2x00_desc_read(txd, 2, &word);
	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
			   skbdesc->skb_dma + rt2x00dev->hw->extra_tx_headroom);
	rt2x00_desc_write(txd, 2, word);

	rt2x00_desc_read(txd, 3, &word);
	rt2x00_set_field32(&word, TXD_W3_WIV,
			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
	rt2x00_desc_write(txd, 3, word);
}
2253
2254/*
2255 * TX data initialization
2256 */
2257static void rt2800pci_write_beacon(struct queue_entry *entry)
2258{
2259 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
2260 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
2261 unsigned int beacon_base;
2262 u32 reg;
2263
2264 /*
2265 * Disable beaconing while we are reloading the beacon data,
2266 * otherwise we might be sending out invalid data.
2267 */
2268 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
2269 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
2270 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
2271
2272 /*
2273 * Write entire beacon with descriptor to register.
2274 */
2275 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
2276 rt2x00pci_register_multiwrite(rt2x00dev,
2277 beacon_base,
2278 skbdesc->desc, skbdesc->desc_len);
2279 rt2x00pci_register_multiwrite(rt2x00dev,
2280 beacon_base + skbdesc->desc_len,
2281 entry->skb->data, entry->skb->len);
2282
2283 /*
2284 * Clean up beacon skb.
2285 */
2286 dev_kfree_skb_any(entry->skb);
2287 entry->skb = NULL;
2288}
2289
2290static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
2291 const enum data_queue_qid queue_idx)
2292{
2293 struct data_queue *queue;
2294 unsigned int idx, qidx = 0;
2295 u32 reg;
2296
2297 if (queue_idx == QID_BEACON) {
2298 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
2299 if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
2300 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
2301 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
2302 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
2303 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
2304 }
2305 return;
2306 }
2307
2308 if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
2309 return;
2310
2311 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
2312 idx = queue->index[Q_INDEX];
2313
2314 if (queue_idx == QID_MGMT)
2315 qidx = 5;
2316 else
2317 qidx = queue_idx;
2318
2319 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX(qidx), idx);
2320}
2321
2322static void rt2800pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
2323 const enum data_queue_qid qid)
2324{
2325 u32 reg;
2326
2327 if (qid == QID_BEACON) {
2328 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, 0);
2329 return;
2330 }
2331
2332 rt2x00pci_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
2333 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (qid == QID_AC_BE));
2334 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (qid == QID_AC_BK));
2335 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (qid == QID_AC_VI));
2336 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (qid == QID_AC_VO));
2337 rt2x00pci_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
2338}
2339
2340/*
2341 * RX control handlers
2342 */
/*
 * Translate the hardware RX descriptor and RXWI (RX wireless
 * information, prepended to the frame data) into the generic
 * rxdone_entry_desc, acknowledge the entry to the hardware and
 * strip the RXWI from the skb.
 */
static void rt2800pci_fill_rxdone(struct queue_entry *entry,
				  struct rxdone_entry_desc *rxdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
	__le32 *rxd = entry_priv->desc;
	/* The RXWI sits at the start of the received buffer. */
	__le32 *rxwi = (__le32 *)entry->skb->data;
	u32 rxd3;
	u32 rxwi0;
	u32 rxwi1;
	u32 rxwi2;
	u32 rxwi3;

	rt2x00_desc_read(rxd, 3, &rxd3);
	rt2x00_desc_read(rxwi, 0, &rxwi0);
	rt2x00_desc_read(rxwi, 1, &rxwi1);
	rt2x00_desc_read(rxwi, 2, &rxwi2);
	rt2x00_desc_read(rxwi, 3, &rxwi3);

	if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		/*
		 * Unfortunately we don't know the cipher type used during
		 * decryption. This prevents us from providing correct
		 * statistics through debugfs.
		 */
		rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
		rxdesc->cipher_status =
		    rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR);
	}

	if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) {
		/*
		 * Hardware has stripped IV/EIV data from 802.11 frame during
		 * decryption. Unfortunately the descriptor doesn't contain
		 * any fields with the EIV/IV data either, so they can't
		 * be restored by rt2x00lib.
		 */
		rxdesc->flags |= RX_FLAG_IV_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
	}

	if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	/* Frame data is L2-padded between header and payload. */
	if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) {
		rxdesc->dev_flags |= RXDONE_L2PAD;
		skbdesc->flags |= SKBDESC_L2_PADDED;
	}

	if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
		rxdesc->flags |= RX_FLAG_SHORT_GI;

	if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
		rxdesc->flags |= RX_FLAG_40MHZ;

	/*
	 * Detect RX rate, always use MCS as signal type.
	 */
	rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
	rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
	rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);

	/*
	 * Mask off the 0x8 bit to remove the short preamble flag.
	 */
	if (rxdesc->rate_mode == RATE_MODE_CCK)
		rxdesc->signal &= ~0x8;

	/* Average the per-chain RSSI and SNR readings. */
	rxdesc->rssi =
	    (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
	     rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;

	rxdesc->noise =
	    (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
	     rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;

	rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);

	/*
	 * Set RX IDX in register to inform hardware that we have handled
	 * this entry and it is available for reuse again.
	 */
	rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);

	/*
	 * Remove TXWI descriptor from start of buffer.
	 */
	skb_pull(entry->skb, RXWI_DESC_SIZE);
	skb_trim(entry->skb, rxdesc->size);
}
2441
2442/*
2443 * Interrupt functions.
2444 */
/*
 * Drain the TX_STA_FIFO status register and report TX completion for
 * each finished entry to rt2x00lib.  Entries that never got a status
 * report are flushed as TXDONE_UNKNOWN so the queue keeps moving.
 */
static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	struct queue_entry *entry;
	struct queue_entry *entry_done;
	struct queue_entry_priv_pci *entry_priv;
	struct txdone_entry_desc txdesc;
	u32 word;
	u32 reg;
	u32 old_reg;
	unsigned int type;
	unsigned int index;
	u16 mcs, real_mcs;

	/*
	 * During each loop we will compare the freshly read
	 * TX_STA_FIFO register value with the value read from
	 * the previous loop. If the 2 values are equal then
	 * we should stop processing because the chance is
	 * quite big that the device has been unplugged and
	 * we risk going into an endless loop.
	 */
	old_reg = 0;

	while (1) {
		rt2x00pci_register_read(rt2x00dev, TX_STA_FIFO, &reg);
		if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
			break;

		if (old_reg == reg)
			break;
		old_reg = reg;

		/*
		 * Skip this entry when it contains an invalid
		 * queue identification number.
		 */
		type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
		if (type >= QID_RX)
			continue;

		queue = rt2x00queue_get_queue(rt2x00dev, type);
		if (unlikely(!queue))
			continue;

		/*
		 * Skip this entry when it contains an invalid
		 * index number.
		 */
		index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1;
		if (unlikely(index >= queue->limit))
			continue;

		entry = &queue->entries[index];
		entry_priv = entry->priv_data;
		/* Read back the TXWI word 0 to recover the requested MCS. */
		rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word);

		entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
		while (entry != entry_done) {
			/*
			 * Catch up.
			 * Just report any entries we missed as failed.
			 */
			WARNING(rt2x00dev,
				"TX status report missed for entry %d\n",
				entry_done->entry_idx);

			txdesc.flags = 0;
			__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
			txdesc.retry = 0;

			rt2x00lib_txdone(entry_done, &txdesc);
			entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
		}

		/*
		 * Obtain the status about this packet.
		 */
		txdesc.flags = 0;
		if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS))
			__set_bit(TXDONE_SUCCESS, &txdesc.flags);
		else
			__set_bit(TXDONE_FAILURE, &txdesc.flags);

		/*
		 * Ralink has a retry mechanism using a global fallback
		 * table. We setup this fallback table to try immediate
		 * lower rate for all rates. In the TX_STA_FIFO,
		 * the MCS field contains the MCS used for the successful
		 * transmission. If the first transmission succeeded,
		 * we have mcs == tx_mcs. On the second transmission,
		 * we have mcs = tx_mcs - 1. So the number of
		 * retries is (tx_mcs - mcs).
		 */
		mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
		real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
		__set_bit(TXDONE_FALLBACK, &txdesc.flags);
		txdesc.retry = mcs - min(mcs, real_mcs);

		rt2x00lib_txdone(entry, &txdesc);
	}
}
2547
2548static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
2549{
2550 struct rt2x00_dev *rt2x00dev = dev_instance;
2551 u32 reg;
2552
2553 /* Read status and ACK all interrupts */
2554 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
2555 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
2556
2557 if (!reg)
2558 return IRQ_NONE;
2559
2560 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
2561 return IRQ_HANDLED;
2562
2563 /*
2564 * 1 - Rx ring done interrupt.
2565 */
2566 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
2567 rt2x00pci_rxdone(rt2x00dev);
2568
2569 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
2570 rt2800pci_txdone(rt2x00dev);
2571
2572 return IRQ_HANDLED;
2573}
2574
2575/*
2576 * Device probe functions.
2577 */
/*
 * Read the EEPROM contents (access method depends on the chipset) and
 * sanitize obviously unprogrammed or out-of-range fields, substituting
 * sane defaults so the rest of the driver can trust the values.
 * Always returns 0.
 */
static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
	u16 word;
	u8 *mac;
	u8 default_lna_gain;

	/*
	 * Read EEPROM into buffer
	 */
	switch(rt2x00dev->chip.rt) {
	case RT2880:
	case RT3052:
		/* SoC chipsets keep the EEPROM data in system memory. */
		rt2800pci_read_eeprom_soc(rt2x00dev);
		break;
	case RT3090:
		/* RT3090 uses an eFuse instead of a real EEPROM. */
		rt2800pci_read_eeprom_efuse(rt2x00dev);
		break;
	default:
		rt2800pci_read_eeprom_pci(rt2x00dev);
		break;
	}

	/*
	 * Start validation of the data that has been read.
	 */
	mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
	if (!is_valid_ether_addr(mac)) {
		/* No valid MAC programmed: generate a random one. */
		random_ether_addr(mac);
		EEPROM(rt2x00dev, "MAC: %pM\n", mac);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
	if (word == 0xffff) {
		/* Unprogrammed word: assume 2 RX / 1 TX paths, RF2820. */
		rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
		EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
	} else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
		/*
		 * There is a max of 2 RX streams for RT2860 series
		 */
		if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2)
			rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
	if (word == 0xffff) {
		/* Unprogrammed NIC word: disable all optional features. */
		rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
		EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
	}

	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
	if ((word & 0x00ff) == 0x00ff) {
		/* Unprogrammed frequency/LED word: install LED defaults. */
		rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
		rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
				   LED_MODE_TXRX_ACTIVITY);
		rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
		EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
	}

	/*
	 * During the LNA validation we are going to use
	 * lna0 as correct value. Note that EEPROM_LNA
	 * is never validated.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
	default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);

	/* Clamp implausible RSSI offsets (|offset| > 10) to zero. */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
	if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
	    rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
				   default_lna_gain);
	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
	if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
	    rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
		rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
				   default_lna_gain);
	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);

	return 0;
}
2696
/*
 * Read the hardware configuration from the EEPROM: RF chipset,
 * default antenna setup, frequency offset, external LNA presence
 * and hardware radio button support.  Also registers the LEDs.
 * Returns 0 on success, -ENODEV when the RF chipset is unsupported.
 */
static int rt2800pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	u16 value;
	u16 eeprom;

	/*
	 * Read EEPROM word for configuration.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);

	/*
	 * Identify RF chipset: the RF type comes from the EEPROM
	 * antenna word, the chipset revision from register MAC_CSR0.
	 */
	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
	rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
	rt2x00_set_chip_rf(rt2x00dev, value, reg);

	/*
	 * Reject RF chipsets this driver does not support.
	 * NOTE(review): rt2800pci_probe_hw_mode() additionally accepts
	 * RF3052, which is rejected here -- confirm whether RF3052
	 * should be added to this list.
	 */
	if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF2020) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF3022)) {
		ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
		return -ENODEV;
	}

	/*
	 * Identify default antenna configuration.
	 */
	rt2x00dev->default_ant.tx =
	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH);
	rt2x00dev->default_ant.rx =
	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH);

	/*
	 * Read frequency offset and RF programming sequence.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
	rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);

	/*
	 * Read external LNA information.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);

	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
		__set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
		__set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);

	/*
	 * Detect if this device has a hardware controlled radio.
	 */
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO))
		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);

	/*
	 * Store led settings, for correct led behaviour.
	 * The LED MCU register value is taken from the EEPROM
	 * frequency word.
	 */
#ifdef CONFIG_RT2X00_LIB_LEDS
	rt2800pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
	rt2800pci_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
	rt2800pci_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
#endif /* CONFIG_RT2X00_LIB_LEDS */

	return 0;
}
2770
2771/*
2772 * RF value list for rt2860
2773 * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
2774 */
2775static const struct rf_channel rf_vals[] = {
2776 { 1, 0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b },
2777 { 2, 0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f },
2778 { 3, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b },
2779 { 4, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f },
2780 { 5, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b },
2781 { 6, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f },
2782 { 7, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b },
2783 { 8, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f },
2784 { 9, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b },
2785 { 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f },
2786 { 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b },
2787 { 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f },
2788 { 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b },
2789 { 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 },
2790
2791 /* 802.11 UNI / HyperLan 2 */
2792 { 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 },
2793 { 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 },
2794 { 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 },
2795 { 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 },
2796 { 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b },
2797 { 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b },
2798 { 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 },
2799 { 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 },
2800 { 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b },
2801 { 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 },
2802 { 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 },
2803 { 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 },
2804
2805 /* 802.11 HyperLan 2 */
2806 { 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 },
2807 { 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 },
2808 { 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 },
2809 { 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 },
2810 { 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 },
2811 { 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b },
2812 { 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 },
2813 { 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 },
2814 { 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 },
2815 { 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 },
2816 { 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b },
2817 { 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 },
2818 { 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b },
2819 { 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 },
2820 { 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b },
2821 { 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 },
2822
2823 /* 802.11 UNII */
2824 { 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 },
2825 { 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 },
2826 { 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f },
2827 { 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f },
2828 { 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 },
2829 { 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 },
2830 { 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 },
2831
2832 /* 802.11 Japan */
2833 { 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b },
2834 { 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 },
2835 { 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b },
2836 { 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 },
2837 { 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 },
2838 { 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b },
2839 { 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 },
2840};
2841
/*
 * Initialize the ieee80211_hw structure and the hw_mode specification:
 * MAC address, supported bands/rates/channels, HT capabilities and the
 * per-channel TX power table read from the EEPROM.
 * Returns 0 on success, -ENOMEM if the channel info array cannot be
 * allocated.
 */
static int rt2800pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
	struct hw_mode_spec *spec = &rt2x00dev->spec;
	struct channel_info *info;
	char *tx_power1;
	char *tx_power2;
	unsigned int i;
	u16 eeprom;

	/*
	 * Initialize all hw fields.
	 */
	rt2x00dev->hw->flags =
	    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
	    IEEE80211_HW_SIGNAL_DBM |
	    IEEE80211_HW_SUPPORTS_PS |
	    IEEE80211_HW_PS_NULLFUNC_STACK;
	rt2x00dev->hw->extra_tx_headroom = TXWI_DESC_SIZE;

	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
				rt2x00_eeprom_addr(rt2x00dev,
						   EEPROM_MAC_ADDR_0));

	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);

	/*
	 * Initialize hw_mode information.
	 * 2.4 GHz-only chips use the first 14 rf_vals entries;
	 * dual-band chips (RF2850/RF2750) get the complete table.
	 */
	spec->supported_bands = SUPPORT_BAND_2GHZ;
	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;

	if (rt2x00_rf(&rt2x00dev->chip, RF2820) ||
	    rt2x00_rf(&rt2x00dev->chip, RF2720) ||
	    rt2x00_rf(&rt2x00dev->chip, RF3020) ||
	    rt2x00_rf(&rt2x00dev->chip, RF3021) ||
	    rt2x00_rf(&rt2x00dev->chip, RF3022) ||
	    rt2x00_rf(&rt2x00dev->chip, RF2020) ||
	    rt2x00_rf(&rt2x00dev->chip, RF3052)) {
		spec->num_channels = 14;
		spec->channels = rf_vals;
	} else if (rt2x00_rf(&rt2x00dev->chip, RF2850) ||
		   rt2x00_rf(&rt2x00dev->chip, RF2750)) {
		spec->supported_bands |= SUPPORT_BAND_5GHZ;
		spec->num_channels = ARRAY_SIZE(rf_vals);
		spec->channels = rf_vals;
	}

	/*
	 * Initialize HT information.
	 * NOTE(review): IEEE80211_HT_CAP_RX_STBC is a multi-bit field
	 * mask; OR-ing the whole mask advertises the maximum RX STBC
	 * stream count -- confirm this is intended.
	 */
	spec->ht.ht_supported = true;
	spec->ht.cap =
	    IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
	    IEEE80211_HT_CAP_GRN_FLD |
	    IEEE80211_HT_CAP_SGI_20 |
	    IEEE80211_HT_CAP_SGI_40 |
	    IEEE80211_HT_CAP_TX_STBC |
	    IEEE80211_HT_CAP_RX_STBC |
	    IEEE80211_HT_CAP_PSMP_SUPPORT;
	spec->ht.ampdu_factor = 3;
	spec->ht.ampdu_density = 4;
	spec->ht.mcs.tx_params =
	    IEEE80211_HT_MCS_TX_DEFINED |
	    IEEE80211_HT_MCS_TX_RX_DIFF |
	    ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) <<
		IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);

	/* Each additional RX path enables 8 more MCS rates. */
	switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) {
	case 3:
		spec->ht.mcs.rx_mask[2] = 0xff;
		/* fallthrough */
	case 2:
		spec->ht.mcs.rx_mask[1] = 0xff;
		/* fallthrough */
	case 1:
		spec->ht.mcs.rx_mask[0] = 0xff;
		spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
		break;
	}

	/*
	 * Create channel information array
	 */
	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	spec->channels_info = info;

	/* TX power for the 2.4 GHz channels (always present). */
	tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
	tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);

	for (i = 0; i < 14; i++) {
		info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
		info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
	}

	/* TX power for the 5 GHz channels (dual-band chips only). */
	if (spec->num_channels > 14) {
		tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
		tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);

		for (i = 14; i < spec->num_channels; i++) {
			info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
			info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
		}
	}

	return 0;
}
2950
/*
 * Top-level hardware probe callback: validate and read the EEPROM,
 * set up the hw mode specification and configure the driver
 * capability flags.  Returns 0 on success or a negative errno
 * propagated from the individual probe steps.
 */
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;

	/*
	 * Validate the EEPROM contents and read the hardware
	 * configuration from it.
	 */
	retval = rt2800pci_validate_eeprom(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2800pci_init_eeprom(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Initialize hw specifications.
	 */
	retval = rt2800pci_probe_hw_mode(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * This device has multiple filters for control frames
	 * and has a separate filter for PS Poll frames.
	 */
	__set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags);
	__set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags);

	/*
	 * This device requires firmware, except on the SoC variants
	 * (RT2880/RT3052).
	 */
	if (!rt2x00_rt(&rt2x00dev->chip, RT2880) &&
	    !rt2x00_rt(&rt2x00dev->chip, RT3052))
		__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
	__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
	__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
	/* Hardware crypto can be disabled via the nohwcrypt module param. */
	if (!modparam_nohwcrypt)
		__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);

	/*
	 * Set the rssi offset.
	 */
	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;

	return 0;
}
2998
2999/*
3000 * IEEE80211 stack callback functions.
3001 */
3002static void rt2800pci_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
3003 u32 *iv32, u16 *iv16)
3004{
3005 struct rt2x00_dev *rt2x00dev = hw->priv;
3006 struct mac_iveiv_entry iveiv_entry;
3007 u32 offset;
3008
3009 offset = MAC_IVEIV_ENTRY(hw_key_idx);
3010 rt2x00pci_register_multiread(rt2x00dev, offset,
3011 &iveiv_entry, sizeof(iveiv_entry));
3012
3013 memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
3014 memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
3015}
3016
/*
 * mac80211 callback: program the RTS threshold.  The threshold value
 * goes into TX_RTS_CFG; RTS protection is then enabled or disabled in
 * each per-mode protection register (CCK, OFDM, mixed-mode 20/40 MHz
 * and greenfield 20/40 MHz).  A value at or above
 * IEEE80211_MAX_RTS_THRESHOLD means "RTS disabled".
 * Always returns 0.
 */
static int rt2800pci_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	u32 reg;
	bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);

	rt2x00pci_register_read(rt2x00dev, TX_RTS_CFG, &reg);
	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
	rt2x00pci_register_write(rt2x00dev, TX_RTS_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
	rt2x00pci_register_write(rt2x00dev, CCK_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
	rt2x00pci_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
	rt2x00pci_register_write(rt2x00dev, MM20_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
	rt2x00pci_register_write(rt2x00dev, MM40_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
	rt2x00pci_register_write(rt2x00dev, GF20_PROT_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
	rt2x00pci_register_write(rt2x00dev, GF40_PROT_CFG, reg);

	return 0;
}
3053
/*
 * mac80211 callback: configure TX queue parameters (TXOP, AIFS,
 * CWmin, CWmax).  The values are first validated and stored by
 * rt2x00lib, then written into the shared WMM registers (bit-fields
 * per queue) and the per-queue EDCA register.
 * Returns 0 on success or the error from rt2x00mac_conf_tx().
 */
static int rt2800pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
			     const struct ieee80211_tx_queue_params *params)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;
	struct rt2x00_field32 field;
	int retval;
	u32 reg;
	u32 offset;

	/*
	 * First pass the configuration through rt2x00lib, that will
	 * update the queue settings and validate the input. After that
	 * we are free to update the registers based on the value
	 * in the queue parameter.
	 */
	retval = rt2x00mac_conf_tx(hw, queue_idx, params);
	if (retval)
		return retval;

	/*
	 * We only need to perform additional register initialization
	 * for WMM queues (indices 0-3).
	 */
	if (queue_idx >= 4)
		return 0;

	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);

	/*
	 * Update WMM TXOP register: two queues share each 32-bit
	 * register, 16 bits per queue.
	 */
	offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
	field.bit_offset = (queue_idx & 1) * 16;
	field.bit_mask = 0xffff << field.bit_offset;

	rt2x00pci_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, field, queue->txop);
	rt2x00pci_register_write(rt2x00dev, offset, reg);

	/* Update WMM registers: 4 bits per queue in each register. */
	field.bit_offset = queue_idx * 4;
	field.bit_mask = 0xf << field.bit_offset;

	rt2x00pci_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
	rt2x00_set_field32(&reg, field, queue->aifs);
	rt2x00pci_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
	rt2x00_set_field32(&reg, field, queue->cw_min);
	rt2x00pci_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);

	rt2x00pci_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
	rt2x00_set_field32(&reg, field, queue->cw_max);
	rt2x00pci_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);

	/* Update EDCA registers: one full register per queue. */
	offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);

	rt2x00pci_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
	rt2x00pci_register_write(rt2x00dev, offset, reg);

	return 0;
}
3120
3121static u64 rt2800pci_get_tsf(struct ieee80211_hw *hw)
3122{
3123 struct rt2x00_dev *rt2x00dev = hw->priv;
3124 u64 tsf;
3125 u32 reg;
3126
3127 rt2x00pci_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
3128 tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
3129 rt2x00pci_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
3130 tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);
3131
3132 return tsf;
3133}
3134
/*
 * mac80211 callback table.  Most operations are handled generically
 * by rt2x00lib (rt2x00mac_*); only the register-level callbacks
 * (TKIP sequence read-out, RTS threshold, TX queue parameters and
 * TSF read-out) are rt2800pci specific.
 */
static const struct ieee80211_ops rt2800pci_mac80211_ops = {
	.tx			= rt2x00mac_tx,
	.start			= rt2x00mac_start,
	.stop			= rt2x00mac_stop,
	.add_interface		= rt2x00mac_add_interface,
	.remove_interface	= rt2x00mac_remove_interface,
	.config			= rt2x00mac_config,
	.configure_filter	= rt2x00mac_configure_filter,
	.set_key		= rt2x00mac_set_key,
	.get_stats		= rt2x00mac_get_stats,
	.get_tkip_seq		= rt2800pci_get_tkip_seq,
	.set_rts_threshold	= rt2800pci_set_rts_threshold,
	.bss_info_changed	= rt2x00mac_bss_info_changed,
	.conf_tx		= rt2800pci_conf_tx,
	.get_tx_stats		= rt2x00mac_get_tx_stats,
	.get_tsf		= rt2800pci_get_tsf,
	.rfkill_poll		= rt2x00mac_rfkill_poll,
};
3153
/*
 * rt2x00lib driver callback table: device-specific handlers for
 * interrupts, firmware loading, device state, queue/descriptor
 * handling and hardware configuration.
 */
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
	.irq_handler		= rt2800pci_interrupt,
	.probe_hw		= rt2800pci_probe_hw,
	.get_firmware_name	= rt2800pci_get_firmware_name,
	.check_firmware		= rt2800pci_check_firmware,
	.load_firmware		= rt2800pci_load_firmware,
	.initialize		= rt2x00pci_initialize,
	.uninitialize		= rt2x00pci_uninitialize,
	.get_entry_state	= rt2800pci_get_entry_state,
	.clear_entry		= rt2800pci_clear_entry,
	.set_device_state	= rt2800pci_set_device_state,
	.rfkill_poll		= rt2800pci_rfkill_poll,
	.link_stats		= rt2800pci_link_stats,
	.reset_tuner		= rt2800pci_reset_tuner,
	.link_tuner		= rt2800pci_link_tuner,
	.write_tx_desc		= rt2800pci_write_tx_desc,
	.write_tx_data		= rt2x00pci_write_tx_data,
	.write_beacon		= rt2800pci_write_beacon,
	.kick_tx_queue		= rt2800pci_kick_tx_queue,
	.kill_tx_queue		= rt2800pci_kill_tx_queue,
	.fill_rxdone		= rt2800pci_fill_rxdone,
	.config_shared_key	= rt2800pci_config_shared_key,
	.config_pairwise_key	= rt2800pci_config_pairwise_key,
	.config_filter		= rt2800pci_config_filter,
	.config_intf		= rt2800pci_config_intf,
	.config_erp		= rt2800pci_config_erp,
	.config_ant		= rt2800pci_config_ant,
	.config			= rt2800pci_config,
};
3183
/*
 * Queue descriptors: sizing for the RX, TX and beacon data queues.
 */
static const struct data_queue_desc rt2800pci_queue_rx = {
	.entry_num		= RX_ENTRIES,
	.data_size		= AGGREGATION_SIZE,
	.desc_size		= RXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_pci),
};

static const struct data_queue_desc rt2800pci_queue_tx = {
	.entry_num		= TX_ENTRIES,
	.data_size		= AGGREGATION_SIZE,
	.desc_size		= TXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_pci),
};

static const struct data_queue_desc rt2800pci_queue_bcn = {
	.entry_num		= 8 * BEACON_ENTRIES,
	.data_size		= 0, /* No DMA required for beacons */
	.desc_size		= TXWI_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_pci),
};
3204
/*
 * Top-level rt2x00 driver description: interface limits, EEPROM/RF
 * sizes, queue descriptors and the callback tables defined above.
 */
static const struct rt2x00_ops rt2800pci_ops = {
	.name		= KBUILD_MODNAME,
	.max_sta_intf	= 1,
	.max_ap_intf	= 8,
	.eeprom_size	= EEPROM_SIZE,
	.rf_size	= RF_SIZE,
	.tx_queues	= NUM_TX_QUEUES,
	.rx		= &rt2800pci_queue_rx,
	.tx		= &rt2800pci_queue_tx,
	.bcn		= &rt2800pci_queue_bcn,
	.lib		= &rt2800pci_rt2x00_ops,
	.hw		= &rt2800pci_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
	.debugfs	= &rt2800pci_rt2x00debug,
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
};
3221
3222/*
3223 * RT2800pci module information.
3224 */
3225static struct pci_device_id rt2800pci_device_table[] = {
3226 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
3227 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
3228 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
3229 { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
3230 { PCI_DEVICE(0x1432, 0x7738), PCI_DEVICE_DATA(&rt2800pci_ops) },
3231 { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
3232 { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
3233 { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
3234 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
3235 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
3236 { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
3237 { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
3238 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
3239 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
3240 { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
3241 { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
3242 { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
3243 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
3244 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
3245 { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
3246 { 0, }
3247};
3248
/*
 * Module metadata.  The firmware reference and PCI device table are
 * only relevant when the PCI variant of the driver is built.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
#ifdef CONFIG_RT2800PCI_PCI
MODULE_FIRMWARE(FIRMWARE_RT2860);
MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
#endif /* CONFIG_RT2800PCI_PCI */
MODULE_LICENSE("GPL");
3258
#ifdef CONFIG_RT2800PCI_WISOC
/*
 * Select the SoC chipset at build time.  NOTE(review): the
 * __rt2x00soc_probe() macro presumably expands to a static platform
 * probe function that is itself named __rt2x00soc_probe (binding the
 * chipset id and ops structure), which is why the same identifier is
 * used as the .probe callback below -- verify against rt2x00soc.h.
 */
#if defined(CONFIG_RALINK_RT288X)
__rt2x00soc_probe(RT2880, &rt2800pci_ops);
#elif defined(CONFIG_RALINK_RT305X)
__rt2x00soc_probe(RT3052, &rt2800pci_ops);
#endif

static struct platform_driver rt2800soc_driver = {
	.driver		= {
		.name		= "rt2800_wmac",
		.owner		= THIS_MODULE,
		.mod_name	= KBUILD_MODNAME,
	},
	.probe		= __rt2x00soc_probe,
	.remove		= __devexit_p(rt2x00soc_remove),
	.suspend	= rt2x00soc_suspend,
	.resume		= rt2x00soc_resume,
};
#endif /* CONFIG_RT2800PCI_WISOC */
3278
#ifdef CONFIG_RT2800PCI_PCI
/* PCI driver description; probe/remove/suspend/resume are generic
 * rt2x00pci handlers which receive the ops via the device table. */
static struct pci_driver rt2800pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= rt2800pci_device_table,
	.probe		= rt2x00pci_probe,
	.remove		= __devexit_p(rt2x00pci_remove),
	.suspend	= rt2x00pci_suspend,
	.resume		= rt2x00pci_resume,
};
#endif /* CONFIG_RT2800PCI_PCI */
3289
/*
 * Module init: register the SoC platform driver and/or the PCI driver
 * depending on the kernel configuration.  If PCI registration fails
 * after the platform driver was registered, the platform driver is
 * unregistered again so no partial registration remains.
 */
static int __init rt2800pci_init(void)
{
	int ret = 0;

#ifdef CONFIG_RT2800PCI_WISOC
	ret = platform_driver_register(&rt2800soc_driver);
	if (ret)
		return ret;
#endif
#ifdef CONFIG_RT2800PCI_PCI
	ret = pci_register_driver(&rt2800pci_driver);
	if (ret) {
#ifdef CONFIG_RT2800PCI_WISOC
		/* Unwind the platform driver registration. */
		platform_driver_unregister(&rt2800soc_driver);
#endif
		return ret;
	}
#endif

	return ret;
}
3311
/*
 * Module exit: unregister the drivers in reverse registration order.
 */
static void __exit rt2800pci_exit(void)
{
#ifdef CONFIG_RT2800PCI_PCI
	pci_unregister_driver(&rt2800pci_driver);
#endif
#ifdef CONFIG_RT2800PCI_WISOC
	platform_driver_unregister(&rt2800soc_driver);
#endif
}

module_init(rt2800pci_init);
module_exit(rt2800pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
new file mode 100644
index 000000000000..856908815221
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -0,0 +1,1960 @@
1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2800pci
23 Abstract: Data structures and registers for the rt2800pci module.
24 Supported chipsets: RT2800E & RT2800ED.
25 */
26
27#ifndef RT2800PCI_H
28#define RT2800PCI_H
29
30/*
31 * RF chip defines.
32 *
33 * RF2820 2.4G 2T3R
34 * RF2850 2.4G/5G 2T3R
35 * RF2720 2.4G 1T2R
36 * RF2750 2.4G/5G 1T2R
37 * RF3020 2.4G 1T1R
38 * RF2020 2.4G B/G
39 * RF3021 2.4G 1T2R
40 * RF3022 2.4G 2T2R
41 * RF3052 2.4G 2T2R
42 */
43#define RF2820 0x0001
44#define RF2850 0x0002
45#define RF2720 0x0003
46#define RF2750 0x0004
47#define RF3020 0x0005
48#define RF2020 0x0006
49#define RF3021 0x0007
50#define RF3022 0x0008
51#define RF3052 0x0009
52
53/*
54 * RT2860 version
55 */
56#define RT2860C_VERSION 0x28600100
57#define RT2860D_VERSION 0x28600101
58#define RT2880E_VERSION 0x28720200
59#define RT2883_VERSION 0x28830300
60#define RT3070_VERSION 0x30700200
61
62/*
63 * Signal information.
64 * Default offset is required for RSSI <-> dBm conversion.
65 */
66#define DEFAULT_RSSI_OFFSET 120 /* FIXME */
67
68/*
69 * Register layout information.
70 */
71#define CSR_REG_BASE 0x1000
72#define CSR_REG_SIZE 0x0800
73#define EEPROM_BASE 0x0000
74#define EEPROM_SIZE 0x0110
75#define BBP_BASE 0x0000
76#define BBP_SIZE 0x0080
77#define RF_BASE 0x0004
78#define RF_SIZE 0x0010
79
80/*
81 * Number of TX queues.
82 */
83#define NUM_TX_QUEUES 4
84
85/*
86 * PCI registers.
87 */
88
89/*
90 * E2PROM_CSR: EEPROM control register.
91 * RELOAD: Write 1 to reload eeprom content.
92 * TYPE: 0: 93c46, 1:93c66.
93 * LOAD_STATUS: 1:loading, 0:done.
94 */
95#define E2PROM_CSR 0x0004
96#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000001)
97#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000002)
98#define E2PROM_CSR_DATA_IN FIELD32(0x00000004)
99#define E2PROM_CSR_DATA_OUT FIELD32(0x00000008)
100#define E2PROM_CSR_TYPE FIELD32(0x00000030)
101#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040)
102#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
103
104/*
105 * INT_SOURCE_CSR: Interrupt source register.
106 * Write one to clear corresponding bit.
107 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
108 */
109#define INT_SOURCE_CSR 0x0200
110#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
111#define INT_SOURCE_CSR_TXDELAYINT FIELD32(0x00000002)
112#define INT_SOURCE_CSR_RX_DONE FIELD32(0x00000004)
113#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00000008)
114#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00000010)
115#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00000020)
116#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00000040)
117#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
118#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
119#define INT_SOURCE_CSR_MCU_COMMAND FIELD32(0x00000200)
120#define INT_SOURCE_CSR_RXTX_COHERENT FIELD32(0x00000400)
121#define INT_SOURCE_CSR_TBTT FIELD32(0x00000800)
122#define INT_SOURCE_CSR_PRE_TBTT FIELD32(0x00001000)
123#define INT_SOURCE_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
124#define INT_SOURCE_CSR_AUTO_WAKEUP FIELD32(0x00004000)
125#define INT_SOURCE_CSR_GPTIMER FIELD32(0x00008000)
126#define INT_SOURCE_CSR_RX_COHERENT FIELD32(0x00010000)
127#define INT_SOURCE_CSR_TX_COHERENT FIELD32(0x00020000)
128
129/*
 * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is masked off (disabled).
131 */
132#define INT_MASK_CSR 0x0204
133#define INT_MASK_CSR_RXDELAYINT FIELD32(0x00000001)
134#define INT_MASK_CSR_TXDELAYINT FIELD32(0x00000002)
135#define INT_MASK_CSR_RX_DONE FIELD32(0x00000004)
136#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00000008)
137#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00000010)
138#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00000020)
139#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00000040)
140#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
141#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
142#define INT_MASK_CSR_MCU_COMMAND FIELD32(0x00000200)
143#define INT_MASK_CSR_RXTX_COHERENT FIELD32(0x00000400)
144#define INT_MASK_CSR_TBTT FIELD32(0x00000800)
145#define INT_MASK_CSR_PRE_TBTT FIELD32(0x00001000)
146#define INT_MASK_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
147#define INT_MASK_CSR_AUTO_WAKEUP FIELD32(0x00004000)
148#define INT_MASK_CSR_GPTIMER FIELD32(0x00008000)
149#define INT_MASK_CSR_RX_COHERENT FIELD32(0x00010000)
150#define INT_MASK_CSR_TX_COHERENT FIELD32(0x00020000)
151
152/*
153 * WPDMA_GLO_CFG
154 */
155#define WPDMA_GLO_CFG 0x0208
156#define WPDMA_GLO_CFG_ENABLE_TX_DMA FIELD32(0x00000001)
157#define WPDMA_GLO_CFG_TX_DMA_BUSY FIELD32(0x00000002)
158#define WPDMA_GLO_CFG_ENABLE_RX_DMA FIELD32(0x00000004)
159#define WPDMA_GLO_CFG_RX_DMA_BUSY FIELD32(0x00000008)
160#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE FIELD32(0x00000030)
161#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE FIELD32(0x00000040)
162#define WPDMA_GLO_CFG_BIG_ENDIAN FIELD32(0x00000080)
163#define WPDMA_GLO_CFG_RX_HDR_SCATTER FIELD32(0x0000ff00)
164#define WPDMA_GLO_CFG_HDR_SEG_LEN FIELD32(0xffff0000)
165
166/*
167 * WPDMA_RST_IDX
168 */
169#define WPDMA_RST_IDX 0x020c
170#define WPDMA_RST_IDX_DTX_IDX0 FIELD32(0x00000001)
171#define WPDMA_RST_IDX_DTX_IDX1 FIELD32(0x00000002)
172#define WPDMA_RST_IDX_DTX_IDX2 FIELD32(0x00000004)
173#define WPDMA_RST_IDX_DTX_IDX3 FIELD32(0x00000008)
174#define WPDMA_RST_IDX_DTX_IDX4 FIELD32(0x00000010)
175#define WPDMA_RST_IDX_DTX_IDX5 FIELD32(0x00000020)
176#define WPDMA_RST_IDX_DRX_IDX0 FIELD32(0x00010000)
177
178/*
179 * DELAY_INT_CFG
180 */
181#define DELAY_INT_CFG 0x0210
182#define DELAY_INT_CFG_RXMAX_PTIME FIELD32(0x000000ff)
183#define DELAY_INT_CFG_RXMAX_PINT FIELD32(0x00007f00)
184#define DELAY_INT_CFG_RXDLY_INT_EN FIELD32(0x00008000)
185#define DELAY_INT_CFG_TXMAX_PTIME FIELD32(0x00ff0000)
186#define DELAY_INT_CFG_TXMAX_PINT FIELD32(0x7f000000)
187#define DELAY_INT_CFG_TXDLY_INT_EN FIELD32(0x80000000)
188
189/*
190 * WMM_AIFSN_CFG: Aifsn for each EDCA AC
191 * AIFSN0: AC_BE
192 * AIFSN1: AC_BK
 * AIFSN2: AC_VI
 * AIFSN3: AC_VO
195 */
196#define WMM_AIFSN_CFG 0x0214
197#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f)
198#define WMM_AIFSN_CFG_AIFSN1 FIELD32(0x000000f0)
199#define WMM_AIFSN_CFG_AIFSN2 FIELD32(0x00000f00)
200#define WMM_AIFSN_CFG_AIFSN3 FIELD32(0x0000f000)
201
202/*
 * WMM_CWMIN_CFG: CWmin for each EDCA AC
 * CWMIN0: AC_BE
 * CWMIN1: AC_BK
 * CWMIN2: AC_VI
 * CWMIN3: AC_VO
208 */
209#define WMM_CWMIN_CFG 0x0218
210#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f)
211#define WMM_CWMIN_CFG_CWMIN1 FIELD32(0x000000f0)
212#define WMM_CWMIN_CFG_CWMIN2 FIELD32(0x00000f00)
213#define WMM_CWMIN_CFG_CWMIN3 FIELD32(0x0000f000)
214
215/*
 * WMM_CWMAX_CFG: CWmax for each EDCA AC
 * CWMAX0: AC_BE
 * CWMAX1: AC_BK
 * CWMAX2: AC_VI
 * CWMAX3: AC_VO
221 */
222#define WMM_CWMAX_CFG 0x021c
223#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f)
224#define WMM_CWMAX_CFG_CWMAX1 FIELD32(0x000000f0)
225#define WMM_CWMAX_CFG_CWMAX2 FIELD32(0x00000f00)
226#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000)
227
228/*
229 * AC_TXOP0: AC_BK/AC_BE TXOP register
230 * AC0TXOP: AC_BK in unit of 32us
231 * AC1TXOP: AC_BE in unit of 32us
232 */
233#define WMM_TXOP0_CFG 0x0220
234#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff)
235#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000)
236
237/*
238 * AC_TXOP1: AC_VO/AC_VI TXOP register
239 * AC2TXOP: AC_VI in unit of 32us
240 * AC3TXOP: AC_VO in unit of 32us
241 */
242#define WMM_TXOP1_CFG 0x0224
243#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff)
244#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000)
245
246/*
247 * GPIO_CTRL_CFG:
248 */
249#define GPIO_CTRL_CFG 0x0228
250#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
251#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002)
252#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004)
253#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008)
254#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010)
255#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
256#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
257#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
258#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100)
259
260/*
261 * MCU_CMD_CFG
262 */
263#define MCU_CMD_CFG 0x022c
264
265/*
266 * AC_BK register offsets
267 */
268#define TX_BASE_PTR0 0x0230
269#define TX_MAX_CNT0 0x0234
270#define TX_CTX_IDX0 0x0238
271#define TX_DTX_IDX0 0x023c
272
273/*
274 * AC_BE register offsets
275 */
276#define TX_BASE_PTR1 0x0240
277#define TX_MAX_CNT1 0x0244
278#define TX_CTX_IDX1 0x0248
279#define TX_DTX_IDX1 0x024c
280
281/*
282 * AC_VI register offsets
283 */
284#define TX_BASE_PTR2 0x0250
285#define TX_MAX_CNT2 0x0254
286#define TX_CTX_IDX2 0x0258
287#define TX_DTX_IDX2 0x025c
288
289/*
290 * AC_VO register offsets
291 */
292#define TX_BASE_PTR3 0x0260
293#define TX_MAX_CNT3 0x0264
294#define TX_CTX_IDX3 0x0268
295#define TX_DTX_IDX3 0x026c
296
297/*
298 * HCCA register offsets
299 */
300#define TX_BASE_PTR4 0x0270
301#define TX_MAX_CNT4 0x0274
302#define TX_CTX_IDX4 0x0278
303#define TX_DTX_IDX4 0x027c
304
305/*
306 * MGMT register offsets
307 */
308#define TX_BASE_PTR5 0x0280
309#define TX_MAX_CNT5 0x0284
310#define TX_CTX_IDX5 0x0288
311#define TX_DTX_IDX5 0x028c
312
/*
 * Queue register offset macros.
 * Each TX queue's four registers (base pointer, max count, CPU TX index,
 * DMA TX index) repeat every TX_QUEUE_REG_OFFSET bytes starting at the
 * queue-0 registers, so the per-queue address is BASE0 + queue * stride.
 * The full expansion of each macro is parenthesized so that it behaves
 * as a single value regardless of the surrounding expression's operator
 * precedence (e.g. inside multiplications or further additions).
 */
#define TX_QUEUE_REG_OFFSET	0x10
#define TX_BASE_PTR(__x)	(TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
#define TX_MAX_CNT(__x)		(TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
#define TX_CTX_IDX(__x)		(TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
#define TX_DTX_IDX(__x)		(TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
321
322/*
323 * RX register offsets
324 */
325#define RX_BASE_PTR 0x0290
326#define RX_MAX_CNT 0x0294
327#define RX_CRX_IDX 0x0298
328#define RX_DRX_IDX 0x029c
329
330/*
331 * PBF_SYS_CTRL
332 * HOST_RAM_WRITE: enable Host program ram write selection
333 */
334#define PBF_SYS_CTRL 0x0400
335#define PBF_SYS_CTRL_READY FIELD32(0x00000080)
336#define PBF_SYS_CTRL_HOST_RAM_WRITE FIELD32(0x00010000)
337
338/*
339 * HOST-MCU shared memory
340 */
341#define HOST_CMD_CSR 0x0404
342#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x000000ff)
343
344/*
345 * PBF registers
346 * Most are for debug. Driver doesn't touch PBF register.
347 */
348#define PBF_CFG 0x0408
349#define PBF_MAX_PCNT 0x040c
350#define PBF_CTRL 0x0410
351#define PBF_INT_STA 0x0414
352#define PBF_INT_ENA 0x0418
353
354/*
355 * BCN_OFFSET0:
356 */
357#define BCN_OFFSET0 0x042c
358#define BCN_OFFSET0_BCN0 FIELD32(0x000000ff)
359#define BCN_OFFSET0_BCN1 FIELD32(0x0000ff00)
360#define BCN_OFFSET0_BCN2 FIELD32(0x00ff0000)
361#define BCN_OFFSET0_BCN3 FIELD32(0xff000000)
362
363/*
364 * BCN_OFFSET1:
365 */
366#define BCN_OFFSET1 0x0430
367#define BCN_OFFSET1_BCN4 FIELD32(0x000000ff)
368#define BCN_OFFSET1_BCN5 FIELD32(0x0000ff00)
369#define BCN_OFFSET1_BCN6 FIELD32(0x00ff0000)
370#define BCN_OFFSET1_BCN7 FIELD32(0xff000000)
371
372/*
373 * PBF registers
374 * Most are for debug. Driver doesn't touch PBF register.
375 */
376#define TXRXQ_PCNT 0x0438
377#define PBF_DBG 0x043c
378
379/*
380 * RF registers
381 */
382#define RF_CSR_CFG 0x0500
383#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
384#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00)
385#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
386#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
387
388/*
389 * EFUSE_CSR: RT3090 EEPROM
390 */
391#define EFUSE_CTRL 0x0580
392#define EFUSE_CTRL_ADDRESS_IN FIELD32(0x03fe0000)
393#define EFUSE_CTRL_MODE FIELD32(0x000000c0)
394#define EFUSE_CTRL_KICK FIELD32(0x40000000)
395
396/*
397 * EFUSE_DATA0
398 */
399#define EFUSE_DATA0 0x0590
400
401/*
402 * EFUSE_DATA1
403 */
404#define EFUSE_DATA1 0x0594
405
406/*
407 * EFUSE_DATA2
408 */
409#define EFUSE_DATA2 0x0598
410
411/*
412 * EFUSE_DATA3
413 */
414#define EFUSE_DATA3 0x059c
415
416/*
417 * MAC Control/Status Registers(CSR).
418 * Some values are set in TU, whereas 1 TU == 1024 us.
419 */
420
421/*
422 * MAC_CSR0: ASIC revision number.
423 * ASIC_REV: 0
424 * ASIC_VER: 2860
425 */
426#define MAC_CSR0 0x1000
427#define MAC_CSR0_ASIC_REV FIELD32(0x0000ffff)
428#define MAC_CSR0_ASIC_VER FIELD32(0xffff0000)
429
430/*
431 * MAC_SYS_CTRL:
432 */
433#define MAC_SYS_CTRL 0x1004
434#define MAC_SYS_CTRL_RESET_CSR FIELD32(0x00000001)
435#define MAC_SYS_CTRL_RESET_BBP FIELD32(0x00000002)
436#define MAC_SYS_CTRL_ENABLE_TX FIELD32(0x00000004)
437#define MAC_SYS_CTRL_ENABLE_RX FIELD32(0x00000008)
438#define MAC_SYS_CTRL_CONTINUOUS_TX FIELD32(0x00000010)
439#define MAC_SYS_CTRL_LOOPBACK FIELD32(0x00000020)
440#define MAC_SYS_CTRL_WLAN_HALT FIELD32(0x00000040)
441#define MAC_SYS_CTRL_RX_TIMESTAMP FIELD32(0x00000080)
442
443/*
444 * MAC_ADDR_DW0: STA MAC register 0
445 */
446#define MAC_ADDR_DW0 0x1008
447#define MAC_ADDR_DW0_BYTE0 FIELD32(0x000000ff)
448#define MAC_ADDR_DW0_BYTE1 FIELD32(0x0000ff00)
449#define MAC_ADDR_DW0_BYTE2 FIELD32(0x00ff0000)
450#define MAC_ADDR_DW0_BYTE3 FIELD32(0xff000000)
451
452/*
453 * MAC_ADDR_DW1: STA MAC register 1
454 * UNICAST_TO_ME_MASK:
455 * Used to mask off bits from byte 5 of the MAC address
456 * to determine the UNICAST_TO_ME bit for RX frames.
457 * The full mask is complemented by BSS_ID_MASK:
458 * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
459 */
460#define MAC_ADDR_DW1 0x100c
461#define MAC_ADDR_DW1_BYTE4 FIELD32(0x000000ff)
462#define MAC_ADDR_DW1_BYTE5 FIELD32(0x0000ff00)
463#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK FIELD32(0x00ff0000)
464
465/*
466 * MAC_BSSID_DW0: BSSID register 0
467 */
468#define MAC_BSSID_DW0 0x1010
469#define MAC_BSSID_DW0_BYTE0 FIELD32(0x000000ff)
470#define MAC_BSSID_DW0_BYTE1 FIELD32(0x0000ff00)
471#define MAC_BSSID_DW0_BYTE2 FIELD32(0x00ff0000)
472#define MAC_BSSID_DW0_BYTE3 FIELD32(0xff000000)
473
474/*
475 * MAC_BSSID_DW1: BSSID register 1
476 * BSS_ID_MASK:
477 * 0: 1-BSSID mode (BSS index = 0)
478 * 1: 2-BSSID mode (BSS index: Byte5, bit 0)
479 * 2: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
480 * 3: 8-BSSID mode (BSS index: byte5, bit 0 - 2)
481 * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the
482 * BSSID. This will make sure that those bits will be ignored
483 * when determining the MY_BSS of RX frames.
484 */
485#define MAC_BSSID_DW1 0x1014
486#define MAC_BSSID_DW1_BYTE4 FIELD32(0x000000ff)
487#define MAC_BSSID_DW1_BYTE5 FIELD32(0x0000ff00)
488#define MAC_BSSID_DW1_BSS_ID_MASK FIELD32(0x00030000)
489#define MAC_BSSID_DW1_BSS_BCN_NUM FIELD32(0x001c0000)
490
491/*
492 * MAX_LEN_CFG: Maximum frame length register.
493 * MAX_MPDU: rt2860b max 16k bytes
494 * MAX_PSDU: Maximum PSDU length
495 * (power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16
496 */
497#define MAX_LEN_CFG 0x1018
498#define MAX_LEN_CFG_MAX_MPDU FIELD32(0x00000fff)
499#define MAX_LEN_CFG_MAX_PSDU FIELD32(0x00003000)
500#define MAX_LEN_CFG_MIN_PSDU FIELD32(0x0000c000)
501#define MAX_LEN_CFG_MIN_MPDU FIELD32(0x000f0000)
502
503/*
504 * BBP_CSR_CFG: BBP serial control register
505 * VALUE: Register value to program into BBP
506 * REG_NUM: Selected BBP register
507 * READ_CONTROL: 0 write BBP, 1 read BBP
508 * BUSY: ASIC is busy executing BBP commands
509 * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
 * BBP_RW_MODE: 0 serial, 1 parallel
511 */
512#define BBP_CSR_CFG 0x101c
513#define BBP_CSR_CFG_VALUE FIELD32(0x000000ff)
514#define BBP_CSR_CFG_REGNUM FIELD32(0x0000ff00)
515#define BBP_CSR_CFG_READ_CONTROL FIELD32(0x00010000)
516#define BBP_CSR_CFG_BUSY FIELD32(0x00020000)
517#define BBP_CSR_CFG_BBP_PAR_DUR FIELD32(0x00040000)
518#define BBP_CSR_CFG_BBP_RW_MODE FIELD32(0x00080000)
519
520/*
521 * RF_CSR_CFG0: RF control register
522 * REGID_AND_VALUE: Register value to program into RF
523 * BITWIDTH: Selected RF register
524 * STANDBYMODE: 0 high when standby, 1 low when standby
525 * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate
526 * BUSY: ASIC is busy executing RF commands
527 */
528#define RF_CSR_CFG0 0x1020
529#define RF_CSR_CFG0_REGID_AND_VALUE FIELD32(0x00ffffff)
530#define RF_CSR_CFG0_BITWIDTH FIELD32(0x1f000000)
531#define RF_CSR_CFG0_REG_VALUE_BW FIELD32(0x1fffffff)
532#define RF_CSR_CFG0_STANDBYMODE FIELD32(0x20000000)
533#define RF_CSR_CFG0_SEL FIELD32(0x40000000)
534#define RF_CSR_CFG0_BUSY FIELD32(0x80000000)
535
536/*
537 * RF_CSR_CFG1: RF control register
538 * REGID_AND_VALUE: Register value to program into RF
539 * RFGAP: Gap between BB_CONTROL_RF and RF_LE
540 * 0: 3 system clock cycle (37.5usec)
541 * 1: 5 system clock cycle (62.5usec)
542 */
543#define RF_CSR_CFG1 0x1024
544#define RF_CSR_CFG1_REGID_AND_VALUE FIELD32(0x00ffffff)
545#define RF_CSR_CFG1_RFGAP FIELD32(0x1f000000)
546
547/*
548 * RF_CSR_CFG2: RF control register
549 * VALUE: Register value to program into RF
550 * RFGAP: Gap between BB_CONTROL_RF and RF_LE
551 * 0: 3 system clock cycle (37.5usec)
552 * 1: 5 system clock cycle (62.5usec)
553 */
554#define RF_CSR_CFG2 0x1028
555#define RF_CSR_CFG2_VALUE FIELD32(0x00ffffff)
556
557/*
558 * LED_CFG: LED control
559 * color LED's:
560 * 0: off
561 * 1: blinking upon TX2
562 * 2: periodic slow blinking
563 * 3: always on
564 * LED polarity:
565 * 0: active low
566 * 1: active high
567 */
568#define LED_CFG 0x102c
569#define LED_CFG_ON_PERIOD FIELD32(0x000000ff)
570#define LED_CFG_OFF_PERIOD FIELD32(0x0000ff00)
571#define LED_CFG_SLOW_BLINK_PERIOD FIELD32(0x003f0000)
572#define LED_CFG_R_LED_MODE FIELD32(0x03000000)
573#define LED_CFG_G_LED_MODE FIELD32(0x0c000000)
574#define LED_CFG_Y_LED_MODE FIELD32(0x30000000)
575#define LED_CFG_LED_POLAR FIELD32(0x40000000)
576
577/*
578 * XIFS_TIME_CFG: MAC timing
579 * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
580 * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
581 * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX
582 * when MAC doesn't reference BBP signal BBRXEND
583 * EIFS: unit 1us
584 * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer
585 *
586 */
587#define XIFS_TIME_CFG 0x1100
588#define XIFS_TIME_CFG_CCKM_SIFS_TIME FIELD32(0x000000ff)
589#define XIFS_TIME_CFG_OFDM_SIFS_TIME FIELD32(0x0000ff00)
590#define XIFS_TIME_CFG_OFDM_XIFS_TIME FIELD32(0x000f0000)
591#define XIFS_TIME_CFG_EIFS FIELD32(0x1ff00000)
592#define XIFS_TIME_CFG_BB_RXEND_ENABLE FIELD32(0x20000000)
593
594/*
595 * BKOFF_SLOT_CFG:
596 */
597#define BKOFF_SLOT_CFG 0x1104
598#define BKOFF_SLOT_CFG_SLOT_TIME FIELD32(0x000000ff)
599#define BKOFF_SLOT_CFG_CC_DELAY_TIME FIELD32(0x0000ff00)
600
601/*
602 * NAV_TIME_CFG:
603 */
604#define NAV_TIME_CFG 0x1108
605#define NAV_TIME_CFG_SIFS FIELD32(0x000000ff)
606#define NAV_TIME_CFG_SLOT_TIME FIELD32(0x0000ff00)
607#define NAV_TIME_CFG_EIFS FIELD32(0x01ff0000)
608#define NAV_TIME_ZERO_SIFS FIELD32(0x02000000)
609
610/*
611 * CH_TIME_CFG: count as channel busy
612 */
613#define CH_TIME_CFG 0x110c
614
615/*
616 * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
617 */
618#define PBF_LIFE_TIMER 0x1110
619
620/*
621 * BCN_TIME_CFG:
622 * BEACON_INTERVAL: in unit of 1/16 TU
623 * TSF_TICKING: Enable TSF auto counting
624 * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode
625 * BEACON_GEN: Enable beacon generator
626 */
627#define BCN_TIME_CFG 0x1114
628#define BCN_TIME_CFG_BEACON_INTERVAL FIELD32(0x0000ffff)
629#define BCN_TIME_CFG_TSF_TICKING FIELD32(0x00010000)
630#define BCN_TIME_CFG_TSF_SYNC FIELD32(0x00060000)
631#define BCN_TIME_CFG_TBTT_ENABLE FIELD32(0x00080000)
632#define BCN_TIME_CFG_BEACON_GEN FIELD32(0x00100000)
633#define BCN_TIME_CFG_TX_TIME_COMPENSATE FIELD32(0xf0000000)
634
635/*
636 * TBTT_SYNC_CFG:
637 */
638#define TBTT_SYNC_CFG 0x1118
639
640/*
641 * TSF_TIMER_DW0: Local lsb TSF timer, read-only
642 */
643#define TSF_TIMER_DW0 0x111c
644#define TSF_TIMER_DW0_LOW_WORD FIELD32(0xffffffff)
645
646/*
647 * TSF_TIMER_DW1: Local msb TSF timer, read-only
648 */
649#define TSF_TIMER_DW1 0x1120
650#define TSF_TIMER_DW1_HIGH_WORD FIELD32(0xffffffff)
651
652/*
 * TBTT_TIMER: Time remaining until the next TBTT, read-only
654 */
655#define TBTT_TIMER 0x1124
656
657/*
658 * INT_TIMER_CFG:
659 */
660#define INT_TIMER_CFG 0x1128
661
662/*
663 * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
664 */
665#define INT_TIMER_EN 0x112c
666
667/*
668 * CH_IDLE_STA: channel idle time
669 */
670#define CH_IDLE_STA 0x1130
671
672/*
673 * CH_BUSY_STA: channel busy time
674 */
675#define CH_BUSY_STA 0x1134
676
677/*
678 * MAC_STATUS_CFG:
679 * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
680 * if 1 or higher one of the 2 registers is busy.
681 */
682#define MAC_STATUS_CFG 0x1200
683#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003)
684
685/*
686 * PWR_PIN_CFG:
687 */
688#define PWR_PIN_CFG 0x1204
689
690/*
691 * AUTOWAKEUP_CFG: Manual power control / status register
 * TBCN_BEFORE_WAKE: ForceWake has higher priority than PutToSleep when both are set
693 * AUTOWAKE: 0:sleep, 1:awake
694 */
695#define AUTOWAKEUP_CFG 0x1208
696#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME FIELD32(0x000000ff)
697#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE FIELD32(0x00007f00)
698#define AUTOWAKEUP_CFG_AUTOWAKE FIELD32(0x00008000)
699
700/*
701 * EDCA_AC0_CFG:
702 */
703#define EDCA_AC0_CFG 0x1300
704#define EDCA_AC0_CFG_TX_OP FIELD32(0x000000ff)
705#define EDCA_AC0_CFG_AIFSN FIELD32(0x00000f00)
706#define EDCA_AC0_CFG_CWMIN FIELD32(0x0000f000)
707#define EDCA_AC0_CFG_CWMAX FIELD32(0x000f0000)
708
709/*
710 * EDCA_AC1_CFG:
711 */
712#define EDCA_AC1_CFG 0x1304
713#define EDCA_AC1_CFG_TX_OP FIELD32(0x000000ff)
714#define EDCA_AC1_CFG_AIFSN FIELD32(0x00000f00)
715#define EDCA_AC1_CFG_CWMIN FIELD32(0x0000f000)
716#define EDCA_AC1_CFG_CWMAX FIELD32(0x000f0000)
717
718/*
719 * EDCA_AC2_CFG:
720 */
721#define EDCA_AC2_CFG 0x1308
722#define EDCA_AC2_CFG_TX_OP FIELD32(0x000000ff)
723#define EDCA_AC2_CFG_AIFSN FIELD32(0x00000f00)
724#define EDCA_AC2_CFG_CWMIN FIELD32(0x0000f000)
725#define EDCA_AC2_CFG_CWMAX FIELD32(0x000f0000)
726
727/*
728 * EDCA_AC3_CFG:
729 */
730#define EDCA_AC3_CFG 0x130c
731#define EDCA_AC3_CFG_TX_OP FIELD32(0x000000ff)
732#define EDCA_AC3_CFG_AIFSN FIELD32(0x00000f00)
733#define EDCA_AC3_CFG_CWMIN FIELD32(0x0000f000)
734#define EDCA_AC3_CFG_CWMAX FIELD32(0x000f0000)
735
736/*
737 * EDCA_TID_AC_MAP:
738 */
739#define EDCA_TID_AC_MAP 0x1310
740
741/*
742 * TX_PWR_CFG_0:
743 */
744#define TX_PWR_CFG_0 0x1314
745#define TX_PWR_CFG_0_1MBS FIELD32(0x0000000f)
746#define TX_PWR_CFG_0_2MBS FIELD32(0x000000f0)
747#define TX_PWR_CFG_0_55MBS FIELD32(0x00000f00)
748#define TX_PWR_CFG_0_11MBS FIELD32(0x0000f000)
749#define TX_PWR_CFG_0_6MBS FIELD32(0x000f0000)
750#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000)
751#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000)
752#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000)
753
754/*
755 * TX_PWR_CFG_1:
756 */
757#define TX_PWR_CFG_1 0x1318
758#define TX_PWR_CFG_1_24MBS FIELD32(0x0000000f)
759#define TX_PWR_CFG_1_36MBS FIELD32(0x000000f0)
760#define TX_PWR_CFG_1_48MBS FIELD32(0x00000f00)
761#define TX_PWR_CFG_1_54MBS FIELD32(0x0000f000)
762#define TX_PWR_CFG_1_MCS0 FIELD32(0x000f0000)
763#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000)
764#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000)
765#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000)
766
767/*
768 * TX_PWR_CFG_2:
769 */
770#define TX_PWR_CFG_2 0x131c
771#define TX_PWR_CFG_2_MCS4 FIELD32(0x0000000f)
772#define TX_PWR_CFG_2_MCS5 FIELD32(0x000000f0)
773#define TX_PWR_CFG_2_MCS6 FIELD32(0x00000f00)
774#define TX_PWR_CFG_2_MCS7 FIELD32(0x0000f000)
775#define TX_PWR_CFG_2_MCS8 FIELD32(0x000f0000)
776#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000)
777#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000)
778#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000)
779
780/*
781 * TX_PWR_CFG_3:
782 */
783#define TX_PWR_CFG_3 0x1320
784#define TX_PWR_CFG_3_MCS12 FIELD32(0x0000000f)
785#define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0)
786#define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00)
787#define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000)
788#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000)
789#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
790#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
791#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
792
793/*
794 * TX_PWR_CFG_4:
795 */
796#define TX_PWR_CFG_4 0x1324
797#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f)
798#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
799#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
800#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
801
802/*
803 * TX_PIN_CFG:
804 */
805#define TX_PIN_CFG 0x1328
806#define TX_PIN_CFG_PA_PE_A0_EN FIELD32(0x00000001)
807#define TX_PIN_CFG_PA_PE_G0_EN FIELD32(0x00000002)
808#define TX_PIN_CFG_PA_PE_A1_EN FIELD32(0x00000004)
809#define TX_PIN_CFG_PA_PE_G1_EN FIELD32(0x00000008)
810#define TX_PIN_CFG_PA_PE_A0_POL FIELD32(0x00000010)
811#define TX_PIN_CFG_PA_PE_G0_POL FIELD32(0x00000020)
812#define TX_PIN_CFG_PA_PE_A1_POL FIELD32(0x00000040)
813#define TX_PIN_CFG_PA_PE_G1_POL FIELD32(0x00000080)
814#define TX_PIN_CFG_LNA_PE_A0_EN FIELD32(0x00000100)
815#define TX_PIN_CFG_LNA_PE_G0_EN FIELD32(0x00000200)
816#define TX_PIN_CFG_LNA_PE_A1_EN FIELD32(0x00000400)
817#define TX_PIN_CFG_LNA_PE_G1_EN FIELD32(0x00000800)
818#define TX_PIN_CFG_LNA_PE_A0_POL FIELD32(0x00001000)
819#define TX_PIN_CFG_LNA_PE_G0_POL FIELD32(0x00002000)
820#define TX_PIN_CFG_LNA_PE_A1_POL FIELD32(0x00004000)
821#define TX_PIN_CFG_LNA_PE_G1_POL FIELD32(0x00008000)
822#define TX_PIN_CFG_RFTR_EN FIELD32(0x00010000)
823#define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000)
824#define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000)
825#define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000)
826
827/*
828 * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
829 */
830#define TX_BAND_CFG 0x132c
831#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001)
832#define TX_BAND_CFG_A FIELD32(0x00000002)
833#define TX_BAND_CFG_BG FIELD32(0x00000004)
834
835/*
836 * TX_SW_CFG0:
837 */
838#define TX_SW_CFG0 0x1330
839
840/*
841 * TX_SW_CFG1:
842 */
843#define TX_SW_CFG1 0x1334
844
845/*
846 * TX_SW_CFG2:
847 */
848#define TX_SW_CFG2 0x1338
849
850/*
851 * TXOP_THRES_CFG:
852 */
853#define TXOP_THRES_CFG 0x133c
854
855/*
856 * TXOP_CTRL_CFG:
857 */
858#define TXOP_CTRL_CFG 0x1340
859
860/*
861 * TX_RTS_CFG:
862 * RTS_THRES: unit:byte
863 * RTS_FBK_EN: enable rts rate fallback
864 */
865#define TX_RTS_CFG 0x1344
866#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT FIELD32(0x000000ff)
867#define TX_RTS_CFG_RTS_THRES FIELD32(0x00ffff00)
868#define TX_RTS_CFG_RTS_FBK_EN FIELD32(0x01000000)
869
870/*
871 * TX_TIMEOUT_CFG:
872 * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us
873 * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure
874 * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation.
875 * it is recommended that:
876 * (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT)
877 */
878#define TX_TIMEOUT_CFG 0x1348
879#define TX_TIMEOUT_CFG_MPDU_LIFETIME FIELD32(0x000000f0)
880#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT FIELD32(0x0000ff00)
881#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT FIELD32(0x00ff0000)
882
883/*
884 * TX_RTY_CFG:
885 * SHORT_RTY_LIMIT: short retry limit
886 * LONG_RTY_LIMIT: long retry limit
 * LONG_RTY_THRE: Long retry threshold
888 * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode
889 * 0:expired by retry limit, 1: expired by mpdu life timer
890 * AGG_RTY_MODE: Aggregate MPDU retry mode
891 * 0:expired by retry limit, 1: expired by mpdu life timer
892 * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable
893 */
894#define TX_RTY_CFG 0x134c
895#define TX_RTY_CFG_SHORT_RTY_LIMIT FIELD32(0x000000ff)
896#define TX_RTY_CFG_LONG_RTY_LIMIT FIELD32(0x0000ff00)
897#define TX_RTY_CFG_LONG_RTY_THRE FIELD32(0x0fff0000)
898#define TX_RTY_CFG_NON_AGG_RTY_MODE FIELD32(0x10000000)
899#define TX_RTY_CFG_AGG_RTY_MODE FIELD32(0x20000000)
900#define TX_RTY_CFG_TX_AUTO_FB_ENABLE FIELD32(0x40000000)
901
902/*
903 * TX_LINK_CFG:
904 * REMOTE_MFB_LIFETIME: remote MFB life time. unit: 32us
905 * MFB_ENABLE: TX apply remote MFB 1:enable
906 * REMOTE_UMFS_ENABLE: remote unsolicit MFB enable
 *	0: do not apply remote unsolicited MFB (MFS=7)
908 * TX_MRQ_EN: MCS request TX enable
909 * TX_RDG_EN: RDG TX enable
910 * TX_CF_ACK_EN: Piggyback CF-ACK enable
911 * REMOTE_MFB: remote MCS feedback
912 * REMOTE_MFS: remote MCS feedback sequence number
913 */
914#define TX_LINK_CFG 0x1350
915#define TX_LINK_CFG_REMOTE_MFB_LIFETIME FIELD32(0x000000ff)
916#define TX_LINK_CFG_MFB_ENABLE FIELD32(0x00000100)
917#define TX_LINK_CFG_REMOTE_UMFS_ENABLE FIELD32(0x00000200)
918#define TX_LINK_CFG_TX_MRQ_EN FIELD32(0x00000400)
919#define TX_LINK_CFG_TX_RDG_EN FIELD32(0x00000800)
920#define TX_LINK_CFG_TX_CF_ACK_EN FIELD32(0x00001000)
921#define TX_LINK_CFG_REMOTE_MFB FIELD32(0x00ff0000)
922#define TX_LINK_CFG_REMOTE_MFS FIELD32(0xff000000)
923
924/*
925 * HT_FBK_CFG0:
926 */
927#define HT_FBK_CFG0 0x1354
928#define HT_FBK_CFG0_HTMCS0FBK FIELD32(0x0000000f)
929#define HT_FBK_CFG0_HTMCS1FBK FIELD32(0x000000f0)
930#define HT_FBK_CFG0_HTMCS2FBK FIELD32(0x00000f00)
931#define HT_FBK_CFG0_HTMCS3FBK FIELD32(0x0000f000)
932#define HT_FBK_CFG0_HTMCS4FBK FIELD32(0x000f0000)
933#define HT_FBK_CFG0_HTMCS5FBK FIELD32(0x00f00000)
934#define HT_FBK_CFG0_HTMCS6FBK FIELD32(0x0f000000)
935#define HT_FBK_CFG0_HTMCS7FBK FIELD32(0xf0000000)
936
937/*
938 * HT_FBK_CFG1:
939 */
940#define HT_FBK_CFG1 0x1358
941#define HT_FBK_CFG1_HTMCS8FBK FIELD32(0x0000000f)
942#define HT_FBK_CFG1_HTMCS9FBK FIELD32(0x000000f0)
943#define HT_FBK_CFG1_HTMCS10FBK FIELD32(0x00000f00)
944#define HT_FBK_CFG1_HTMCS11FBK FIELD32(0x0000f000)
945#define HT_FBK_CFG1_HTMCS12FBK FIELD32(0x000f0000)
946#define HT_FBK_CFG1_HTMCS13FBK FIELD32(0x00f00000)
947#define HT_FBK_CFG1_HTMCS14FBK FIELD32(0x0f000000)
948#define HT_FBK_CFG1_HTMCS15FBK FIELD32(0xf0000000)
949
950/*
951 * LG_FBK_CFG0:
952 */
953#define LG_FBK_CFG0 0x135c
954#define LG_FBK_CFG0_OFDMMCS0FBK FIELD32(0x0000000f)
955#define LG_FBK_CFG0_OFDMMCS1FBK FIELD32(0x000000f0)
956#define LG_FBK_CFG0_OFDMMCS2FBK FIELD32(0x00000f00)
957#define LG_FBK_CFG0_OFDMMCS3FBK FIELD32(0x0000f000)
958#define LG_FBK_CFG0_OFDMMCS4FBK FIELD32(0x000f0000)
959#define LG_FBK_CFG0_OFDMMCS5FBK FIELD32(0x00f00000)
960#define LG_FBK_CFG0_OFDMMCS6FBK FIELD32(0x0f000000)
961#define LG_FBK_CFG0_OFDMMCS7FBK FIELD32(0xf0000000)
962
963/*
964 * LG_FBK_CFG1:
965 */
966#define LG_FBK_CFG1 0x1360
967#define LG_FBK_CFG0_CCKMCS0FBK FIELD32(0x0000000f)
968#define LG_FBK_CFG0_CCKMCS1FBK FIELD32(0x000000f0)
969#define LG_FBK_CFG0_CCKMCS2FBK FIELD32(0x00000f00)
970#define LG_FBK_CFG0_CCKMCS3FBK FIELD32(0x0000f000)
971
972/*
973 * CCK_PROT_CFG: CCK Protection
974 * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
975 * PROTECT_CTRL: Protection control frame type for CCK TX
976 * 0:none, 1:RTS/CTS, 2:CTS-to-self
977 * PROTECT_NAV: TXOP protection type for CCK TX
978 * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect
979 * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
 * TX_OP_ALLOW_OFDM: OFDM TXOP allowance, 0:disallow
 * TX_OP_ALLOW_MM20: MM20 TXOP allowance, 0:disallow
 * TX_OP_ALLOW_MM40: MM40 TXOP allowance, 0:disallow
 * TX_OP_ALLOW_GF20: GF20 TXOP allowance, 0:disallow
 * TX_OP_ALLOW_GF40: GF40 TXOP allowance, 0:disallow
985 * RTS_TH_EN: RTS threshold enable on CCK TX
986 */
987#define CCK_PROT_CFG 0x1364
988#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
989#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
990#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
991#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
992#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
993#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
994#define CCK_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
995#define CCK_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
996#define CCK_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
997#define CCK_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
998
999/*
1000 * OFDM_PROT_CFG: OFDM Protection
1001 */
1002#define OFDM_PROT_CFG 0x1368
1003#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1004#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1005#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1006#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1007#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1008#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1009#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1010#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1011#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1012#define OFDM_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1013
1014/*
1015 * MM20_PROT_CFG: MM20 Protection
1016 */
1017#define MM20_PROT_CFG 0x136c
1018#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1019#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1020#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1021#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1022#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1023#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1024#define MM20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1025#define MM20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1026#define MM20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1027#define MM20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1028
1029/*
1030 * MM40_PROT_CFG: MM40 Protection
1031 */
1032#define MM40_PROT_CFG 0x1370
1033#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1034#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1035#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1036#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1037#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1038#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1039#define MM40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1040#define MM40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1041#define MM40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1042#define MM40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1043
1044/*
1045 * GF20_PROT_CFG: GF20 Protection
1046 */
1047#define GF20_PROT_CFG 0x1374
1048#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1049#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1050#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1051#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1052#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1053#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1054#define GF20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1055#define GF20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1056#define GF20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1057#define GF20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1058
1059/*
1060 * GF40_PROT_CFG: GF40 Protection
1061 */
1062#define GF40_PROT_CFG 0x1378
1063#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1064#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1065#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1066#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1067#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1068#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1069#define GF40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1070#define GF40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1071#define GF40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1072#define GF40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1073
1074/*
1075 * EXP_CTS_TIME:
1076 */
1077#define EXP_CTS_TIME 0x137c
1078
1079/*
1080 * EXP_ACK_TIME:
1081 */
1082#define EXP_ACK_TIME 0x1380
1083
1084/*
1085 * RX_FILTER_CFG: RX configuration register.
1086 */
1087#define RX_FILTER_CFG 0x1400
1088#define RX_FILTER_CFG_DROP_CRC_ERROR FIELD32(0x00000001)
1089#define RX_FILTER_CFG_DROP_PHY_ERROR FIELD32(0x00000002)
1090#define RX_FILTER_CFG_DROP_NOT_TO_ME FIELD32(0x00000004)
1091#define RX_FILTER_CFG_DROP_NOT_MY_BSSD FIELD32(0x00000008)
1092#define RX_FILTER_CFG_DROP_VER_ERROR FIELD32(0x00000010)
1093#define RX_FILTER_CFG_DROP_MULTICAST FIELD32(0x00000020)
1094#define RX_FILTER_CFG_DROP_BROADCAST FIELD32(0x00000040)
1095#define RX_FILTER_CFG_DROP_DUPLICATE FIELD32(0x00000080)
1096#define RX_FILTER_CFG_DROP_CF_END_ACK FIELD32(0x00000100)
1097#define RX_FILTER_CFG_DROP_CF_END FIELD32(0x00000200)
1098#define RX_FILTER_CFG_DROP_ACK FIELD32(0x00000400)
1099#define RX_FILTER_CFG_DROP_CTS FIELD32(0x00000800)
1100#define RX_FILTER_CFG_DROP_RTS FIELD32(0x00001000)
1101#define RX_FILTER_CFG_DROP_PSPOLL FIELD32(0x00002000)
1102#define RX_FILTER_CFG_DROP_BA FIELD32(0x00004000)
1103#define RX_FILTER_CFG_DROP_BAR FIELD32(0x00008000)
1104#define RX_FILTER_CFG_DROP_CNTL FIELD32(0x00010000)
1105
1106/*
1107 * AUTO_RSP_CFG:
1108 * AUTORESPONDER: 0: disable, 1: enable
1109 * BAC_ACK_POLICY: 0:long, 1:short preamble
1110 * CTS_40_MMODE: Response CTS 40MHz duplicate mode
1111 * CTS_40_MREF: Response CTS 40MHz duplicate mode
1112 * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble
1113 * DUAL_CTS_EN: Power bit value in control frame
1114 * ACK_CTS_PSM_BIT:Power bit value in control frame
1115 */
1116#define AUTO_RSP_CFG 0x1404
1117#define AUTO_RSP_CFG_AUTORESPONDER FIELD32(0x00000001)
1118#define AUTO_RSP_CFG_BAC_ACK_POLICY FIELD32(0x00000002)
1119#define AUTO_RSP_CFG_CTS_40_MMODE FIELD32(0x00000004)
1120#define AUTO_RSP_CFG_CTS_40_MREF FIELD32(0x00000008)
1121#define AUTO_RSP_CFG_AR_PREAMBLE FIELD32(0x00000010)
1122#define AUTO_RSP_CFG_DUAL_CTS_EN FIELD32(0x00000040)
1123#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT FIELD32(0x00000080)
1124
1125/*
1126 * LEGACY_BASIC_RATE:
1127 */
1128#define LEGACY_BASIC_RATE 0x1408
1129
1130/*
1131 * HT_BASIC_RATE:
1132 */
1133#define HT_BASIC_RATE 0x140c
1134
1135/*
1136 * HT_CTRL_CFG:
1137 */
1138#define HT_CTRL_CFG 0x1410
1139
1140/*
1141 * SIFS_COST_CFG:
1142 */
1143#define SIFS_COST_CFG 0x1414
1144
1145/*
1146 * RX_PARSER_CFG:
1147 * Set NAV for all received frames
1148 */
1149#define RX_PARSER_CFG 0x1418
1150
1151/*
1152 * TX_SEC_CNT0:
1153 */
1154#define TX_SEC_CNT0 0x1500
1155
1156/*
1157 * RX_SEC_CNT0:
1158 */
1159#define RX_SEC_CNT0 0x1504
1160
1161/*
1162 * CCMP_FC_MUTE:
1163 */
1164#define CCMP_FC_MUTE 0x1508
1165
1166/*
1167 * TXOP_HLDR_ADDR0:
1168 */
1169#define TXOP_HLDR_ADDR0 0x1600
1170
1171/*
1172 * TXOP_HLDR_ADDR1:
1173 */
1174#define TXOP_HLDR_ADDR1 0x1604
1175
1176/*
1177 * TXOP_HLDR_ET:
1178 */
1179#define TXOP_HLDR_ET 0x1608
1180
1181/*
1182 * QOS_CFPOLL_RA_DW0:
1183 */
1184#define QOS_CFPOLL_RA_DW0 0x160c
1185
1186/*
1187 * QOS_CFPOLL_RA_DW1:
1188 */
1189#define QOS_CFPOLL_RA_DW1 0x1610
1190
1191/*
1192 * QOS_CFPOLL_QC:
1193 */
1194#define QOS_CFPOLL_QC 0x1614
1195
1196/*
1197 * RX_STA_CNT0: RX PLCP error count & RX CRC error count
1198 */
1199#define RX_STA_CNT0 0x1700
1200#define RX_STA_CNT0_CRC_ERR FIELD32(0x0000ffff)
1201#define RX_STA_CNT0_PHY_ERR FIELD32(0xffff0000)
1202
1203/*
1204 * RX_STA_CNT1: RX False CCA count & RX LONG frame count
1205 */
1206#define RX_STA_CNT1 0x1704
1207#define RX_STA_CNT1_FALSE_CCA FIELD32(0x0000ffff)
1208#define RX_STA_CNT1_PLCP_ERR FIELD32(0xffff0000)
1209
1210/*
1211 * RX_STA_CNT2:
1212 */
1213#define RX_STA_CNT2 0x1708
1214#define RX_STA_CNT2_RX_DUPLI_COUNT FIELD32(0x0000ffff)
1215#define RX_STA_CNT2_RX_FIFO_OVERFLOW FIELD32(0xffff0000)
1216
1217/*
1218 * TX_STA_CNT0: TX Beacon count
1219 */
1220#define TX_STA_CNT0 0x170c
1221#define TX_STA_CNT0_TX_FAIL_COUNT FIELD32(0x0000ffff)
1222#define TX_STA_CNT0_TX_BEACON_COUNT FIELD32(0xffff0000)
1223
1224/*
1225 * TX_STA_CNT1: TX tx count
1226 */
1227#define TX_STA_CNT1 0x1710
1228#define TX_STA_CNT1_TX_SUCCESS FIELD32(0x0000ffff)
1229#define TX_STA_CNT1_TX_RETRANSMIT FIELD32(0xffff0000)
1230
1231/*
1232 * TX_STA_CNT2: TX tx count
1233 */
1234#define TX_STA_CNT2 0x1714
1235#define TX_STA_CNT2_TX_ZERO_LEN_COUNT FIELD32(0x0000ffff)
1236#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000)
1237
1238/*
1239 * TX_STA_FIFO: TX Result for specific PID status fifo register
1240 */
1241#define TX_STA_FIFO 0x1718
1242#define TX_STA_FIFO_VALID FIELD32(0x00000001)
1243#define TX_STA_FIFO_PID_TYPE FIELD32(0x0000001e)
1244#define TX_STA_FIFO_TX_SUCCESS FIELD32(0x00000020)
1245#define TX_STA_FIFO_TX_AGGRE FIELD32(0x00000040)
1246#define TX_STA_FIFO_TX_ACK_REQUIRED FIELD32(0x00000080)
1247#define TX_STA_FIFO_WCID FIELD32(0x0000ff00)
1248#define TX_STA_FIFO_MCS FIELD32(0x007f0000)
1249#define TX_STA_FIFO_PHYMODE FIELD32(0xc0000000)
1250
1251/*
1252 * TX_AGG_CNT: Debug counter
1253 */
1254#define TX_AGG_CNT 0x171c
1255#define TX_AGG_CNT_NON_AGG_TX_COUNT FIELD32(0x0000ffff)
1256#define TX_AGG_CNT_AGG_TX_COUNT FIELD32(0xffff0000)
1257
1258/*
1259 * TX_AGG_CNT0:
1260 */
1261#define TX_AGG_CNT0 0x1720
1262#define TX_AGG_CNT0_AGG_SIZE_1_COUNT FIELD32(0x0000ffff)
1263#define TX_AGG_CNT0_AGG_SIZE_2_COUNT FIELD32(0xffff0000)
1264
1265/*
1266 * TX_AGG_CNT1:
1267 */
1268#define TX_AGG_CNT1 0x1724
1269#define TX_AGG_CNT1_AGG_SIZE_3_COUNT FIELD32(0x0000ffff)
1270#define TX_AGG_CNT1_AGG_SIZE_4_COUNT FIELD32(0xffff0000)
1271
1272/*
1273 * TX_AGG_CNT2:
1274 */
1275#define TX_AGG_CNT2 0x1728
1276#define TX_AGG_CNT2_AGG_SIZE_5_COUNT FIELD32(0x0000ffff)
1277#define TX_AGG_CNT2_AGG_SIZE_6_COUNT FIELD32(0xffff0000)
1278
1279/*
1280 * TX_AGG_CNT3:
1281 */
1282#define TX_AGG_CNT3 0x172c
1283#define TX_AGG_CNT3_AGG_SIZE_7_COUNT FIELD32(0x0000ffff)
1284#define TX_AGG_CNT3_AGG_SIZE_8_COUNT FIELD32(0xffff0000)
1285
1286/*
1287 * TX_AGG_CNT4:
1288 */
1289#define TX_AGG_CNT4 0x1730
1290#define TX_AGG_CNT4_AGG_SIZE_9_COUNT FIELD32(0x0000ffff)
1291#define TX_AGG_CNT4_AGG_SIZE_10_COUNT FIELD32(0xffff0000)
1292
1293/*
1294 * TX_AGG_CNT5:
1295 */
1296#define TX_AGG_CNT5 0x1734
1297#define TX_AGG_CNT5_AGG_SIZE_11_COUNT FIELD32(0x0000ffff)
1298#define TX_AGG_CNT5_AGG_SIZE_12_COUNT FIELD32(0xffff0000)
1299
1300/*
1301 * TX_AGG_CNT6:
1302 */
1303#define TX_AGG_CNT6 0x1738
1304#define TX_AGG_CNT6_AGG_SIZE_13_COUNT FIELD32(0x0000ffff)
1305#define TX_AGG_CNT6_AGG_SIZE_14_COUNT FIELD32(0xffff0000)
1306
1307/*
1308 * TX_AGG_CNT7:
1309 */
1310#define TX_AGG_CNT7 0x173c
1311#define TX_AGG_CNT7_AGG_SIZE_15_COUNT FIELD32(0x0000ffff)
1312#define TX_AGG_CNT7_AGG_SIZE_16_COUNT FIELD32(0xffff0000)
1313
1314/*
1315 * MPDU_DENSITY_CNT:
1316 * TX_ZERO_DEL: TX zero length delimiter count
1317 * RX_ZERO_DEL: RX zero length delimiter count
1318 */
1319#define MPDU_DENSITY_CNT 0x1740
1320#define MPDU_DENSITY_CNT_TX_ZERO_DEL FIELD32(0x0000ffff)
1321#define MPDU_DENSITY_CNT_RX_ZERO_DEL FIELD32(0xffff0000)
1322
1323/*
1324 * Security key table memory.
1325 * MAC_WCID_BASE: 8-bytes (use only 6 bytes) * 256 entry
1326 * PAIRWISE_KEY_TABLE_BASE: 32-byte * 256 entry
1327 * MAC_IVEIV_TABLE_BASE: 8-byte * 256-entry
1328 * MAC_WCID_ATTRIBUTE_BASE: 4-byte * 256-entry
1329 * SHARED_KEY_TABLE_BASE: 32 bytes * 32-entry
1330 * SHARED_KEY_MODE_BASE: 4 bits * 32-entry
1331 */
1332#define MAC_WCID_BASE 0x1800
1333#define PAIRWISE_KEY_TABLE_BASE 0x4000
1334#define MAC_IVEIV_TABLE_BASE 0x6000
1335#define MAC_WCID_ATTRIBUTE_BASE 0x6800
1336#define SHARED_KEY_TABLE_BASE 0x6c00
1337#define SHARED_KEY_MODE_BASE 0x7000
1338
/*
 * Per-index entry address calculators for the tables above: each takes a
 * table index and yields the byte offset of that entry in device memory.
 */
#define MAC_WCID_ENTRY(__idx) \
 ( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
#define PAIRWISE_KEY_ENTRY(__idx) \
 ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
/*
 * Fixed: the entry offset is index * entry size. The original used '&'
 * (bitwise AND), which collapsed all IV/EIV entries onto a handful of
 * wrong offsets instead of indexing the 8-byte-per-entry table.
 */
#define MAC_IVEIV_ENTRY(__idx) \
 ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
#define MAC_WCID_ATTR_ENTRY(__idx) \
 ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
#define SHARED_KEY_ENTRY(__idx) \
 ( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
#define SHARED_KEY_MODE_ENTRY(__idx) \
 ( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
1351
/* WCID table entry: 6-byte station MAC address padded to an 8-byte stride. */
struct mac_wcid_entry {
 u8 mac[6];
 u8 reserved[2];
} __attribute__ ((packed));

/* 32-byte key table entry: 16-byte cipher key plus per-direction TKIP MICs. */
struct hw_key_entry {
 u8 key[16];
 u8 tx_mic[8];
 u8 rx_mic[8];
} __attribute__ ((packed));

/* 8-byte IV/EIV table entry (one per WCID, see MAC_IVEIV_TABLE_BASE). */
struct mac_iveiv_entry {
 u8 iv[8];
} __attribute__ ((packed));
1366
1367/*
1368 * MAC_WCID_ATTRIBUTE:
1369 */
1370#define MAC_WCID_ATTRIBUTE_KEYTAB FIELD32(0x00000001)
1371#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e)
1372#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070)
1373#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380)
1374
1375/*
1376 * SHARED_KEY_MODE:
1377 */
1378#define SHARED_KEY_MODE_BSS0_KEY0 FIELD32(0x00000007)
1379#define SHARED_KEY_MODE_BSS0_KEY1 FIELD32(0x00000070)
1380#define SHARED_KEY_MODE_BSS0_KEY2 FIELD32(0x00000700)
1381#define SHARED_KEY_MODE_BSS0_KEY3 FIELD32(0x00007000)
1382#define SHARED_KEY_MODE_BSS1_KEY0 FIELD32(0x00070000)
1383#define SHARED_KEY_MODE_BSS1_KEY1 FIELD32(0x00700000)
1384#define SHARED_KEY_MODE_BSS1_KEY2 FIELD32(0x07000000)
1385#define SHARED_KEY_MODE_BSS1_KEY3 FIELD32(0x70000000)
1386
1387/*
1388 * HOST-MCU communication
1389 */
1390
1391/*
1392 * H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
1393 */
1394#define H2M_MAILBOX_CSR 0x7010
1395#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff)
1396#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00)
1397#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000)
1398#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000)
1399
1400/*
1401 * H2M_MAILBOX_CID:
1402 */
1403#define H2M_MAILBOX_CID 0x7014
1404#define H2M_MAILBOX_CID_CMD0 FIELD32(0x000000ff)
1405#define H2M_MAILBOX_CID_CMD1 FIELD32(0x0000ff00)
1406#define H2M_MAILBOX_CID_CMD2 FIELD32(0x00ff0000)
1407#define H2M_MAILBOX_CID_CMD3 FIELD32(0xff000000)
1408
1409/*
1410 * H2M_MAILBOX_STATUS:
1411 */
1412#define H2M_MAILBOX_STATUS 0x701c
1413
1414/*
1415 * H2M_INT_SRC:
1416 */
1417#define H2M_INT_SRC 0x7024
1418
1419/*
1420 * H2M_BBP_AGENT:
1421 */
1422#define H2M_BBP_AGENT 0x7028
1423
1424/*
1425 * MCU_LEDCS: LED control for MCU Mailbox.
1426 */
1427#define MCU_LEDCS_LED_MODE FIELD8(0x1f)
1428#define MCU_LEDCS_POLARITY FIELD8(0x01)
1429
1430/*
1431 * HW_CS_CTS_BASE:
1432 * Carrier-sense CTS frame base address.
1433 * It's where mac stores carrier-sense frame for carrier-sense function.
1434 */
1435#define HW_CS_CTS_BASE 0x7700
1436
1437/*
1438 * HW_DFS_CTS_BASE:
1439 * FS CTS frame base address. It's where mac stores CTS frame for DFS.
1440 */
1441#define HW_DFS_CTS_BASE 0x7780
1442
1443/*
1444 * TXRX control registers - base address 0x3000
1445 */
1446
1447/*
1448 * TXRX_CSR1:
1449 * rt2860b UNKNOWN reg use R/O Reg Addr 0x77d0 first..
1450 */
1451#define TXRX_CSR1 0x77d0
1452
1453/*
1454 * HW_DEBUG_SETTING_BASE:
1455 * since NULL frame won't be that long (256 byte)
1456 * We steal 16 tail bytes to save debugging settings
1457 */
1458#define HW_DEBUG_SETTING_BASE 0x77f0
1459#define HW_DEBUG_SETTING_BASE2 0x7770
1460
1461/*
1462 * HW_BEACON_BASE
1463 * In order to support maximum 8 MBSS and its maximum length
1464 * is 512 bytes for each beacon
1465 * Three section discontinue memory segments will be used.
1466 * 1. The original region for BCN 0~3
1467 * 2. Extract memory from FCE table for BCN 4~5
1468 * 3. Extract memory from Pair-wise key table for BCN 6~7
1469 * It occupied those memory of wcid 238~253 for BCN 6
1470 * and wcid 222~237 for BCN 7
1471 *
1472 * IMPORTANT NOTE: Not sure why legacy driver does this,
1473 * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
1474 */
#define HW_BEACON_BASE0 0x7800
#define HW_BEACON_BASE1 0x7a00
#define HW_BEACON_BASE2 0x7c00
#define HW_BEACON_BASE3 0x7e00
#define HW_BEACON_BASE4 0x7200
#define HW_BEACON_BASE5 0x7400
#define HW_BEACON_BASE6 0x5dc0
#define HW_BEACON_BASE7 0x5bc0

/*
 * Map a beacon index (0..7) onto its base address in the three
 * discontiguous memory regions described above. Note BCN 6/7 grow
 * DOWNWARD from HW_BEACON_BASE6.
 * Fixed: __index is now parenthesized in every arithmetic use so that
 * expression arguments (e.g. HW_BEACON_OFFSET(i + 1)) expand correctly.
 */
#define HW_BEACON_OFFSET(__index) \
 ( ((__index) < 4) ? ( HW_BEACON_BASE0 + ((__index) * 0x0200) ) : \
 (((__index) < 6) ? ( HW_BEACON_BASE4 + (((__index) - 4) * 0x0200) ) : \
 (HW_BEACON_BASE6 - (((__index) - 6) * 0x0200))) )
1488
1489/*
1490 * 8051 firmware image.
1491 */
1492#define FIRMWARE_RT2860 "rt2860.bin"
1493#define FIRMWARE_IMAGE_BASE 0x2000
1494
1495/*
1496 * BBP registers.
1497 * The wordsize of the BBP is 8 bits.
1498 */
1499
1500/*
1501 * BBP 1: TX Antenna
1502 */
1503#define BBP1_TX_POWER FIELD8(0x07)
1504#define BBP1_TX_ANTENNA FIELD8(0x18)
1505
1506/*
1507 * BBP 3: RX Antenna
1508 */
1509#define BBP3_RX_ANTENNA FIELD8(0x18)
1510#define BBP3_HT40_PLUS FIELD8(0x20)
1511
1512/*
1513 * BBP 4: Bandwidth
1514 */
1515#define BBP4_TX_BF FIELD8(0x01)
1516#define BBP4_BANDWIDTH FIELD8(0x18)
1517
1518/*
1519 * RFCSR registers
1520 * The wordsize of the RFCSR is 8 bits.
1521 */
1522
1523/*
1524 * RFCSR 6:
1525 */
1526#define RFCSR6_R FIELD8(0x03)
1527
1528/*
1529 * RFCSR 7:
1530 */
1531#define RFCSR7_RF_TUNING FIELD8(0x01)
1532
1533/*
1534 * RFCSR 12:
1535 */
1536#define RFCSR12_TX_POWER FIELD8(0x1f)
1537
1538/*
1539 * RFCSR 22:
1540 */
1541#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
1542
1543/*
1544 * RFCSR 23:
1545 */
1546#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
1547
1548/*
1549 * RFCSR 30:
1550 */
1551#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
1552
1553/*
1554 * RF registers
1555 */
1556
1557/*
1558 * RF 2
1559 */
1560#define RF2_ANTENNA_RX2 FIELD32(0x00000040)
1561#define RF2_ANTENNA_TX1 FIELD32(0x00004000)
1562#define RF2_ANTENNA_RX1 FIELD32(0x00020000)
1563
1564/*
1565 * RF 3
1566 */
1567#define RF3_TXPOWER_G FIELD32(0x00003e00)
1568#define RF3_TXPOWER_A_7DBM_BOOST FIELD32(0x00000200)
1569#define RF3_TXPOWER_A FIELD32(0x00003c00)
1570
1571/*
1572 * RF 4
1573 */
1574#define RF4_TXPOWER_G FIELD32(0x000007c0)
1575#define RF4_TXPOWER_A_7DBM_BOOST FIELD32(0x00000040)
1576#define RF4_TXPOWER_A FIELD32(0x00000780)
1577#define RF4_FREQ_OFFSET FIELD32(0x001f8000)
1578#define RF4_HT40 FIELD32(0x00200000)
1579
1580/*
1581 * EEPROM content.
1582 * The wordsize of the EEPROM is 16 bits.
1583 */
1584
1585/*
1586 * EEPROM Version
1587 */
1588#define EEPROM_VERSION 0x0001
1589#define EEPROM_VERSION_FAE FIELD16(0x00ff)
1590#define EEPROM_VERSION_VERSION FIELD16(0xff00)
1591
1592/*
1593 * HW MAC address.
1594 */
1595#define EEPROM_MAC_ADDR_0 0x0002
1596#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
1597#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
1598#define EEPROM_MAC_ADDR_1 0x0003
1599#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
1600#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
1601#define EEPROM_MAC_ADDR_2 0x0004
1602#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
1603#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
1604
1605/*
1606 * EEPROM ANTENNA config
1607 * RXPATH: 1: 1R, 2: 2R, 3: 3R
1608 * TXPATH: 1: 1T, 2: 2T
1609 */
1610#define EEPROM_ANTENNA 0x001a
1611#define EEPROM_ANTENNA_RXPATH FIELD16(0x000f)
1612#define EEPROM_ANTENNA_TXPATH FIELD16(0x00f0)
1613#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0f00)
1614
1615/*
1616 * EEPROM NIC config
1617 * CARDBUS_ACCEL: 0 - enable, 1 - disable
1618 */
1619#define EEPROM_NIC 0x001b
1620#define EEPROM_NIC_HW_RADIO FIELD16(0x0001)
1621#define EEPROM_NIC_DYNAMIC_TX_AGC FIELD16(0x0002)
1622#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0004)
1623#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0008)
1624#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0010)
1625#define EEPROM_NIC_BW40M_SB_BG FIELD16(0x0020)
1626#define EEPROM_NIC_BW40M_SB_A FIELD16(0x0040)
1627#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
1628#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
1629#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
1630
1631/*
1632 * EEPROM frequency
1633 */
1634#define EEPROM_FREQ 0x001d
1635#define EEPROM_FREQ_OFFSET FIELD16(0x00ff)
1636#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00)
1637#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000)
1638
1639/*
1640 * EEPROM LED
1641 * POLARITY_RDY_G: Polarity RDY_G setting.
1642 * POLARITY_RDY_A: Polarity RDY_A setting.
1643 * POLARITY_ACT: Polarity ACT setting.
1644 * POLARITY_GPIO_0: Polarity GPIO0 setting.
1645 * POLARITY_GPIO_1: Polarity GPIO1 setting.
1646 * POLARITY_GPIO_2: Polarity GPIO2 setting.
1647 * POLARITY_GPIO_3: Polarity GPIO3 setting.
1648 * POLARITY_GPIO_4: Polarity GPIO4 setting.
1649 * LED_MODE: Led mode.
1650 */
1651#define EEPROM_LED1 0x001e
1652#define EEPROM_LED2 0x001f
1653#define EEPROM_LED3 0x0020
1654#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
1655#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
1656#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
1657#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008)
1658#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010)
1659#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020)
1660#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040)
1661#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080)
1662#define EEPROM_LED_LED_MODE FIELD16(0x1f00)
1663
1664/*
1665 * EEPROM LNA
1666 */
1667#define EEPROM_LNA 0x0022
1668#define EEPROM_LNA_BG FIELD16(0x00ff)
1669#define EEPROM_LNA_A0 FIELD16(0xff00)
1670
1671/*
1672 * EEPROM RSSI BG offset
1673 */
1674#define EEPROM_RSSI_BG 0x0023
1675#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff)
1676#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00)
1677
1678/*
1679 * EEPROM RSSI BG2 offset
1680 */
1681#define EEPROM_RSSI_BG2 0x0024
1682#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff)
1683#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
1684
1685/*
1686 * EEPROM RSSI A offset
1687 */
1688#define EEPROM_RSSI_A 0x0025
1689#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff)
1690#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00)
1691
1692/*
1693 * EEPROM RSSI A2 offset
1694 */
1695#define EEPROM_RSSI_A2 0x0026
1696#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
1697#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
1698
1699/*
1700 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
1701 * This is delta in 40MHZ.
1702 * VALUE: Tx Power dalta value (MAX=4)
1703 * TYPE: 1: Plus the delta value, 0: minus the delta value
1704 * TXPOWER: Enable:
1705 */
1706#define EEPROM_TXPOWER_DELTA 0x0028
1707#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f)
1708#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040)
1709#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080)
1710
1711/*
1712 * EEPROM TXPOWER 802.11BG
1713 */
1714#define EEPROM_TXPOWER_BG1 0x0029
1715#define EEPROM_TXPOWER_BG2 0x0030
1716#define EEPROM_TXPOWER_BG_SIZE 7
1717#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff)
1718#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)
1719
1720/*
1721 * EEPROM TXPOWER 802.11A
1722 */
1723#define EEPROM_TXPOWER_A1 0x003c
1724#define EEPROM_TXPOWER_A2 0x0053
1725#define EEPROM_TXPOWER_A_SIZE 6
1726#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff)
1727#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)
1728
1729/*
1730 * EEPROM TXpower byrate: 20MHZ power
1731 */
1732#define EEPROM_TXPOWER_BYRATE 0x006f
1733
1734/*
1735 * EEPROM BBP.
1736 */
1737#define EEPROM_BBP_START 0x0078
1738#define EEPROM_BBP_SIZE 16
1739#define EEPROM_BBP_VALUE FIELD16(0x00ff)
1740#define EEPROM_BBP_REG_ID FIELD16(0xff00)
1741
1742/*
1743 * MCU mailbox commands.
1744 */
1745#define MCU_SLEEP 0x30
1746#define MCU_WAKEUP 0x31
1747#define MCU_RADIO_OFF 0x35
1748#define MCU_CURRENT 0x36
1749#define MCU_LED 0x50
1750#define MCU_LED_STRENGTH 0x51
1751#define MCU_LED_1 0x52
1752#define MCU_LED_2 0x53
1753#define MCU_LED_3 0x54
1754#define MCU_RADAR 0x60
1755#define MCU_BOOT_SIGNAL 0x72
1756#define MCU_BBP_SIGNAL 0x80
1757#define MCU_POWER_SAVE 0x83
1758
1759/*
1760 * MCU mailbox tokens
1761 */
1762#define TOKEN_WAKUP 3
1763
1764/*
1765 * DMA descriptor defines.
1766 */
1767#define TXD_DESC_SIZE ( 4 * sizeof(__le32) )
1768#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) )
1769#define RXD_DESC_SIZE ( 4 * sizeof(__le32) )
1770#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
1771
1772/*
1773 * TX descriptor format for TX, PRIO and Beacon Ring.
1774 */
1775
1776/*
1777 * Word0
1778 */
1779#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
1780
1781/*
1782 * Word1
1783 */
1784#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
1785#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
1786#define TXD_W1_BURST FIELD32(0x00008000)
1787#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
1788#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
1789#define TXD_W1_DMA_DONE FIELD32(0x80000000)
1790
1791/*
1792 * Word2
1793 */
1794#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
1795
1796/*
1797 * Word3
1798 * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
1799 * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
1800 * 0:MGMT, 1:HCCA 2:EDCA
1801 */
1802#define TXD_W3_WIV FIELD32(0x01000000)
1803#define TXD_W3_QSEL FIELD32(0x06000000)
1804#define TXD_W3_TCO FIELD32(0x20000000)
1805#define TXD_W3_UCO FIELD32(0x40000000)
1806#define TXD_W3_ICO FIELD32(0x80000000)
1807
1808/*
1809 * TX WI structure
1810 */
1811
1812/*
1813 * Word0
1814 * FRAG: 1 To inform TKIP engine this is a fragment.
1815 * MIMO_PS: The remote peer is in dynamic MIMO-PS mode
1816 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs
1817 * BW: Channel bandwidth 20MHz or 40 MHz
1818 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
1819 */
1820#define TXWI_W0_FRAG FIELD32(0x00000001)
1821#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
1822#define TXWI_W0_CF_ACK FIELD32(0x00000004)
1823#define TXWI_W0_TS FIELD32(0x00000008)
1824#define TXWI_W0_AMPDU FIELD32(0x00000010)
1825#define TXWI_W0_MPDU_DENSITY FIELD32(0x000000e0)
1826#define TXWI_W0_TX_OP FIELD32(0x00000300)
1827#define TXWI_W0_MCS FIELD32(0x007f0000)
1828#define TXWI_W0_BW FIELD32(0x00800000)
1829#define TXWI_W0_SHORT_GI FIELD32(0x01000000)
1830#define TXWI_W0_STBC FIELD32(0x06000000)
1831#define TXWI_W0_IFS FIELD32(0x08000000)
1832#define TXWI_W0_PHYMODE FIELD32(0xc0000000)
1833
1834/*
1835 * Word1
1836 */
1837#define TXWI_W1_ACK FIELD32(0x00000001)
1838#define TXWI_W1_NSEQ FIELD32(0x00000002)
1839#define TXWI_W1_BW_WIN_SIZE FIELD32(0x000000fc)
1840#define TXWI_W1_WIRELESS_CLI_ID FIELD32(0x0000ff00)
1841#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
1842#define TXWI_W1_PACKETID FIELD32(0xf0000000)
1843
1844/*
1845 * Word2
1846 */
1847#define TXWI_W2_IV FIELD32(0xffffffff)
1848
1849/*
1850 * Word3
1851 */
1852#define TXWI_W3_EIV FIELD32(0xffffffff)
1853
1854/*
1855 * RX descriptor format for RX Ring.
1856 */
1857
1858/*
1859 * Word0
1860 */
1861#define RXD_W0_SDP0 FIELD32(0xffffffff)
1862
1863/*
1864 * Word1
1865 */
1866#define RXD_W1_SDL1 FIELD32(0x00003fff)
1867#define RXD_W1_SDL0 FIELD32(0x3fff0000)
1868#define RXD_W1_LS0 FIELD32(0x40000000)
1869#define RXD_W1_DMA_DONE FIELD32(0x80000000)
1870
1871/*
1872 * Word2
1873 */
1874#define RXD_W2_SDP1 FIELD32(0xffffffff)
1875
1876/*
1877 * Word3
1878 * AMSDU: RX with 802.3 header, not 802.11 header.
1879 * DECRYPTED: This frame is being decrypted.
1880 */
1881#define RXD_W3_BA FIELD32(0x00000001)
1882#define RXD_W3_DATA FIELD32(0x00000002)
1883#define RXD_W3_NULLDATA FIELD32(0x00000004)
1884#define RXD_W3_FRAG FIELD32(0x00000008)
1885#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
1886#define RXD_W3_MULTICAST FIELD32(0x00000020)
1887#define RXD_W3_BROADCAST FIELD32(0x00000040)
1888#define RXD_W3_MY_BSS FIELD32(0x00000080)
1889#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
1890#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
1891#define RXD_W3_AMSDU FIELD32(0x00000800)
1892#define RXD_W3_HTC FIELD32(0x00001000)
1893#define RXD_W3_RSSI FIELD32(0x00002000)
1894#define RXD_W3_L2PAD FIELD32(0x00004000)
1895#define RXD_W3_AMPDU FIELD32(0x00008000)
1896#define RXD_W3_DECRYPTED FIELD32(0x00010000)
1897#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
1898#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
1899
1900/*
1901 * RX WI structure
1902 */
1903
1904/*
1905 * Word0
1906 */
1907#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
1908#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
1909#define RXWI_W0_BSSID FIELD32(0x00001c00)
1910#define RXWI_W0_UDF FIELD32(0x0000e000)
1911#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
1912#define RXWI_W0_TID FIELD32(0xf0000000)
1913
1914/*
1915 * Word1
1916 */
1917#define RXWI_W1_FRAG FIELD32(0x0000000f)
1918#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
1919#define RXWI_W1_MCS FIELD32(0x007f0000)
1920#define RXWI_W1_BW FIELD32(0x00800000)
1921#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
1922#define RXWI_W1_STBC FIELD32(0x06000000)
1923#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
1924
1925/*
1926 * Word2
1927 */
1928#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
1929#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
1930#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
1931
1932/*
1933 * Word3
1934 */
1935#define RXWI_W3_SNR0 FIELD32(0x000000ff)
1936#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
1937
1938/*
1939 * Macros for converting txpower from EEPROM to mac80211 value
1940 * and from mac80211 value to register value.
1941 */
1942#define MIN_G_TXPOWER 0
1943#define MIN_A_TXPOWER -7
1944#define MAX_G_TXPOWER 31
1945#define MAX_A_TXPOWER 15
1946#define DEFAULT_TXPOWER 5
1947
1948#define TXPOWER_G_FROM_DEV(__txpower) \
1949 ((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1950
1951#define TXPOWER_G_TO_DEV(__txpower) \
1952 clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
1953
1954#define TXPOWER_A_FROM_DEV(__txpower) \
1955 ((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1956
1957#define TXPOWER_A_TO_DEV(__txpower) \
1958 clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
1959
1960#endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a084077a1c61..9fe770f7d7bb 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1994,7 +1994,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1994 rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size); 1994 rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
1995 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID, 1995 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
1996 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? 1996 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
1997 (skbdesc->entry->entry_idx + 1) : 0xff); 1997 txdesc->key_idx : 0xff);
1998 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, 1998 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
1999 skb->len - txdesc->l2pad); 1999 skb->len - txdesc->l2pad);
2000 rt2x00_set_field32(&word, TXWI_W1_PACKETID, 2000 rt2x00_set_field32(&word, TXWI_W1_PACKETID,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 27bc6b7fbfde..196de8ab8153 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -158,6 +158,13 @@ struct rt2x00_chip {
158#define RT2561 0x0302 158#define RT2561 0x0302
159#define RT2661 0x0401 159#define RT2661 0x0401
160#define RT2571 0x1300 160#define RT2571 0x1300
161#define RT2860 0x0601 /* 2.4GHz PCI/CB */
162#define RT2860D 0x0681 /* 2.4GHz, 5GHz PCI/CB */
163#define RT2890 0x0701 /* 2.4GHz PCIe */
164#define RT2890D 0x0781 /* 2.4GHz, 5GHz PCIe */
165#define RT2880 0x2880 /* WSOC */
166#define RT3052 0x3052 /* WSOC */
167#define RT3090 0x3090 /* 2.4GHz PCIe */
161#define RT2870 0x1600 168#define RT2870 0x1600
162 169
163 u16 rf; 170 u16 rf;
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
new file mode 100644
index 000000000000..539568c48953
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -0,0 +1,159 @@
1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00soc
23 Abstract: rt2x00 generic soc device routines.
24 */
25
26#include <linux/bug.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/platform_device.h>
30
31#include "rt2x00.h"
32#include "rt2x00soc.h"
33
34static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev)
35{
36 kfree(rt2x00dev->rf);
37 rt2x00dev->rf = NULL;
38
39 kfree(rt2x00dev->eeprom);
40 rt2x00dev->eeprom = NULL;
41}
42
43static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
44{
45 struct platform_device *pdev = to_platform_device(rt2x00dev->dev);
46 struct resource *res;
47
48 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
49 if (!res)
50 return -ENODEV;
51
52 rt2x00dev->csr.base = (void __iomem *)KSEG1ADDR(res->start);
53 if (!rt2x00dev->csr.base)
54 goto exit;
55
56 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
57 if (!rt2x00dev->eeprom)
58 goto exit;
59
60 rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
61 if (!rt2x00dev->rf)
62 goto exit;
63
64 return 0;
65
66exit:
67 ERROR_PROBE("Failed to allocate registers.\n");
68 rt2x00soc_free_reg(rt2x00dev);
69
70 return -ENOMEM;
71}
72
73int rt2x00soc_probe(struct platform_device *pdev,
74 const unsigned short chipset,
75 const struct rt2x00_ops *ops)
76{
77 struct ieee80211_hw *hw;
78 struct rt2x00_dev *rt2x00dev;
79 int retval;
80
81 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
82 if (!hw) {
83 ERROR_PROBE("Failed to allocate hardware.\n");
84 return -ENOMEM;
85 }
86
87 platform_set_drvdata(pdev, hw);
88
89 rt2x00dev = hw->priv;
90 rt2x00dev->dev = &pdev->dev;
91 rt2x00dev->ops = ops;
92 rt2x00dev->hw = hw;
93 rt2x00dev->irq = platform_get_irq(pdev, 0);
94 rt2x00dev->name = pdev->dev.driver->name;
95
96 rt2x00_set_chip_rt(rt2x00dev, chipset);
97
98 retval = rt2x00soc_alloc_reg(rt2x00dev);
99 if (retval)
100 goto exit_free_device;
101
102 retval = rt2x00lib_probe_dev(rt2x00dev);
103 if (retval)
104 goto exit_free_reg;
105
106 return 0;
107
108exit_free_reg:
109 rt2x00soc_free_reg(rt2x00dev);
110
111exit_free_device:
112 ieee80211_free_hw(hw);
113
114 return retval;
115}
116
117int rt2x00soc_remove(struct platform_device *pdev)
118{
119 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
120 struct rt2x00_dev *rt2x00dev = hw->priv;
121
122 /*
123 * Free all allocated data.
124 */
125 rt2x00lib_remove_dev(rt2x00dev);
126 rt2x00soc_free_reg(rt2x00dev);
127 ieee80211_free_hw(hw);
128
129 return 0;
130}
131EXPORT_SYMBOL_GPL(rt2x00soc_remove);
132
133#ifdef CONFIG_PM
134int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state)
135{
136 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
137 struct rt2x00_dev *rt2x00dev = hw->priv;
138
139 return rt2x00lib_suspend(rt2x00dev, state);
140}
141EXPORT_SYMBOL_GPL(rt2x00soc_suspend);
142
143int rt2x00soc_resume(struct platform_device *pdev)
144{
145 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
146 struct rt2x00_dev *rt2x00dev = hw->priv;
147
148 return rt2x00lib_resume(rt2x00dev);
149}
150EXPORT_SYMBOL_GPL(rt2x00soc_resume);
151#endif /* CONFIG_PM */
152
153/*
154 * rt2x00soc module information.
155 */
156MODULE_AUTHOR(DRV_PROJECT);
157MODULE_VERSION(DRV_VERSION);
158MODULE_DESCRIPTION("rt2x00 soc library");
159MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
new file mode 100644
index 000000000000..5cf114ac2b9c
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -0,0 +1,52 @@
1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00soc
23 Abstract: Data structures for the rt2x00soc module.
24 */
25
26#ifndef RT2X00SOC_H
27#define RT2X00SOC_H
28
29#define KSEG1ADDR(__ptr) __ptr
30
31#define __rt2x00soc_probe(__chipset, __ops) \
32static int __rt2x00soc_probe(struct platform_device *pdev) \
33{ \
34 return rt2x00soc_probe(pdev, (__chipset), (__ops)); \
35}
36
37/*
38 * SoC driver handlers.
39 */
40int rt2x00soc_probe(struct platform_device *pdev,
41 const unsigned short chipset,
42 const struct rt2x00_ops *ops);
43int rt2x00soc_remove(struct platform_device *pdev);
44#ifdef CONFIG_PM
45int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
46int rt2x00soc_resume(struct platform_device *pdev);
47#else
48#define rt2x00soc_suspend NULL
49#define rt2x00soc_resume NULL
50#endif /* CONFIG_PM */
51
52#endif /* RT2X00SOC_H */
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index b8f5ee33445e..14e7bb210075 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2389,10 +2389,13 @@ static struct usb_device_id rt73usb_device_table[] = {
2389 { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) }, 2389 { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
2390 { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) }, 2390 { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
2391 /* MSI */ 2391 /* MSI */
2392 { USB_DEVICE(0x0db0, 0x4600), USB_DEVICE_DATA(&rt73usb_ops) },
2392 { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) }, 2393 { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
2393 { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) }, 2394 { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
2394 { USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) }, 2395 { USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) },
2395 { USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) }, 2396 { USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) },
2397 /* Ovislink */
2398 { USB_DEVICE(0x1b75, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) },
2396 /* Ralink */ 2399 /* Ralink */
2397 { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, 2400 { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
2398 { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, 2401 { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
@@ -2420,6 +2423,8 @@ static struct usb_device_id rt73usb_device_table[] = {
2420 /* Planex */ 2423 /* Planex */
2421 { USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) }, 2424 { USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) },
2422 { USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) }, 2425 { USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) },
2426 /* WideTell */
2427 { USB_DEVICE(0x7167, 0x3840), USB_DEVICE_DATA(&rt73usb_ops) },
2423 /* Zcom */ 2428 /* Zcom */
2424 { USB_DEVICE(0x0cde, 0x001c), USB_DEVICE_DATA(&rt73usb_ops) }, 2429 { USB_DEVICE(0x0cde, 0x001c), USB_DEVICE_DATA(&rt73usb_ops) },
2425 /* ZyXEL */ 2430 /* ZyXEL */
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1103256ad989..48b0bfd6c55a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -183,8 +183,11 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
183 wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)", 183 wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)",
184 wl->chip_id); 184 wl->chip_id);
185 break; 185 break;
186 case CHIP_ID_1251_PG10:
187 case CHIP_ID_1251_PG11: 186 case CHIP_ID_1251_PG11:
187 wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG11)",
188 wl->chip_id);
189 break;
190 case CHIP_ID_1251_PG10:
188 default: 191 default:
189 wl1251_error("unsupported chip id: 0x%x", wl->chip_id); 192 wl1251_error("unsupported chip id: 0x%x", wl->chip_id);
190 ret = -ENODEV; 193 ret = -ENODEV;
@@ -1426,4 +1429,4 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
1426MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core"); 1429MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
1427MODULE_LICENSE("GPL"); 1430MODULE_LICENSE("GPL");
1428MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 1431MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
1429MODULE_ALIAS("spi:wl12xx"); 1432MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 17c54b59ef86..601fe0d67827 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -153,7 +153,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
153 beacon ? "beacon" : ""); 153 beacon ? "beacon" : "");
154 154
155 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 155 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
156 ieee80211_rx(wl->hw, skb); 156 ieee80211_rx_ni(wl->hw, skb);
157} 157}
158 158
159static void wl1251_rx_ack(struct wl1251 *wl) 159static void wl1251_rx_ack(struct wl1251 *wl)
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 14eff2b3d4c6..2cf8a2169d43 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -307,7 +307,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
307 307
308static struct spi_driver wl1251_spi_driver = { 308static struct spi_driver wl1251_spi_driver = {
309 .driver = { 309 .driver = {
310 .name = "wl12xx", 310 .name = "wl1251",
311 .bus = &spi_bus_type, 311 .bus = &spi_bus_type,
312 .owner = THIS_MODULE, 312 .owner = THIS_MODULE,
313 }, 313 },
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 55818f94017b..566f1521ec22 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -32,6 +32,8 @@
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34 34
35#include "wl1271_conf.h"
36
35#define DRIVER_NAME "wl1271" 37#define DRIVER_NAME "wl1271"
36#define DRIVER_PREFIX DRIVER_NAME ": " 38#define DRIVER_PREFIX DRIVER_NAME ": "
37 39
@@ -97,21 +99,42 @@ enum {
97 } while (0) 99 } while (0)
98 100
99#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ 101#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
100 CFG_BSSID_FILTER_EN) 102 CFG_BSSID_FILTER_EN | \
103 CFG_MC_FILTER_EN)
101 104
102#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \ 105#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
103 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \ 106 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
104 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ 107 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
105 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) 108 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
106 109
110#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
111
107#define WL1271_FW_NAME "wl1271-fw.bin" 112#define WL1271_FW_NAME "wl1271-fw.bin"
108#define WL1271_NVS_NAME "wl1271-nvs.bin" 113#define WL1271_NVS_NAME "wl1271-nvs.bin"
109 114
110#define WL1271_BUSY_WORD_LEN 8 115/*
116 * Enable/disable 802.11a support for WL1273
117 */
118#undef WL1271_80211A_ENABLED
119
120/*
121 * FIXME: for the wl1271, a busy word count of 1 here will result in a more
122 * optimal SPI interface. There is some SPI bug however, causing RXS time outs
123 * with this mode occasionally on boot, so lets have three for now. A value of
124 * three should make sure, that the chipset will always be ready, though this
125 * will impact throughput and latencies slightly.
126 */
127#define WL1271_BUSY_WORD_CNT 3
128#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
111 129
112#define WL1271_ELP_HW_STATE_ASLEEP 0 130#define WL1271_ELP_HW_STATE_ASLEEP 0
113#define WL1271_ELP_HW_STATE_IRQ 1 131#define WL1271_ELP_HW_STATE_IRQ 1
114 132
133#define WL1271_DEFAULT_BEACON_INT 100
134#define WL1271_DEFAULT_DTIM_PERIOD 1
135
136#define ACX_TX_DESCRIPTORS 32
137
115enum wl1271_state { 138enum wl1271_state {
116 WL1271_STATE_OFF, 139 WL1271_STATE_OFF,
117 WL1271_STATE_ON, 140 WL1271_STATE_ON,
@@ -134,6 +157,8 @@ struct wl1271_partition {
134struct wl1271_partition_set { 157struct wl1271_partition_set {
135 struct wl1271_partition mem; 158 struct wl1271_partition mem;
136 struct wl1271_partition reg; 159 struct wl1271_partition reg;
160 struct wl1271_partition mem2;
161 struct wl1271_partition mem3;
137}; 162};
138 163
139struct wl1271; 164struct wl1271;
@@ -258,15 +283,15 @@ struct wl1271_debugfs {
258 283
259/* FW status registers */ 284/* FW status registers */
260struct wl1271_fw_status { 285struct wl1271_fw_status {
261 u32 intr; 286 __le32 intr;
262 u8 fw_rx_counter; 287 u8 fw_rx_counter;
263 u8 drv_rx_counter; 288 u8 drv_rx_counter;
264 u8 reserved; 289 u8 reserved;
265 u8 tx_results_counter; 290 u8 tx_results_counter;
266 u32 rx_pkt_descs[NUM_RX_PKT_DESC]; 291 __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
267 u32 tx_released_blks[NUM_TX_QUEUES]; 292 __le32 tx_released_blks[NUM_TX_QUEUES];
268 u32 fw_localtime; 293 __le32 fw_localtime;
269 u32 padding[2]; 294 __le32 padding[2];
270} __attribute__ ((packed)); 295} __attribute__ ((packed));
271 296
272struct wl1271_rx_mem_pool_addr { 297struct wl1271_rx_mem_pool_addr {
@@ -274,6 +299,15 @@ struct wl1271_rx_mem_pool_addr {
274 u32 addr_extra; 299 u32 addr_extra;
275}; 300};
276 301
302struct wl1271_scan {
303 u8 state;
304 u8 ssid[IW_ESSID_MAX_SIZE+1];
305 size_t ssid_len;
306 u8 active;
307 u8 high_prio;
308 u8 probe_requests;
309};
310
277struct wl1271 { 311struct wl1271 {
278 struct ieee80211_hw *hw; 312 struct ieee80211_hw *hw;
279 bool mac80211_registered; 313 bool mac80211_registered;
@@ -288,10 +322,7 @@ struct wl1271 {
288 enum wl1271_state state; 322 enum wl1271_state state;
289 struct mutex mutex; 323 struct mutex mutex;
290 324
291 int physical_mem_addr; 325 struct wl1271_partition_set part;
292 int physical_reg_addr;
293 int virtual_mem_addr;
294 int virtual_reg_addr;
295 326
296 struct wl1271_chip chip; 327 struct wl1271_chip chip;
297 328
@@ -308,7 +339,6 @@ struct wl1271 {
308 u8 bss_type; 339 u8 bss_type;
309 u8 ssid[IW_ESSID_MAX_SIZE + 1]; 340 u8 ssid[IW_ESSID_MAX_SIZE + 1];
310 u8 ssid_len; 341 u8 ssid_len;
311 u8 listen_int;
312 int channel; 342 int channel;
313 343
314 struct wl1271_acx_mem_map *target_mem_map; 344 struct wl1271_acx_mem_map *target_mem_map;
@@ -332,10 +362,14 @@ struct wl1271 {
332 bool tx_queue_stopped; 362 bool tx_queue_stopped;
333 363
334 struct work_struct tx_work; 364 struct work_struct tx_work;
335 struct work_struct filter_work;
336 365
337 /* Pending TX frames */ 366 /* Pending TX frames */
338 struct sk_buff *tx_frames[16]; 367 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
368
369 /* Security sequence number counters */
370 u8 tx_security_last_seq;
371 u16 tx_security_seq_16;
372 u32 tx_security_seq_32;
339 373
340 /* FW Rx counter */ 374 /* FW Rx counter */
341 u32 rx_counter; 375 u32 rx_counter;
@@ -354,10 +388,17 @@ struct wl1271 {
354 388
355 /* Are we currently scanning */ 389 /* Are we currently scanning */
356 bool scanning; 390 bool scanning;
391 struct wl1271_scan scan;
357 392
358 /* Our association ID */ 393 /* Our association ID */
359 u16 aid; 394 u16 aid;
360 395
396 /* currently configured rate set */
397 u32 basic_rate_set;
398
399 /* The current band */
400 enum ieee80211_band band;
401
361 /* Default key (for WEP) */ 402 /* Default key (for WEP) */
362 u32 default_key; 403 u32 default_key;
363 404
@@ -368,6 +409,7 @@ struct wl1271 {
368 bool elp; 409 bool elp;
369 410
370 struct completion *elp_compl; 411 struct completion *elp_compl;
412 struct delayed_work elp_work;
371 413
372 /* we can be in psm, but not in elp, we have to differentiate */ 414 /* we can be in psm, but not in elp, we have to differentiate */
373 bool psm; 415 bool psm;
@@ -383,11 +425,20 @@ struct wl1271 {
383 425
384 u32 buffer_32; 426 u32 buffer_32;
385 u32 buffer_cmd; 427 u32 buffer_cmd;
386 u8 buffer_busyword[WL1271_BUSY_WORD_LEN]; 428 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
387 struct wl1271_rx_descriptor *rx_descriptor;
388 429
389 struct wl1271_fw_status *fw_status; 430 struct wl1271_fw_status *fw_status;
390 struct wl1271_tx_hw_res_if *tx_res_if; 431 struct wl1271_tx_hw_res_if *tx_res_if;
432
433 struct ieee80211_vif *vif;
434
435 /* Used for a workaround to send disconnect before rejoining */
436 bool joined;
437
438 /* Current chipset configuration */
439 struct conf_drv_settings conf;
440
441 struct list_head list;
391}; 442};
392 443
393int wl1271_plt_start(struct wl1271 *wl); 444int wl1271_plt_start(struct wl1271 *wl);
@@ -404,4 +455,13 @@ int wl1271_plt_stop(struct wl1271 *wl);
404/* WL1271 needs a 200ms sleep after power on */ 455/* WL1271 needs a 200ms sleep after power on */
405#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ 456#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
406 457
458static inline bool wl1271_11a_enabled(void)
459{
460#ifdef WL1271_80211A_ENABLED
461 return true;
462#else
463 return false;
464#endif
465}
466
407#endif 467#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index f622a4092615..bf5a8680a462 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -34,8 +34,7 @@
34#include "wl1271_spi.h" 34#include "wl1271_spi.h"
35#include "wl1271_ps.h" 35#include "wl1271_ps.h"
36 36
37int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event, 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
38 u8 listen_interval)
39{ 38{
40 struct acx_wake_up_condition *wake_up; 39 struct acx_wake_up_condition *wake_up;
41 int ret; 40 int ret;
@@ -48,8 +47,8 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event,
48 goto out; 47 goto out;
49 } 48 }
50 49
51 wake_up->wake_up_event = wake_up_event; 50 wake_up->wake_up_event = wl->conf.conn.wake_up_event;
52 wake_up->listen_interval = listen_interval; 51 wake_up->listen_interval = wl->conf.conn.listen_interval;
53 52
54 ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS, 53 ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS,
55 wake_up, sizeof(*wake_up)); 54 wake_up, sizeof(*wake_up));
@@ -137,7 +136,12 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
137 goto out; 136 goto out;
138 } 137 }
139 138
140 acx->current_tx_power = power * 10; 139 /*
140 * FIXME: This is a workaround needed while we don't the correct
141 * calibration, to avoid distortions
142 */
143 /* acx->current_tx_power = power * 10; */
144 acx->current_tx_power = 70;
141 145
142 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); 146 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
143 if (ret < 0) { 147 if (ret < 0) {
@@ -193,7 +197,7 @@ int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map,
193 return 0; 197 return 0;
194} 198}
195 199
196int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time) 200int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl)
197{ 201{
198 struct acx_rx_msdu_lifetime *acx; 202 struct acx_rx_msdu_lifetime *acx;
199 int ret; 203 int ret;
@@ -206,7 +210,7 @@ int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time)
206 goto out; 210 goto out;
207 } 211 }
208 212
209 acx->lifetime = life_time; 213 acx->lifetime = cpu_to_le32(wl->conf.rx.rx_msdu_life_time);
210 ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME, 214 ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
211 acx, sizeof(*acx)); 215 acx, sizeof(*acx));
212 if (ret < 0) { 216 if (ret < 0) {
@@ -232,8 +236,8 @@ int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter)
232 goto out; 236 goto out;
233 } 237 }
234 238
235 rx_config->config_options = config; 239 rx_config->config_options = cpu_to_le32(config);
236 rx_config->filter_options = filter; 240 rx_config->filter_options = cpu_to_le32(filter);
237 241
238 ret = wl1271_cmd_configure(wl, ACX_RX_CFG, 242 ret = wl1271_cmd_configure(wl, ACX_RX_CFG,
239 rx_config, sizeof(*rx_config)); 243 rx_config, sizeof(*rx_config));
@@ -260,7 +264,7 @@ int wl1271_acx_pd_threshold(struct wl1271 *wl)
260 goto out; 264 goto out;
261 } 265 }
262 266
263 /* FIXME: threshold value not set */ 267 pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
264 268
265 ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); 269 ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
266 if (ret < 0) { 270 if (ret < 0) {
@@ -300,7 +304,8 @@ out:
300 return ret; 304 return ret;
301} 305}
302 306
303int wl1271_acx_group_address_tbl(struct wl1271 *wl) 307int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
308 void *mc_list, u32 mc_list_len)
304{ 309{
305 struct acx_dot11_grp_addr_tbl *acx; 310 struct acx_dot11_grp_addr_tbl *acx;
306 int ret; 311 int ret;
@@ -314,9 +319,9 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl)
314 } 319 }
315 320
316 /* MAC filtering */ 321 /* MAC filtering */
317 acx->enabled = 0; 322 acx->enabled = enable;
318 acx->num_groups = 0; 323 acx->num_groups = mc_list_len;
319 memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN); 324 memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
320 325
321 ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL, 326 ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
322 acx, sizeof(*acx)); 327 acx, sizeof(*acx));
@@ -343,8 +348,8 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
343 348
344 wl1271_debug(DEBUG_ACX, "acx service period timeout"); 349 wl1271_debug(DEBUG_ACX, "acx service period timeout");
345 350
346 rx_timeout->ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF; 351 rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
347 rx_timeout->upsd_timeout = RX_TIMEOUT_UPSD_DEF; 352 rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
348 353
349 ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT, 354 ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT,
350 rx_timeout, sizeof(*rx_timeout)); 355 rx_timeout, sizeof(*rx_timeout));
@@ -372,7 +377,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold)
372 goto out; 377 goto out;
373 } 378 }
374 379
375 rts->threshold = rts_threshold; 380 rts->threshold = cpu_to_le16(rts_threshold);
376 381
377 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); 382 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
378 if (ret < 0) { 383 if (ret < 0) {
@@ -385,20 +390,29 @@ out:
385 return ret; 390 return ret;
386} 391}
387 392
388int wl1271_acx_beacon_filter_opt(struct wl1271 *wl) 393int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
389{ 394{
390 struct acx_beacon_filter_option *beacon_filter; 395 struct acx_beacon_filter_option *beacon_filter = NULL;
391 int ret; 396 int ret = 0;
392 397
393 wl1271_debug(DEBUG_ACX, "acx beacon filter opt"); 398 wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
394 399
400 if (enable_filter &&
401 wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
402 goto out;
403
395 beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL); 404 beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
396 if (!beacon_filter) { 405 if (!beacon_filter) {
397 ret = -ENOMEM; 406 ret = -ENOMEM;
398 goto out; 407 goto out;
399 } 408 }
400 409
401 beacon_filter->enable = 0; 410 beacon_filter->enable = enable_filter;
411
412 /*
413 * When set to zero, and the filter is enabled, beacons
414 * without the unicast TIM bit set are dropped.
415 */
402 beacon_filter->max_num_beacons = 0; 416 beacon_filter->max_num_beacons = 0;
403 417
404 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT, 418 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT,
@@ -416,7 +430,9 @@ out:
416int wl1271_acx_beacon_filter_table(struct wl1271 *wl) 430int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
417{ 431{
418 struct acx_beacon_filter_ie_table *ie_table; 432 struct acx_beacon_filter_ie_table *ie_table;
433 int i, idx = 0;
419 int ret; 434 int ret;
435 bool vendor_spec = false;
420 436
421 wl1271_debug(DEBUG_ACX, "acx beacon filter table"); 437 wl1271_debug(DEBUG_ACX, "acx beacon filter table");
422 438
@@ -426,8 +442,32 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
426 goto out; 442 goto out;
427 } 443 }
428 444
445 /* configure default beacon pass-through rules */
429 ie_table->num_ie = 0; 446 ie_table->num_ie = 0;
430 memset(ie_table->table, 0, BEACON_FILTER_TABLE_MAX_SIZE); 447 for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
448 struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
449 ie_table->table[idx++] = r->ie;
450 ie_table->table[idx++] = r->rule;
451
452 if (r->ie == WLAN_EID_VENDOR_SPECIFIC) {
453 /* only one vendor specific ie allowed */
454 if (vendor_spec)
455 continue;
456
457 /* for vendor specific rules configure the
458 additional fields */
459 memcpy(&(ie_table->table[idx]), r->oui,
460 CONF_BCN_IE_OUI_LEN);
461 idx += CONF_BCN_IE_OUI_LEN;
462 ie_table->table[idx++] = r->type;
463 memcpy(&(ie_table->table[idx]), r->version,
464 CONF_BCN_IE_VER_LEN);
465 idx += CONF_BCN_IE_VER_LEN;
466 vendor_spec = true;
467 }
468
469 ie_table->num_ie++;
470 }
431 471
432 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, 472 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE,
433 ie_table, sizeof(*ie_table)); 473 ie_table, sizeof(*ie_table));
@@ -441,6 +481,36 @@ out:
441 return ret; 481 return ret;
442} 482}
443 483
484int wl1271_acx_conn_monit_params(struct wl1271 *wl)
485{
486 struct acx_conn_monit_params *acx;
487 int ret;
488
489 wl1271_debug(DEBUG_ACX, "acx connection monitor parameters");
490
491 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
492 if (!acx) {
493 ret = -ENOMEM;
494 goto out;
495 }
496
497 acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold);
498 acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout);
499
500 ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
501 acx, sizeof(*acx));
502 if (ret < 0) {
503 wl1271_warning("failed to set connection monitor "
504 "parameters: %d", ret);
505 goto out;
506 }
507
508out:
509 kfree(acx);
510 return ret;
511}
512
513
444int wl1271_acx_sg_enable(struct wl1271 *wl) 514int wl1271_acx_sg_enable(struct wl1271 *wl)
445{ 515{
446 struct acx_bt_wlan_coex *pta; 516 struct acx_bt_wlan_coex *pta;
@@ -470,6 +540,7 @@ out:
470int wl1271_acx_sg_cfg(struct wl1271 *wl) 540int wl1271_acx_sg_cfg(struct wl1271 *wl)
471{ 541{
472 struct acx_bt_wlan_coex_param *param; 542 struct acx_bt_wlan_coex_param *param;
543 struct conf_sg_settings *c = &wl->conf.sg;
473 int ret; 544 int ret;
474 545
475 wl1271_debug(DEBUG_ACX, "acx sg cfg"); 546 wl1271_debug(DEBUG_ACX, "acx sg cfg");
@@ -481,34 +552,19 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
481 } 552 }
482 553
483 /* BT-WLAN coext parameters */ 554 /* BT-WLAN coext parameters */
484 param->min_rate = RATE_INDEX_24MBPS; 555 param->per_threshold = cpu_to_le32(c->per_threshold);
485 param->bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF; 556 param->max_scan_compensation_time =
486 param->wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF; 557 cpu_to_le32(c->max_scan_compensation_time);
487 param->sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF; 558 param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
488 param->rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF; 559 param->load_ratio = c->load_ratio;
489 param->tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF; 560 param->auto_ps_mode = c->auto_ps_mode;
490 param->rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF; 561 param->probe_req_compensation = c->probe_req_compensation;
491 param->tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF; 562 param->scan_window_compensation = c->scan_window_compensation;
492 param->wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF; 563 param->antenna_config = c->antenna_config;
493 param->bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF; 564 param->beacon_miss_threshold = c->beacon_miss_threshold;
494 param->next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF; 565 param->rate_adaptation_threshold =
495 param->wake_up_beacon = PTA_TIME_BEFORE_BEACON_DEF; 566 cpu_to_le32(c->rate_adaptation_threshold);
496 param->hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF; 567 param->rate_adaptation_snr = c->rate_adaptation_snr;
497 param->next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF;
498 param->antenna_type = PTA_ANTENNA_TYPE_DEF;
499 param->signal_type = PTA_SIGNALING_TYPE_DEF;
500 param->afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF;
501 param->quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF;
502 param->max_cts = PTA_MAX_NUM_CTS_DEF;
503 param->wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF;
504 param->bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF;
505 param->missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF;
506 param->wlan_elp_hp = PTA_ELP_HP_DEF;
507 param->bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF;
508 param->ack_mode_dual_ant = PTA_ACK_MODE_DEF;
509 param->pa_sd_enable = PTA_ALLOW_PA_SD_DEF;
510 param->pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF;
511 param->bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF;
512 568
513 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); 569 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
514 if (ret < 0) { 570 if (ret < 0) {
@@ -534,8 +590,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
534 goto out; 590 goto out;
535 } 591 }
536 592
537 detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D; 593 detection->rx_cca_threshold = cpu_to_le16(wl->conf.rx.rx_cca_threshold);
538 detection->tx_energy_detection = 0; 594 detection->tx_energy_detection = wl->conf.tx.tx_energy_detection;
539 595
540 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, 596 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
541 detection, sizeof(*detection)); 597 detection, sizeof(*detection));
@@ -562,10 +618,10 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
562 goto out; 618 goto out;
563 } 619 }
564 620
565 bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE; 621 bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
566 bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE; 622 bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
567 bb->rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE; 623 bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
568 bb->ps_poll_threshold = CONSECUTIVE_PS_POLL_FAILURE_DEF; 624 bb->ps_poll_threshold = wl->conf.conn.ps_poll_threshold;
569 625
570 ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb)); 626 ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb));
571 if (ret < 0) { 627 if (ret < 0) {
@@ -591,7 +647,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
591 goto out; 647 goto out;
592 } 648 }
593 649
594 acx_aid->aid = aid; 650 acx_aid->aid = cpu_to_le16(aid);
595 651
596 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); 652 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
597 if (ret < 0) { 653 if (ret < 0) {
@@ -618,9 +674,8 @@ int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask)
618 } 674 }
619 675
620 /* high event mask is unused */ 676 /* high event mask is unused */
621 mask->high_event_mask = 0xffffffff; 677 mask->high_event_mask = cpu_to_le32(0xffffffff);
622 678 mask->event_mask = cpu_to_le32(event_mask);
623 mask->event_mask = event_mask;
624 679
625 ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK, 680 ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK,
626 mask, sizeof(*mask)); 681 mask, sizeof(*mask));
@@ -703,9 +758,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
703 return 0; 758 return 0;
704} 759}
705 760
706int wl1271_acx_rate_policies(struct wl1271 *wl) 761int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
707{ 762{
708 struct acx_rate_policy *acx; 763 struct acx_rate_policy *acx;
764 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
709 int ret = 0; 765 int ret = 0;
710 766
711 wl1271_debug(DEBUG_ACX, "acx rate policies"); 767 wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -718,11 +774,11 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
718 } 774 }
719 775
720 /* configure one default (one-size-fits-all) rate class */ 776 /* configure one default (one-size-fits-all) rate class */
721 acx->rate_class_cnt = 1; 777 acx->rate_class_cnt = cpu_to_le32(1);
722 acx->rate_class[0].enabled_rates = ACX_RATE_MASK_ALL; 778 acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates);
723 acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT; 779 acx->rate_class[0].short_retry_limit = c->short_retry_limit;
724 acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT; 780 acx->rate_class[0].long_retry_limit = c->long_retry_limit;
725 acx->rate_class[0].aflags = 0; 781 acx->rate_class[0].aflags = c->aflags;
726 782
727 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); 783 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
728 if (ret < 0) { 784 if (ret < 0) {
@@ -749,22 +805,14 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl)
749 goto out; 805 goto out;
750 } 806 }
751 807
752 /* 808 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
753 * FIXME: Configure each AC with appropriate values (most suitable 809 struct conf_tx_ac_category *c = &(wl->conf.tx.ac_conf[i]);
754 * values will probably be different for each AC. 810 acx->ac = c->ac;
755 */ 811 acx->cw_min = c->cw_min;
756 for (i = 0; i < WL1271_ACX_AC_COUNT; i++) { 812 acx->cw_max = cpu_to_le16(c->cw_max);
757 acx->ac = i; 813 acx->aifsn = c->aifsn;
758
759 /*
760 * FIXME: The following default values originate from
761 * the TI reference driver. What do they mean?
762 */
763 acx->cw_min = 15;
764 acx->cw_max = 63;
765 acx->aifsn = 3;
766 acx->reserved = 0; 814 acx->reserved = 0;
767 acx->tx_op_limit = 0; 815 acx->tx_op_limit = cpu_to_le16(c->tx_op_limit);
768 816
769 ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx)); 817 ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
770 if (ret < 0) { 818 if (ret < 0) {
@@ -793,12 +841,15 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl)
793 goto out; 841 goto out;
794 } 842 }
795 843
796 /* FIXME: configure each TID with a different AC reference */ 844 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
797 for (i = 0; i < WL1271_ACX_TID_COUNT; i++) { 845 struct conf_tx_tid *c = &(wl->conf.tx.tid_conf[i]);
798 acx->queue_id = i; 846 acx->queue_id = c->queue_id;
799 acx->tsid = WL1271_ACX_AC_BE; 847 acx->channel_type = c->channel_type;
800 acx->ps_scheme = WL1271_ACX_PS_SCHEME_LEGACY; 848 acx->tsid = c->tsid;
801 acx->ack_policy = WL1271_ACX_ACK_POLICY_LEGACY; 849 acx->ps_scheme = c->ps_scheme;
850 acx->ack_policy = c->ack_policy;
851 acx->apsd_conf[0] = cpu_to_le32(c->apsd_conf[0]);
852 acx->apsd_conf[1] = cpu_to_le32(c->apsd_conf[1]);
802 853
803 ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx)); 854 ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
804 if (ret < 0) { 855 if (ret < 0) {
@@ -826,7 +877,7 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl)
826 goto out; 877 goto out;
827 } 878 }
828 879
829 acx->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD; 880 acx->frag_threshold = cpu_to_le16(wl->conf.tx.frag_threshold);
830 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); 881 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
831 if (ret < 0) { 882 if (ret < 0) {
832 wl1271_warning("Setting of frag threshold failed: %d", ret); 883 wl1271_warning("Setting of frag threshold failed: %d", ret);
@@ -852,8 +903,8 @@ int wl1271_acx_tx_config_options(struct wl1271 *wl)
852 goto out; 903 goto out;
853 } 904 }
854 905
855 acx->tx_compl_timeout = WL1271_ACX_TX_COMPL_TIMEOUT; 906 acx->tx_compl_timeout = cpu_to_le16(wl->conf.tx.tx_compl_timeout);
856 acx->tx_compl_threshold = WL1271_ACX_TX_COMPL_THRESHOLD; 907 acx->tx_compl_threshold = cpu_to_le16(wl->conf.tx.tx_compl_threshold);
857 ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx)); 908 ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx));
858 if (ret < 0) { 909 if (ret < 0) {
859 wl1271_warning("Setting of tx options failed: %d", ret); 910 wl1271_warning("Setting of tx options failed: %d", ret);
@@ -879,11 +930,11 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
879 } 930 }
880 931
881 /* memory config */ 932 /* memory config */
882 mem_conf->num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS); 933 mem_conf->num_stations = DEFAULT_NUM_STATIONS;
883 mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS; 934 mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
884 mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS; 935 mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
885 mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES; 936 mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
886 mem_conf->total_tx_descriptors = ACX_TX_DESCRIPTORS; 937 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
887 938
888 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, 939 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
889 sizeof(*mem_conf)); 940 sizeof(*mem_conf));
@@ -906,7 +957,7 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
906 return ret; 957 return ret;
907 958
908 wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map), 959 wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
909 GFP_KERNEL); 960 GFP_KERNEL);
910 if (!wl->target_mem_map) { 961 if (!wl->target_mem_map) {
911 wl1271_error("couldn't allocate target memory map"); 962 wl1271_error("couldn't allocate target memory map");
912 return -ENOMEM; 963 return -ENOMEM;
@@ -923,7 +974,8 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
923 } 974 }
924 975
925 /* initialize TX block book keeping */ 976 /* initialize TX block book keeping */
926 wl->tx_blocks_available = wl->target_mem_map->num_tx_mem_blocks; 977 wl->tx_blocks_available =
978 le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks);
927 wl1271_debug(DEBUG_TX, "available tx blocks: %d", 979 wl1271_debug(DEBUG_TX, "available tx blocks: %d",
928 wl->tx_blocks_available); 980 wl->tx_blocks_available);
929 981
@@ -943,10 +995,10 @@ int wl1271_acx_init_rx_interrupt(struct wl1271 *wl)
943 goto out; 995 goto out;
944 } 996 }
945 997
946 rx_conf->threshold = WL1271_RX_INTR_THRESHOLD_DEF; 998 rx_conf->threshold = cpu_to_le16(wl->conf.rx.irq_pkt_threshold);
947 rx_conf->timeout = WL1271_RX_INTR_TIMEOUT_DEF; 999 rx_conf->timeout = cpu_to_le16(wl->conf.rx.irq_timeout);
948 rx_conf->mblk_threshold = USHORT_MAX; /* Disabled */ 1000 rx_conf->mblk_threshold = cpu_to_le16(wl->conf.rx.irq_blk_threshold);
949 rx_conf->queue_type = RX_QUEUE_TYPE_RX_LOW_PRIORITY; 1001 rx_conf->queue_type = wl->conf.rx.queue_type;
950 1002
951 ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf, 1003 ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf,
952 sizeof(*rx_conf)); 1004 sizeof(*rx_conf));
@@ -959,3 +1011,124 @@ out:
959 kfree(rx_conf); 1011 kfree(rx_conf);
960 return ret; 1012 return ret;
961} 1013}
1014
1015int wl1271_acx_smart_reflex(struct wl1271 *wl)
1016{
1017 struct acx_smart_reflex_state *sr_state = NULL;
1018 struct acx_smart_reflex_config_params *sr_param = NULL;
1019 int i, ret;
1020
1021 wl1271_debug(DEBUG_ACX, "acx smart reflex");
1022
1023 sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
1024 if (!sr_param) {
1025 ret = -ENOMEM;
1026 goto out;
1027 }
1028
1029 for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
1030 struct conf_mart_reflex_err_table *e =
1031 &(wl->conf.init.sr_err_tbl[i]);
1032
1033 sr_param->error_table[i].len = e->len;
1034 sr_param->error_table[i].upper_limit = e->upper_limit;
1035 memcpy(sr_param->error_table[i].values, e->values, e->len);
1036 }
1037
1038 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
1039 sr_param, sizeof(*sr_param));
1040 if (ret < 0) {
1041 wl1271_warning("failed to set smart reflex params: %d", ret);
1042 goto out;
1043 }
1044
1045 sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
1046 if (!sr_state) {
1047 ret = -ENOMEM;
1048 goto out;
1049 }
1050
1051 /* enable smart reflex */
1052 sr_state->enable = wl->conf.init.sr_enable;
1053
1054 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
1055 sr_state, sizeof(*sr_state));
1056 if (ret < 0) {
1057 wl1271_warning("failed to set smart reflex params: %d", ret);
1058 goto out;
1059 }
1060
1061out:
1062 kfree(sr_state);
1063 kfree(sr_param);
1064 return ret;
1065
1066}
1067
1068int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
1069{
1070 struct wl1271_acx_bet_enable *acx = NULL;
1071 int ret = 0;
1072
1073 wl1271_debug(DEBUG_ACX, "acx bet enable");
1074
1075 if (enable && wl->conf.conn.bet_enable == CONF_BET_MODE_DISABLE)
1076 goto out;
1077
1078 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1079 if (!acx) {
1080 ret = -ENOMEM;
1081 goto out;
1082 }
1083
1084 acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
1085 acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
1086
1087 ret = wl1271_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
1088 if (ret < 0) {
1089 wl1271_warning("acx bet enable failed: %d", ret);
1090 goto out;
1091 }
1092
1093out:
1094 kfree(acx);
1095 return ret;
1096}
1097
1098int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1099 u8 version)
1100{
1101 struct wl1271_acx_arp_filter *acx;
1102 int ret;
1103
1104 wl1271_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
1105
1106 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1107 if (!acx) {
1108 ret = -ENOMEM;
1109 goto out;
1110 }
1111
1112 acx->version = version;
1113 acx->enable = enable;
1114
1115 if (enable == true) {
1116 if (version == ACX_IPV4_VERSION)
1117 memcpy(acx->address, address, ACX_IPV4_ADDR_SIZE);
1118 else if (version == ACX_IPV6_VERSION)
1119 memcpy(acx->address, address, sizeof(acx->address));
1120 else
1121 wl1271_error("Invalid IP version");
1122 }
1123
1124 ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER,
1125 acx, sizeof(*acx));
1126 if (ret < 0) {
1127 wl1271_warning("failed to set arp ip filter: %d", ret);
1128 goto out;
1129 }
1130
1131out:
1132 kfree(acx);
1133 return ret;
1134}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 9068daaf0ddf..2ce0a8128542 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -61,8 +61,9 @@
61 WL1271_ACX_INTR_HW_AVAILABLE | \ 61 WL1271_ACX_INTR_HW_AVAILABLE | \
62 WL1271_ACX_INTR_DATA) 62 WL1271_ACX_INTR_DATA)
63 63
64#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \ 64#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \
65 WL1271_ACX_INTR_EVENT_B | \ 65 WL1271_ACX_INTR_EVENT_B | \
66 WL1271_ACX_INTR_HW_AVAILABLE | \
66 WL1271_ACX_INTR_DATA) 67 WL1271_ACX_INTR_DATA)
67 68
68/* Target's information element */ 69/* Target's information element */
@@ -70,11 +71,11 @@ struct acx_header {
70 struct wl1271_cmd_header cmd; 71 struct wl1271_cmd_header cmd;
71 72
72 /* acx (or information element) header */ 73 /* acx (or information element) header */
73 u16 id; 74 __le16 id;
74 75
75 /* payload length (not including headers */ 76 /* payload length (not including headers */
76 u16 len; 77 __le16 len;
77}; 78} __attribute__ ((packed));
78 79
79struct acx_error_counter { 80struct acx_error_counter {
80 struct acx_header header; 81 struct acx_header header;
@@ -82,21 +83,21 @@ struct acx_error_counter {
82 /* The number of PLCP errors since the last time this */ 83 /* The number of PLCP errors since the last time this */
83 /* information element was interrogated. This field is */ 84 /* information element was interrogated. This field is */
84 /* automatically cleared when it is interrogated.*/ 85 /* automatically cleared when it is interrogated.*/
85 u32 PLCP_error; 86 __le32 PLCP_error;
86 87
87 /* The number of FCS errors since the last time this */ 88 /* The number of FCS errors since the last time this */
88 /* information element was interrogated. This field is */ 89 /* information element was interrogated. This field is */
89 /* automatically cleared when it is interrogated.*/ 90 /* automatically cleared when it is interrogated.*/
90 u32 FCS_error; 91 __le32 FCS_error;
91 92
92 /* The number of MPDUs without PLCP header errors received*/ 93 /* The number of MPDUs without PLCP header errors received*/
93 /* since the last time this information element was interrogated. */ 94 /* since the last time this information element was interrogated. */
94 /* This field is automatically cleared when it is interrogated.*/ 95 /* This field is automatically cleared when it is interrogated.*/
95 u32 valid_frame; 96 __le32 valid_frame;
96 97
97 /* the number of missed sequence numbers in the squentially */ 98 /* the number of missed sequence numbers in the squentially */
98 /* values of frames seq numbers */ 99 /* values of frames seq numbers */
99 u32 seq_num_miss; 100 __le32 seq_num_miss;
100} __attribute__ ((packed)); 101} __attribute__ ((packed));
101 102
102struct acx_revision { 103struct acx_revision {
@@ -125,7 +126,7 @@ struct acx_revision {
125 * (1 = first spin, 2 = second spin, and so on). 126 * (1 = first spin, 2 = second spin, and so on).
126 * bits 24 - 31: Chip ID - The WiLink chip ID. 127 * bits 24 - 31: Chip ID - The WiLink chip ID.
127 */ 128 */
128 u32 hw_version; 129 __le32 hw_version;
129} __attribute__ ((packed)); 130} __attribute__ ((packed));
130 131
131enum wl1271_psm_mode { 132enum wl1271_psm_mode {
@@ -170,7 +171,6 @@ enum {
170#define DP_RX_PACKET_RING_CHUNK_NUM 2 171#define DP_RX_PACKET_RING_CHUNK_NUM 2
171#define DP_TX_PACKET_RING_CHUNK_NUM 2 172#define DP_TX_PACKET_RING_CHUNK_NUM 2
172#define DP_TX_COMPLETE_TIME_OUT 20 173#define DP_TX_COMPLETE_TIME_OUT 20
173#define FW_TX_CMPLT_BLOCK_SIZE 16
174 174
175#define TX_MSDU_LIFETIME_MIN 0 175#define TX_MSDU_LIFETIME_MIN 0
176#define TX_MSDU_LIFETIME_MAX 3000 176#define TX_MSDU_LIFETIME_MAX 3000
@@ -186,7 +186,7 @@ struct acx_rx_msdu_lifetime {
186 * The maximum amount of time, in TU, before the 186 * The maximum amount of time, in TU, before the
187 * firmware discards the MSDU. 187 * firmware discards the MSDU.
188 */ 188 */
189 u32 lifetime; 189 __le32 lifetime;
190} __attribute__ ((packed)); 190} __attribute__ ((packed));
191 191
192/* 192/*
@@ -273,14 +273,14 @@ struct acx_rx_msdu_lifetime {
273struct acx_rx_config { 273struct acx_rx_config {
274 struct acx_header header; 274 struct acx_header header;
275 275
276 u32 config_options; 276 __le32 config_options;
277 u32 filter_options; 277 __le32 filter_options;
278} __attribute__ ((packed)); 278} __attribute__ ((packed));
279 279
280struct acx_packet_detection { 280struct acx_packet_detection {
281 struct acx_header header; 281 struct acx_header header;
282 282
283 u32 threshold; 283 __le32 threshold;
284} __attribute__ ((packed)); 284} __attribute__ ((packed));
285 285
286 286
@@ -302,8 +302,8 @@ struct acx_slot {
302} __attribute__ ((packed)); 302} __attribute__ ((packed));
303 303
304 304
305#define ADDRESS_GROUP_MAX (8) 305#define ACX_MC_ADDRESS_GROUP_MAX (8)
306#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ADDRESS_GROUP_MAX) 306#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
307 307
308struct acx_dot11_grp_addr_tbl { 308struct acx_dot11_grp_addr_tbl {
309 struct acx_header header; 309 struct acx_header header;
@@ -314,40 +314,17 @@ struct acx_dot11_grp_addr_tbl {
314 u8 mac_table[ADDRESS_GROUP_MAX_LEN]; 314 u8 mac_table[ADDRESS_GROUP_MAX_LEN];
315} __attribute__ ((packed)); 315} __attribute__ ((packed));
316 316
317
318#define RX_TIMEOUT_PS_POLL_MIN 0
319#define RX_TIMEOUT_PS_POLL_MAX (200000)
320#define RX_TIMEOUT_PS_POLL_DEF (15)
321#define RX_TIMEOUT_UPSD_MIN 0
322#define RX_TIMEOUT_UPSD_MAX (200000)
323#define RX_TIMEOUT_UPSD_DEF (15)
324
325struct acx_rx_timeout { 317struct acx_rx_timeout {
326 struct acx_header header; 318 struct acx_header header;
327 319
328 /* 320 __le16 ps_poll_timeout;
329 * The longest time the STA will wait to receive 321 __le16 upsd_timeout;
330 * traffic from the AP after a PS-poll has been
331 * transmitted.
332 */
333 u16 ps_poll_timeout;
334
335 /*
336 * The longest time the STA will wait to receive
337 * traffic from the AP after a frame has been sent
338 * from an UPSD enabled queue.
339 */
340 u16 upsd_timeout;
341} __attribute__ ((packed)); 322} __attribute__ ((packed));
342 323
343#define RTS_THRESHOLD_MIN 0
344#define RTS_THRESHOLD_MAX 4096
345#define RTS_THRESHOLD_DEF 2347
346
347struct acx_rts_threshold { 324struct acx_rts_threshold {
348 struct acx_header header; 325 struct acx_header header;
349 326
350 u16 threshold; 327 __le16 threshold;
351 u8 pad[2]; 328 u8 pad[2];
352} __attribute__ ((packed)); 329} __attribute__ ((packed));
353 330
@@ -408,6 +385,13 @@ struct acx_beacon_filter_ie_table {
408 u8 pad[3]; 385 u8 pad[3];
409} __attribute__ ((packed)); 386} __attribute__ ((packed));
410 387
388struct acx_conn_monit_params {
389 struct acx_header header;
390
391 __le32 synch_fail_thold; /* number of beacons missed */
392 __le32 bss_lose_timeout; /* number of TU's from synch fail */
393} __attribute__ ((packed));
394
411enum { 395enum {
412 SG_ENABLE = 0, 396 SG_ENABLE = 0,
413 SG_DISABLE, 397 SG_DISABLE,
@@ -431,6 +415,25 @@ struct acx_bt_wlan_coex {
431 u8 pad[3]; 415 u8 pad[3];
432} __attribute__ ((packed)); 416} __attribute__ ((packed));
433 417
418struct acx_smart_reflex_state {
419 struct acx_header header;
420
421 u8 enable;
422 u8 padding[3];
423} __attribute__ ((packed));
424
425struct smart_reflex_err_table {
426 u8 len;
427 s8 upper_limit;
428 s8 values[14];
429} __attribute__ ((packed));
430
431struct acx_smart_reflex_config_params {
432 struct acx_header header;
433
434 struct smart_reflex_err_table error_table[3];
435} __attribute__ ((packed));
436
434#define PTA_ANTENNA_TYPE_DEF (0) 437#define PTA_ANTENNA_TYPE_DEF (0)
435#define PTA_BT_HP_MAXTIME_DEF (2000) 438#define PTA_BT_HP_MAXTIME_DEF (2000)
436#define PTA_WLAN_HP_MAX_TIME_DEF (5000) 439#define PTA_WLAN_HP_MAX_TIME_DEF (5000)
@@ -463,150 +466,34 @@ struct acx_bt_wlan_coex {
463struct acx_bt_wlan_coex_param { 466struct acx_bt_wlan_coex_param {
464 struct acx_header header; 467 struct acx_header header;
465 468
466 /* 469 __le32 per_threshold;
467 * The minimum rate of a received WLAN packet in the STA, 470 __le32 max_scan_compensation_time;
468 * during protective mode, of which a new BT-HP request 471 __le16 nfs_sample_interval;
469 * during this Rx will always be respected and gain the antenna. 472 u8 load_ratio;
470 */ 473 u8 auto_ps_mode;
471 u32 min_rate; 474 u8 probe_req_compensation;
472 475 u8 scan_window_compensation;
473 /* Max time the BT HP will be respected. */ 476 u8 antenna_config;
474 u16 bt_hp_max_time; 477 u8 beacon_miss_threshold;
475 478 __le32 rate_adaptation_threshold;
476 /* Max time the WLAN HP will be respected. */ 479 s8 rate_adaptation_snr;
477 u16 wlan_hp_max_time; 480 u8 padding[3];
478
479 /*
480 * The time between the last BT activity
481 * and the moment when the sense mode returns
482 * to SENSE_INACTIVE.
483 */
484 u16 sense_disable_timer;
485
486 /* Time before the next BT HP instance */
487 u16 rx_time_bt_hp;
488 u16 tx_time_bt_hp;
489
490 /* range: 10-20000 default: 1500 */
491 u16 rx_time_bt_hp_fast;
492 u16 tx_time_bt_hp_fast;
493
494 /* range: 2000-65535 default: 8700 */
495 u16 wlan_cycle_fast;
496
497 /* range: 0 - 15000 (Msec) default: 1000 */
498 u16 bt_anti_starvation_period;
499
500 /* range 400-10000(Usec) default: 3000 */
501 u16 next_bt_lp_packet;
502
503 /* Deafult: worst case for BT DH5 traffic */
504 u16 wake_up_beacon;
505
506 /* range: 0-50000(Usec) default: 1050 */
507 u16 hp_dm_max_guard_time;
508
509 /*
510 * This is to prevent both BT & WLAN antenna
511 * starvation.
512 * Range: 100-50000(Usec) default:2550
513 */
514 u16 next_wlan_packet;
515
516 /* 0 -> shared antenna */
517 u8 antenna_type;
518
519 /*
520 * 0 -> TI legacy
521 * 1 -> Palau
522 */
523 u8 signal_type;
524
525 /*
526 * BT AFH status
527 * 0 -> no AFH
528 * 1 -> from dedicated GPIO
529 * 2 -> AFH on (from host)
530 */
531 u8 afh_leverage_on;
532
533 /*
534 * The number of cycles during which no
535 * TX will be sent after 1 cycle of RX
536 * transaction in protective mode
537 */
538 u8 quiet_cycle_num;
539
540 /*
541 * The maximum number of CTSs that will
542 * be sent for receiving RX packet in
543 * protective mode
544 */
545 u8 max_cts;
546
547 /*
548 * The number of WLAN packets
549 * transferred in common mode before
550 * switching to BT.
551 */
552 u8 wlan_packets_num;
553
554 /*
555 * The number of BT packets
556 * transferred in common mode before
557 * switching to WLAN.
558 */
559 u8 bt_packets_num;
560
561 /* range: 1-255 default: 5 */
562 u8 missed_rx_avalanche;
563
564 /* range: 0-1 default: 1 */
565 u8 wlan_elp_hp;
566
567 /* range: 0 - 15 default: 4 */
568 u8 bt_anti_starvation_cycles;
569
570 u8 ack_mode_dual_ant;
571
572 /*
573 * Allow PA_SD assertion/de-assertion
574 * during enabled BT activity.
575 */
576 u8 pa_sd_enable;
577
578 /*
579 * Enable/Disable PTA in auto mode:
580 * Support Both Active & P.S modes
581 */
582 u8 pta_auto_mode_enable;
583
584 /* range: 0 - 20 default: 1 */
585 u8 bt_hp_respected_num;
586} __attribute__ ((packed)); 481} __attribute__ ((packed));
587 482
588#define CCA_THRSH_ENABLE_ENERGY_D 0x140A
589#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF
590
591struct acx_energy_detection { 483struct acx_energy_detection {
592 struct acx_header header; 484 struct acx_header header;
593 485
594 /* The RX Clear Channel Assessment threshold in the PHY */ 486 /* The RX Clear Channel Assessment threshold in the PHY */
595 u16 rx_cca_threshold; 487 __le16 rx_cca_threshold;
596 u8 tx_energy_detection; 488 u8 tx_energy_detection;
597 u8 pad; 489 u8 pad;
598} __attribute__ ((packed)); 490} __attribute__ ((packed));
599 491
600#define BCN_RX_TIMEOUT_DEF_VALUE 10000
601#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000
602#define RX_BROADCAST_IN_PS_DEF_VALUE 1
603#define CONSECUTIVE_PS_POLL_FAILURE_DEF 4
604
605struct acx_beacon_broadcast { 492struct acx_beacon_broadcast {
606 struct acx_header header; 493 struct acx_header header;
607 494
608 u16 beacon_rx_timeout; 495 __le16 beacon_rx_timeout;
609 u16 broadcast_timeout; 496 __le16 broadcast_timeout;
610 497
611 /* Enables receiving of broadcast packets in PS mode */ 498 /* Enables receiving of broadcast packets in PS mode */
612 u8 rx_broadcast_in_ps; 499 u8 rx_broadcast_in_ps;
@@ -619,8 +506,8 @@ struct acx_beacon_broadcast {
619struct acx_event_mask { 506struct acx_event_mask {
620 struct acx_header header; 507 struct acx_header header;
621 508
622 u32 event_mask; 509 __le32 event_mask;
623 u32 high_event_mask; /* Unused */ 510 __le32 high_event_mask; /* Unused */
624} __attribute__ ((packed)); 511} __attribute__ ((packed));
625 512
626#define CFG_RX_FCS BIT(2) 513#define CFG_RX_FCS BIT(2)
@@ -657,11 +544,15 @@ struct acx_event_mask {
657#define SCAN_TRIGGERED BIT(2) 544#define SCAN_TRIGGERED BIT(2)
658#define SCAN_PRIORITY_HIGH BIT(3) 545#define SCAN_PRIORITY_HIGH BIT(3)
659 546
547/* When set, disable HW encryption */
548#define DF_ENCRYPTION_DISABLE 0x01
549#define DF_SNIFF_MODE_ENABLE 0x80
550
660struct acx_feature_config { 551struct acx_feature_config {
661 struct acx_header header; 552 struct acx_header header;
662 553
663 u32 options; 554 __le32 options;
664 u32 data_flow_options; 555 __le32 data_flow_options;
665} __attribute__ ((packed)); 556} __attribute__ ((packed));
666 557
667struct acx_current_tx_power { 558struct acx_current_tx_power {
@@ -671,14 +562,6 @@ struct acx_current_tx_power {
671 u8 padding[3]; 562 u8 padding[3];
672} __attribute__ ((packed)); 563} __attribute__ ((packed));
673 564
674enum acx_wake_up_event {
675 WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/
676 WAKE_UP_EVENT_DTIM_BITMAP = 0x02, /* Wake on every DTIM*/
677 WAKE_UP_EVENT_N_DTIM_BITMAP = 0x04, /* Wake on every Nth DTIM */
678 WAKE_UP_EVENT_N_BEACONS_BITMAP = 0x08, /* Wake on every Nth Beacon */
679 WAKE_UP_EVENT_BITS_MASK = 0x0F
680};
681
682struct acx_wake_up_condition { 565struct acx_wake_up_condition {
683 struct acx_header header; 566 struct acx_header header;
684 567
@@ -693,7 +576,7 @@ struct acx_aid {
693 /* 576 /*
694 * To be set when associated with an AP. 577 * To be set when associated with an AP.
695 */ 578 */
696 u16 aid; 579 __le16 aid;
697 u8 pad[2]; 580 u8 pad[2];
698} __attribute__ ((packed)); 581} __attribute__ ((packed));
699 582
@@ -725,152 +608,152 @@ struct acx_ctsprotect {
725} __attribute__ ((packed)); 608} __attribute__ ((packed));
726 609
727struct acx_tx_statistics { 610struct acx_tx_statistics {
728 u32 internal_desc_overflow; 611 __le32 internal_desc_overflow;
729} __attribute__ ((packed)); 612} __attribute__ ((packed));
730 613
731struct acx_rx_statistics { 614struct acx_rx_statistics {
732 u32 out_of_mem; 615 __le32 out_of_mem;
733 u32 hdr_overflow; 616 __le32 hdr_overflow;
734 u32 hw_stuck; 617 __le32 hw_stuck;
735 u32 dropped; 618 __le32 dropped;
736 u32 fcs_err; 619 __le32 fcs_err;
737 u32 xfr_hint_trig; 620 __le32 xfr_hint_trig;
738 u32 path_reset; 621 __le32 path_reset;
739 u32 reset_counter; 622 __le32 reset_counter;
740} __attribute__ ((packed)); 623} __attribute__ ((packed));
741 624
742struct acx_dma_statistics { 625struct acx_dma_statistics {
743 u32 rx_requested; 626 __le32 rx_requested;
744 u32 rx_errors; 627 __le32 rx_errors;
745 u32 tx_requested; 628 __le32 tx_requested;
746 u32 tx_errors; 629 __le32 tx_errors;
747} __attribute__ ((packed)); 630} __attribute__ ((packed));
748 631
749struct acx_isr_statistics { 632struct acx_isr_statistics {
750 /* host command complete */ 633 /* host command complete */
751 u32 cmd_cmplt; 634 __le32 cmd_cmplt;
752 635
753 /* fiqisr() */ 636 /* fiqisr() */
754 u32 fiqs; 637 __le32 fiqs;
755 638
756 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */ 639 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
757 u32 rx_headers; 640 __le32 rx_headers;
758 641
759 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */ 642 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
760 u32 rx_completes; 643 __le32 rx_completes;
761 644
762 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */ 645 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
763 u32 rx_mem_overflow; 646 __le32 rx_mem_overflow;
764 647
765 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */ 648 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
766 u32 rx_rdys; 649 __le32 rx_rdys;
767 650
768 /* irqisr() */ 651 /* irqisr() */
769 u32 irqs; 652 __le32 irqs;
770 653
771 /* (INT_STS_ND & INT_TRIG_TX_PROC) */ 654 /* (INT_STS_ND & INT_TRIG_TX_PROC) */
772 u32 tx_procs; 655 __le32 tx_procs;
773 656
774 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */ 657 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
775 u32 decrypt_done; 658 __le32 decrypt_done;
776 659
777 /* (INT_STS_ND & INT_TRIG_DMA0) */ 660 /* (INT_STS_ND & INT_TRIG_DMA0) */
778 u32 dma0_done; 661 __le32 dma0_done;
779 662
780 /* (INT_STS_ND & INT_TRIG_DMA1) */ 663 /* (INT_STS_ND & INT_TRIG_DMA1) */
781 u32 dma1_done; 664 __le32 dma1_done;
782 665
783 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */ 666 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
784 u32 tx_exch_complete; 667 __le32 tx_exch_complete;
785 668
786 /* (INT_STS_ND & INT_TRIG_COMMAND) */ 669 /* (INT_STS_ND & INT_TRIG_COMMAND) */
787 u32 commands; 670 __le32 commands;
788 671
789 /* (INT_STS_ND & INT_TRIG_RX_PROC) */ 672 /* (INT_STS_ND & INT_TRIG_RX_PROC) */
790 u32 rx_procs; 673 __le32 rx_procs;
791 674
792 /* (INT_STS_ND & INT_TRIG_PM_802) */ 675 /* (INT_STS_ND & INT_TRIG_PM_802) */
793 u32 hw_pm_mode_changes; 676 __le32 hw_pm_mode_changes;
794 677
795 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */ 678 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
796 u32 host_acknowledges; 679 __le32 host_acknowledges;
797 680
798 /* (INT_STS_ND & INT_TRIG_PM_PCI) */ 681 /* (INT_STS_ND & INT_TRIG_PM_PCI) */
799 u32 pci_pm; 682 __le32 pci_pm;
800 683
801 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */ 684 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
802 u32 wakeups; 685 __le32 wakeups;
803 686
804 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ 687 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
805 u32 low_rssi; 688 __le32 low_rssi;
806} __attribute__ ((packed)); 689} __attribute__ ((packed));
807 690
808struct acx_wep_statistics { 691struct acx_wep_statistics {
809 /* WEP address keys configured */ 692 /* WEP address keys configured */
810 u32 addr_key_count; 693 __le32 addr_key_count;
811 694
812 /* default keys configured */ 695 /* default keys configured */
813 u32 default_key_count; 696 __le32 default_key_count;
814 697
815 u32 reserved; 698 __le32 reserved;
816 699
817 /* number of times that WEP key not found on lookup */ 700 /* number of times that WEP key not found on lookup */
818 u32 key_not_found; 701 __le32 key_not_found;
819 702
820 /* number of times that WEP key decryption failed */ 703 /* number of times that WEP key decryption failed */
821 u32 decrypt_fail; 704 __le32 decrypt_fail;
822 705
823 /* WEP packets decrypted */ 706 /* WEP packets decrypted */
824 u32 packets; 707 __le32 packets;
825 708
826 /* WEP decrypt interrupts */ 709 /* WEP decrypt interrupts */
827 u32 interrupt; 710 __le32 interrupt;
828} __attribute__ ((packed)); 711} __attribute__ ((packed));
829 712
830#define ACX_MISSED_BEACONS_SPREAD 10 713#define ACX_MISSED_BEACONS_SPREAD 10
831 714
832struct acx_pwr_statistics { 715struct acx_pwr_statistics {
833 /* the amount of enters into power save mode (both PD & ELP) */ 716 /* the amount of enters into power save mode (both PD & ELP) */
834 u32 ps_enter; 717 __le32 ps_enter;
835 718
836 /* the amount of enters into ELP mode */ 719 /* the amount of enters into ELP mode */
837 u32 elp_enter; 720 __le32 elp_enter;
838 721
839 /* the amount of missing beacon interrupts to the host */ 722 /* the amount of missing beacon interrupts to the host */
840 u32 missing_bcns; 723 __le32 missing_bcns;
841 724
842 /* the amount of wake on host-access times */ 725 /* the amount of wake on host-access times */
843 u32 wake_on_host; 726 __le32 wake_on_host;
844 727
845 /* the amount of wake on timer-expire */ 728 /* the amount of wake on timer-expire */
846 u32 wake_on_timer_exp; 729 __le32 wake_on_timer_exp;
847 730
848 /* the number of packets that were transmitted with PS bit set */ 731 /* the number of packets that were transmitted with PS bit set */
849 u32 tx_with_ps; 732 __le32 tx_with_ps;
850 733
851 /* the number of packets that were transmitted with PS bit clear */ 734 /* the number of packets that were transmitted with PS bit clear */
852 u32 tx_without_ps; 735 __le32 tx_without_ps;
853 736
854 /* the number of received beacons */ 737 /* the number of received beacons */
855 u32 rcvd_beacons; 738 __le32 rcvd_beacons;
856 739
857 /* the number of entering into PowerOn (power save off) */ 740 /* the number of entering into PowerOn (power save off) */
858 u32 power_save_off; 741 __le32 power_save_off;
859 742
860 /* the number of entries into power save mode */ 743 /* the number of entries into power save mode */
861 u16 enable_ps; 744 __le16 enable_ps;
862 745
863 /* 746 /*
864 * the number of exits from power save, not including failed PS 747 * the number of exits from power save, not including failed PS
865 * transitions 748 * transitions
866 */ 749 */
867 u16 disable_ps; 750 __le16 disable_ps;
868 751
869 /* 752 /*
870 * the number of times the TSF counter was adjusted because 753 * the number of times the TSF counter was adjusted because
871 * of drift 754 * of drift
872 */ 755 */
873 u32 fix_tsf_ps; 756 __le32 fix_tsf_ps;
874 757
875 /* Gives statistics about the spread continuous missed beacons. 758 /* Gives statistics about the spread continuous missed beacons.
876 * The 16 LSB are dedicated for the PS mode. 759 * The 16 LSB are dedicated for the PS mode.
@@ -881,53 +764,53 @@ struct acx_pwr_statistics {
881 * ... 764 * ...
882 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons. 765 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
883 */ 766 */
884 u32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD]; 767 __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
885 768
886 /* the number of beacons in awake mode */ 769 /* the number of beacons in awake mode */
887 u32 rcvd_awake_beacons; 770 __le32 rcvd_awake_beacons;
888} __attribute__ ((packed)); 771} __attribute__ ((packed));
889 772
890struct acx_mic_statistics { 773struct acx_mic_statistics {
891 u32 rx_pkts; 774 __le32 rx_pkts;
892 u32 calc_failure; 775 __le32 calc_failure;
893} __attribute__ ((packed)); 776} __attribute__ ((packed));
894 777
895struct acx_aes_statistics { 778struct acx_aes_statistics {
896 u32 encrypt_fail; 779 __le32 encrypt_fail;
897 u32 decrypt_fail; 780 __le32 decrypt_fail;
898 u32 encrypt_packets; 781 __le32 encrypt_packets;
899 u32 decrypt_packets; 782 __le32 decrypt_packets;
900 u32 encrypt_interrupt; 783 __le32 encrypt_interrupt;
901 u32 decrypt_interrupt; 784 __le32 decrypt_interrupt;
902} __attribute__ ((packed)); 785} __attribute__ ((packed));
903 786
904struct acx_event_statistics { 787struct acx_event_statistics {
905 u32 heart_beat; 788 __le32 heart_beat;
906 u32 calibration; 789 __le32 calibration;
907 u32 rx_mismatch; 790 __le32 rx_mismatch;
908 u32 rx_mem_empty; 791 __le32 rx_mem_empty;
909 u32 rx_pool; 792 __le32 rx_pool;
910 u32 oom_late; 793 __le32 oom_late;
911 u32 phy_transmit_error; 794 __le32 phy_transmit_error;
912 u32 tx_stuck; 795 __le32 tx_stuck;
913} __attribute__ ((packed)); 796} __attribute__ ((packed));
914 797
915struct acx_ps_statistics { 798struct acx_ps_statistics {
916 u32 pspoll_timeouts; 799 __le32 pspoll_timeouts;
917 u32 upsd_timeouts; 800 __le32 upsd_timeouts;
918 u32 upsd_max_sptime; 801 __le32 upsd_max_sptime;
919 u32 upsd_max_apturn; 802 __le32 upsd_max_apturn;
920 u32 pspoll_max_apturn; 803 __le32 pspoll_max_apturn;
921 u32 pspoll_utilization; 804 __le32 pspoll_utilization;
922 u32 upsd_utilization; 805 __le32 upsd_utilization;
923} __attribute__ ((packed)); 806} __attribute__ ((packed));
924 807
925struct acx_rxpipe_statistics { 808struct acx_rxpipe_statistics {
926 u32 rx_prep_beacon_drop; 809 __le32 rx_prep_beacon_drop;
927 u32 descr_host_int_trig_rx_data; 810 __le32 descr_host_int_trig_rx_data;
928 u32 beacon_buffer_thres_host_int_trig_rx_data; 811 __le32 beacon_buffer_thres_host_int_trig_rx_data;
929 u32 missed_beacon_host_int_trig_rx_data; 812 __le32 missed_beacon_host_int_trig_rx_data;
930 u32 tx_xfr_host_int_trig_rx_data; 813 __le32 tx_xfr_host_int_trig_rx_data;
931} __attribute__ ((packed)); 814} __attribute__ ((packed));
932 815
933struct acx_statistics { 816struct acx_statistics {
@@ -946,13 +829,8 @@ struct acx_statistics {
946 struct acx_rxpipe_statistics rxpipe; 829 struct acx_rxpipe_statistics rxpipe;
947} __attribute__ ((packed)); 830} __attribute__ ((packed));
948 831
949#define ACX_MAX_RATE_CLASSES 8
950#define ACX_RATE_MASK_UNSPECIFIED 0
951#define ACX_RATE_MASK_ALL 0x1eff
952#define ACX_RATE_RETRY_LIMIT 10
953
954struct acx_rate_class { 832struct acx_rate_class {
955 u32 enabled_rates; 833 __le32 enabled_rates;
956 u8 short_retry_limit; 834 u8 short_retry_limit;
957 u8 long_retry_limit; 835 u8 long_retry_limit;
958 u8 aflags; 836 u8 aflags;
@@ -962,47 +840,20 @@ struct acx_rate_class {
962struct acx_rate_policy { 840struct acx_rate_policy {
963 struct acx_header header; 841 struct acx_header header;
964 842
965 u32 rate_class_cnt; 843 __le32 rate_class_cnt;
966 struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; 844 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
967} __attribute__ ((packed)); 845} __attribute__ ((packed));
968 846
969#define WL1271_ACX_AC_COUNT 4
970
971struct acx_ac_cfg { 847struct acx_ac_cfg {
972 struct acx_header header; 848 struct acx_header header;
973 u8 ac; 849 u8 ac;
974 u8 cw_min; 850 u8 cw_min;
975 u16 cw_max; 851 __le16 cw_max;
976 u8 aifsn; 852 u8 aifsn;
977 u8 reserved; 853 u8 reserved;
978 u16 tx_op_limit; 854 __le16 tx_op_limit;
979} __attribute__ ((packed)); 855} __attribute__ ((packed));
980 856
981enum wl1271_acx_ac {
982 WL1271_ACX_AC_BE = 0,
983 WL1271_ACX_AC_BK = 1,
984 WL1271_ACX_AC_VI = 2,
985 WL1271_ACX_AC_VO = 3,
986 WL1271_ACX_AC_CTS2SELF = 4,
987 WL1271_ACX_AC_ANY_TID = 0x1F,
988 WL1271_ACX_AC_INVALID = 0xFF,
989};
990
991enum wl1271_acx_ps_scheme {
992 WL1271_ACX_PS_SCHEME_LEGACY = 0,
993 WL1271_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
994 WL1271_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
995 WL1271_ACX_PS_SCHEME_SAPSD = 3,
996};
997
998enum wl1271_acx_ack_policy {
999 WL1271_ACX_ACK_POLICY_LEGACY = 0,
1000 WL1271_ACX_ACK_POLICY_NO_ACK = 1,
1001 WL1271_ACX_ACK_POLICY_BLOCK = 2,
1002};
1003
1004#define WL1271_ACX_TID_COUNT 7
1005
1006struct acx_tid_config { 857struct acx_tid_config {
1007 struct acx_header header; 858 struct acx_header header;
1008 u8 queue_id; 859 u8 queue_id;
@@ -1011,22 +862,19 @@ struct acx_tid_config {
1011 u8 ps_scheme; 862 u8 ps_scheme;
1012 u8 ack_policy; 863 u8 ack_policy;
1013 u8 padding[3]; 864 u8 padding[3];
1014 u32 apsd_conf[2]; 865 __le32 apsd_conf[2];
1015} __attribute__ ((packed)); 866} __attribute__ ((packed));
1016 867
1017struct acx_frag_threshold { 868struct acx_frag_threshold {
1018 struct acx_header header; 869 struct acx_header header;
1019 u16 frag_threshold; 870 __le16 frag_threshold;
1020 u8 padding[2]; 871 u8 padding[2];
1021} __attribute__ ((packed)); 872} __attribute__ ((packed));
1022 873
1023#define WL1271_ACX_TX_COMPL_TIMEOUT 5
1024#define WL1271_ACX_TX_COMPL_THRESHOLD 5
1025
1026struct acx_tx_config_options { 874struct acx_tx_config_options {
1027 struct acx_header header; 875 struct acx_header header;
1028 u16 tx_compl_timeout; /* msec */ 876 __le16 tx_compl_timeout; /* msec */
1029 u16 tx_compl_threshold; /* number of packets */ 877 __le16 tx_compl_threshold; /* number of packets */
1030} __attribute__ ((packed)); 878} __attribute__ ((packed));
1031 879
1032#define ACX_RX_MEM_BLOCKS 64 880#define ACX_RX_MEM_BLOCKS 64
@@ -1041,79 +889,87 @@ struct wl1271_acx_config_memory {
1041 u8 tx_min_mem_block_num; 889 u8 tx_min_mem_block_num;
1042 u8 num_stations; 890 u8 num_stations;
1043 u8 num_ssid_profiles; 891 u8 num_ssid_profiles;
1044 u32 total_tx_descriptors; 892 __le32 total_tx_descriptors;
1045} __attribute__ ((packed)); 893} __attribute__ ((packed));
1046 894
1047struct wl1271_acx_mem_map { 895struct wl1271_acx_mem_map {
1048 struct acx_header header; 896 struct acx_header header;
1049 897
1050 void *code_start; 898 __le32 code_start;
1051 void *code_end; 899 __le32 code_end;
1052 900
1053 void *wep_defkey_start; 901 __le32 wep_defkey_start;
1054 void *wep_defkey_end; 902 __le32 wep_defkey_end;
1055 903
1056 void *sta_table_start; 904 __le32 sta_table_start;
1057 void *sta_table_end; 905 __le32 sta_table_end;
1058 906
1059 void *packet_template_start; 907 __le32 packet_template_start;
1060 void *packet_template_end; 908 __le32 packet_template_end;
1061 909
1062 /* Address of the TX result interface (control block) */ 910 /* Address of the TX result interface (control block) */
1063 u32 tx_result; 911 __le32 tx_result;
1064 u32 tx_result_queue_start; 912 __le32 tx_result_queue_start;
1065 913
1066 void *queue_memory_start; 914 __le32 queue_memory_start;
1067 void *queue_memory_end; 915 __le32 queue_memory_end;
1068 916
1069 u32 packet_memory_pool_start; 917 __le32 packet_memory_pool_start;
1070 u32 packet_memory_pool_end; 918 __le32 packet_memory_pool_end;
1071 919
1072 void *debug_buffer1_start; 920 __le32 debug_buffer1_start;
1073 void *debug_buffer1_end; 921 __le32 debug_buffer1_end;
1074 922
1075 void *debug_buffer2_start; 923 __le32 debug_buffer2_start;
1076 void *debug_buffer2_end; 924 __le32 debug_buffer2_end;
1077 925
1078 /* Number of blocks FW allocated for TX packets */ 926 /* Number of blocks FW allocated for TX packets */
1079 u32 num_tx_mem_blocks; 927 __le32 num_tx_mem_blocks;
1080 928
1081 /* Number of blocks FW allocated for RX packets */ 929 /* Number of blocks FW allocated for RX packets */
1082 u32 num_rx_mem_blocks; 930 __le32 num_rx_mem_blocks;
1083 931
1084 /* the following 4 fields are valid in SLAVE mode only */ 932 /* the following 4 fields are valid in SLAVE mode only */
1085 u8 *tx_cbuf; 933 u8 *tx_cbuf;
1086 u8 *rx_cbuf; 934 u8 *rx_cbuf;
1087 void *rx_ctrl; 935 __le32 rx_ctrl;
1088 void *tx_ctrl; 936 __le32 tx_ctrl;
1089} __attribute__ ((packed)); 937} __attribute__ ((packed));
1090 938
1091enum wl1271_acx_rx_queue_type {
1092 RX_QUEUE_TYPE_RX_LOW_PRIORITY, /* All except the high priority */
1093 RX_QUEUE_TYPE_RX_HIGH_PRIORITY, /* Management and voice packets */
1094 RX_QUEUE_TYPE_NUM,
1095 RX_QUEUE_TYPE_MAX = USHORT_MAX
1096};
1097
1098#define WL1271_RX_INTR_THRESHOLD_DEF 0 /* no pacing, send interrupt on
1099 * every event */
1100#define WL1271_RX_INTR_THRESHOLD_MIN 0
1101#define WL1271_RX_INTR_THRESHOLD_MAX 15
1102
1103#define WL1271_RX_INTR_TIMEOUT_DEF 5
1104#define WL1271_RX_INTR_TIMEOUT_MIN 1
1105#define WL1271_RX_INTR_TIMEOUT_MAX 100
1106
1107struct wl1271_acx_rx_config_opt { 939struct wl1271_acx_rx_config_opt {
1108 struct acx_header header; 940 struct acx_header header;
1109 941
1110 u16 mblk_threshold; 942 __le16 mblk_threshold;
1111 u16 threshold; 943 __le16 threshold;
1112 u16 timeout; 944 __le16 timeout;
1113 u8 queue_type; 945 u8 queue_type;
1114 u8 reserved; 946 u8 reserved;
1115} __attribute__ ((packed)); 947} __attribute__ ((packed));
1116 948
949
950struct wl1271_acx_bet_enable {
951 struct acx_header header;
952
953 u8 enable;
954 u8 max_consecutive;
955 u8 padding[2];
956} __attribute__ ((packed));
957
958#define ACX_IPV4_VERSION 4
959#define ACX_IPV6_VERSION 6
960#define ACX_IPV4_ADDR_SIZE 4
961struct wl1271_acx_arp_filter {
962 struct acx_header header;
963 u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
964 u8 enable; /* 1 to enable ARP filtering, 0 to disable */
965 u8 padding[2];
966 u8 address[16]; /* The configured device IP address - all ARP
967 requests directed to this IP address will pass
968 through. For IPv4, the first four bytes are
969 used. */
970} __attribute__((packed));
971
972
1117enum { 973enum {
1118 ACX_WAKE_UP_CONDITIONS = 0x0002, 974 ACX_WAKE_UP_CONDITIONS = 0x0002,
1119 ACX_MEM_CFG = 0x0003, 975 ACX_MEM_CFG = 0x0003,
@@ -1170,6 +1026,9 @@ enum {
1170 ACX_PEER_HT_CAP = 0x0057, 1026 ACX_PEER_HT_CAP = 0x0057,
1171 ACX_HT_BSS_OPERATION = 0x0058, 1027 ACX_HT_BSS_OPERATION = 0x0058,
1172 ACX_COEX_ACTIVITY = 0x0059, 1028 ACX_COEX_ACTIVITY = 0x0059,
1029 ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
1030 ACX_SET_SMART_REFLEX_STATE = 0x005B,
1031 ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
1173 DOT11_RX_MSDU_LIFE_TIME = 0x1004, 1032 DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1174 DOT11_CUR_TX_PWR = 0x100D, 1033 DOT11_CUR_TX_PWR = 0x100D,
1175 DOT11_RX_DOT11_MODE = 0x1012, 1034 DOT11_RX_DOT11_MODE = 0x1012,
@@ -1182,23 +1041,24 @@ enum {
1182}; 1041};
1183 1042
1184 1043
1185int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event, 1044int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
1186 u8 listen_interval);
1187int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); 1045int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
1188int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len); 1046int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len);
1189int wl1271_acx_tx_power(struct wl1271 *wl, int power); 1047int wl1271_acx_tx_power(struct wl1271 *wl, int power);
1190int wl1271_acx_feature_cfg(struct wl1271 *wl); 1048int wl1271_acx_feature_cfg(struct wl1271 *wl);
1191int wl1271_acx_mem_map(struct wl1271 *wl, 1049int wl1271_acx_mem_map(struct wl1271 *wl,
1192 struct acx_header *mem_map, size_t len); 1050 struct acx_header *mem_map, size_t len);
1193int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time); 1051int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
1194int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter); 1052int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter);
1195int wl1271_acx_pd_threshold(struct wl1271 *wl); 1053int wl1271_acx_pd_threshold(struct wl1271 *wl);
1196int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time); 1054int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
1197int wl1271_acx_group_address_tbl(struct wl1271 *wl); 1055int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
1056 void *mc_list, u32 mc_list_len);
1198int wl1271_acx_service_period_timeout(struct wl1271 *wl); 1057int wl1271_acx_service_period_timeout(struct wl1271 *wl);
1199int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold); 1058int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
1200int wl1271_acx_beacon_filter_opt(struct wl1271 *wl); 1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1201int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1061int wl1271_acx_conn_monit_params(struct wl1271 *wl);
1202int wl1271_acx_sg_enable(struct wl1271 *wl); 1062int wl1271_acx_sg_enable(struct wl1271 *wl);
1203int wl1271_acx_sg_cfg(struct wl1271 *wl); 1063int wl1271_acx_sg_cfg(struct wl1271 *wl);
1204int wl1271_acx_cca_threshold(struct wl1271 *wl); 1064int wl1271_acx_cca_threshold(struct wl1271 *wl);
@@ -1207,9 +1067,9 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
1207int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask); 1067int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
1208int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble); 1068int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
1209int wl1271_acx_cts_protect(struct wl1271 *wl, 1069int wl1271_acx_cts_protect(struct wl1271 *wl,
1210 enum acx_ctsprotect_type ctsprotect); 1070 enum acx_ctsprotect_type ctsprotect);
1211int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1071int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
1212int wl1271_acx_rate_policies(struct wl1271 *wl); 1072int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates);
1213int wl1271_acx_ac_cfg(struct wl1271 *wl); 1073int wl1271_acx_ac_cfg(struct wl1271 *wl);
1214int wl1271_acx_tid_cfg(struct wl1271 *wl); 1074int wl1271_acx_tid_cfg(struct wl1271 *wl);
1215int wl1271_acx_frag_threshold(struct wl1271 *wl); 1075int wl1271_acx_frag_threshold(struct wl1271 *wl);
@@ -1217,5 +1077,9 @@ int wl1271_acx_tx_config_options(struct wl1271 *wl);
1217int wl1271_acx_mem_cfg(struct wl1271 *wl); 1077int wl1271_acx_mem_cfg(struct wl1271 *wl);
1218int wl1271_acx_init_mem_config(struct wl1271 *wl); 1078int wl1271_acx_init_mem_config(struct wl1271 *wl);
1219int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1079int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1080int wl1271_acx_smart_reflex(struct wl1271 *wl);
1081int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1082int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1083 u8 version);
1220 1084
1221#endif /* __WL1271_ACX_H__ */ 1085#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 8228ef474a7e..ba4a2b4f0f56 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -39,6 +39,14 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
39 .start = REGISTERS_BASE, 39 .start = REGISTERS_BASE,
40 .size = 0x00008800 40 .size = 0x00008800
41 }, 41 },
42 .mem2 = {
43 .start = 0x00000000,
44 .size = 0x00000000
45 },
46 .mem3 = {
47 .start = 0x00000000,
48 .size = 0x00000000
49 },
42 }, 50 },
43 51
44 [PART_WORK] = { 52 [PART_WORK] = {
@@ -48,7 +56,15 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
48 }, 56 },
49 .reg = { 57 .reg = {
50 .start = REGISTERS_BASE, 58 .start = REGISTERS_BASE,
51 .size = 0x0000b000 59 .size = 0x0000a000
60 },
61 .mem2 = {
62 .start = 0x003004f8,
63 .size = 0x00000004
64 },
65 .mem3 = {
66 .start = 0x00040404,
67 .size = 0x00000000
52 }, 68 },
53 }, 69 },
54 70
@@ -60,6 +76,14 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
60 .reg = { 76 .reg = {
61 .start = DRPW_BASE, 77 .start = DRPW_BASE,
62 .size = 0x00006000 78 .size = 0x00006000
79 },
80 .mem2 = {
81 .start = 0x00000000,
82 .size = 0x00000000
83 },
84 .mem3 = {
85 .start = 0x00000000,
86 .size = 0x00000000
63 } 87 }
64 } 88 }
65}; 89};
@@ -69,19 +93,19 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
69 u32 cpu_ctrl; 93 u32 cpu_ctrl;
70 94
71 /* 10.5.0 run the firmware (I) */ 95 /* 10.5.0 run the firmware (I) */
72 cpu_ctrl = wl1271_reg_read32(wl, ACX_REG_ECPU_CONTROL); 96 cpu_ctrl = wl1271_spi_read32(wl, ACX_REG_ECPU_CONTROL);
73 97
74 /* 10.5.1 run the firmware (II) */ 98 /* 10.5.1 run the firmware (II) */
75 cpu_ctrl |= flag; 99 cpu_ctrl |= flag;
76 wl1271_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); 100 wl1271_spi_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
77} 101}
78 102
79static void wl1271_boot_fw_version(struct wl1271 *wl) 103static void wl1271_boot_fw_version(struct wl1271 *wl)
80{ 104{
81 struct wl1271_static_data static_data; 105 struct wl1271_static_data static_data;
82 106
83 wl1271_spi_mem_read(wl, wl->cmd_box_addr, 107 wl1271_spi_read(wl, wl->cmd_box_addr,
84 &static_data, sizeof(static_data)); 108 &static_data, sizeof(static_data), false);
85 109
86 strncpy(wl->chip.fw_ver, static_data.fw_version, 110 strncpy(wl->chip.fw_ver, static_data.fw_version,
87 sizeof(wl->chip.fw_ver)); 111 sizeof(wl->chip.fw_ver));
@@ -93,8 +117,9 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
93static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, 117static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
94 size_t fw_data_len, u32 dest) 118 size_t fw_data_len, u32 dest)
95{ 119{
120 struct wl1271_partition_set partition;
96 int addr, chunk_num, partition_limit; 121 int addr, chunk_num, partition_limit;
97 u8 *p; 122 u8 *p, *chunk;
98 123
99 /* whal_FwCtrl_LoadFwImageSm() */ 124 /* whal_FwCtrl_LoadFwImageSm() */
100 125
@@ -103,16 +128,20 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
103 wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d", 128 wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
104 fw_data_len, CHUNK_SIZE); 129 fw_data_len, CHUNK_SIZE);
105 130
106
107 if ((fw_data_len % 4) != 0) { 131 if ((fw_data_len % 4) != 0) {
108 wl1271_error("firmware length not multiple of four"); 132 wl1271_error("firmware length not multiple of four");
109 return -EIO; 133 return -EIO;
110 } 134 }
111 135
112 wl1271_set_partition(wl, dest, 136 chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
113 part_table[PART_DOWN].mem.size, 137 if (!chunk) {
114 part_table[PART_DOWN].reg.start, 138 wl1271_error("allocation for firmware upload chunk failed");
115 part_table[PART_DOWN].reg.size); 139 return -ENOMEM;
140 }
141
142 memcpy(&partition, &part_table[PART_DOWN], sizeof(partition));
143 partition.mem.start = dest;
144 wl1271_set_partition(wl, &partition);
116 145
117 /* 10.1 set partition limit and chunk num */ 146 /* 10.1 set partition limit and chunk num */
118 chunk_num = 0; 147 chunk_num = 0;
@@ -125,21 +154,17 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
125 addr = dest + chunk_num * CHUNK_SIZE; 154 addr = dest + chunk_num * CHUNK_SIZE;
126 partition_limit = chunk_num * CHUNK_SIZE + 155 partition_limit = chunk_num * CHUNK_SIZE +
127 part_table[PART_DOWN].mem.size; 156 part_table[PART_DOWN].mem.size;
128 157 partition.mem.start = addr;
129 /* FIXME: Over 80 chars! */ 158 wl1271_set_partition(wl, &partition);
130 wl1271_set_partition(wl,
131 addr,
132 part_table[PART_DOWN].mem.size,
133 part_table[PART_DOWN].reg.start,
134 part_table[PART_DOWN].reg.size);
135 } 159 }
136 160
137 /* 10.3 upload the chunk */ 161 /* 10.3 upload the chunk */
138 addr = dest + chunk_num * CHUNK_SIZE; 162 addr = dest + chunk_num * CHUNK_SIZE;
139 p = buf + chunk_num * CHUNK_SIZE; 163 p = buf + chunk_num * CHUNK_SIZE;
164 memcpy(chunk, p, CHUNK_SIZE);
140 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", 165 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
141 p, addr); 166 p, addr);
142 wl1271_spi_mem_write(wl, addr, p, CHUNK_SIZE); 167 wl1271_spi_write(wl, addr, chunk, CHUNK_SIZE, false);
143 168
144 chunk_num++; 169 chunk_num++;
145 } 170 }
@@ -147,28 +172,31 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
147 /* 10.4 upload the last chunk */ 172 /* 10.4 upload the last chunk */
148 addr = dest + chunk_num * CHUNK_SIZE; 173 addr = dest + chunk_num * CHUNK_SIZE;
149 p = buf + chunk_num * CHUNK_SIZE; 174 p = buf + chunk_num * CHUNK_SIZE;
175 memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
150 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", 176 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
151 fw_data_len % CHUNK_SIZE, p, addr); 177 fw_data_len % CHUNK_SIZE, p, addr);
152 wl1271_spi_mem_write(wl, addr, p, fw_data_len % CHUNK_SIZE); 178 wl1271_spi_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
153 179
180 kfree(chunk);
154 return 0; 181 return 0;
155} 182}
156 183
157static int wl1271_boot_upload_firmware(struct wl1271 *wl) 184static int wl1271_boot_upload_firmware(struct wl1271 *wl)
158{ 185{
159 u32 chunks, addr, len; 186 u32 chunks, addr, len;
187 int ret = 0;
160 u8 *fw; 188 u8 *fw;
161 189
162 fw = wl->fw; 190 fw = wl->fw;
163 chunks = be32_to_cpup((u32 *) fw); 191 chunks = be32_to_cpup((__be32 *) fw);
164 fw += sizeof(u32); 192 fw += sizeof(u32);
165 193
166 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); 194 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
167 195
168 while (chunks--) { 196 while (chunks--) {
169 addr = be32_to_cpup((u32 *) fw); 197 addr = be32_to_cpup((__be32 *) fw);
170 fw += sizeof(u32); 198 fw += sizeof(u32);
171 len = be32_to_cpup((u32 *) fw); 199 len = be32_to_cpup((__be32 *) fw);
172 fw += sizeof(u32); 200 fw += sizeof(u32);
173 201
174 if (len > 300000) { 202 if (len > 300000) {
@@ -177,11 +205,13 @@ static int wl1271_boot_upload_firmware(struct wl1271 *wl)
177 } 205 }
178 wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u", 206 wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
179 chunks, addr, len); 207 chunks, addr, len);
180 wl1271_boot_upload_firmware_chunk(wl, fw, len, addr); 208 ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
209 if (ret != 0)
210 break;
181 fw += len; 211 fw += len;
182 } 212 }
183 213
184 return 0; 214 return ret;
185} 215}
186 216
187static int wl1271_boot_upload_nvs(struct wl1271 *wl) 217static int wl1271_boot_upload_nvs(struct wl1271 *wl)
@@ -235,7 +265,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
235 wl1271_debug(DEBUG_BOOT, 265 wl1271_debug(DEBUG_BOOT,
236 "nvs burst write 0x%x: 0x%x", 266 "nvs burst write 0x%x: 0x%x",
237 dest_addr, val); 267 dest_addr, val);
238 wl1271_reg_write32(wl, dest_addr, val); 268 wl1271_spi_write32(wl, dest_addr, val);
239 269
240 nvs_ptr += 4; 270 nvs_ptr += 4;
241 dest_addr += 4; 271 dest_addr += 4;
@@ -253,20 +283,18 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
253 /* FIXME: The driver sets the partition here, but this is not needed, 283 /* FIXME: The driver sets the partition here, but this is not needed,
254 since it sets to the same one as currently in use */ 284 since it sets to the same one as currently in use */
255 /* Now we must set the partition correctly */ 285 /* Now we must set the partition correctly */
256 wl1271_set_partition(wl, 286 wl1271_set_partition(wl, &part_table[PART_WORK]);
257 part_table[PART_WORK].mem.start,
258 part_table[PART_WORK].mem.size,
259 part_table[PART_WORK].reg.start,
260 part_table[PART_WORK].reg.size);
261 287
262 /* Copy the NVS tables to a new block to ensure alignment */ 288 /* Copy the NVS tables to a new block to ensure alignment */
263 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); 289 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
290 if (!nvs_aligned)
291 return -ENOMEM;
264 292
265 /* And finally we upload the NVS tables */ 293 /* And finally we upload the NVS tables */
266 /* FIXME: In wl1271, we upload everything at once. 294 /* FIXME: In wl1271, we upload everything at once.
267 No endianness handling needed here?! The ref driver doesn't do 295 No endianness handling needed here?! The ref driver doesn't do
268 anything about it at this point */ 296 anything about it at this point */
269 wl1271_spi_mem_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len); 297 wl1271_spi_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
270 298
271 kfree(nvs_aligned); 299 kfree(nvs_aligned);
272 return 0; 300 return 0;
@@ -275,9 +303,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
275static void wl1271_boot_enable_interrupts(struct wl1271 *wl) 303static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
276{ 304{
277 enable_irq(wl->irq); 305 enable_irq(wl->irq);
278 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 306 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
279 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 307 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
280 wl1271_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL); 308 wl1271_spi_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
281} 309}
282 310
283static int wl1271_boot_soft_reset(struct wl1271 *wl) 311static int wl1271_boot_soft_reset(struct wl1271 *wl)
@@ -286,12 +314,13 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
286 u32 boot_data; 314 u32 boot_data;
287 315
288 /* perform soft reset */ 316 /* perform soft reset */
289 wl1271_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); 317 wl1271_spi_write32(wl, ACX_REG_SLV_SOFT_RESET,
318 ACX_SLV_SOFT_RESET_BIT);
290 319
291 /* SOFT_RESET is self clearing */ 320 /* SOFT_RESET is self clearing */
292 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); 321 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
293 while (1) { 322 while (1) {
294 boot_data = wl1271_reg_read32(wl, ACX_REG_SLV_SOFT_RESET); 323 boot_data = wl1271_spi_read32(wl, ACX_REG_SLV_SOFT_RESET);
295 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); 324 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
296 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) 325 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
297 break; 326 break;
@@ -307,10 +336,10 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
307 } 336 }
308 337
309 /* disable Rx/Tx */ 338 /* disable Rx/Tx */
310 wl1271_reg_write32(wl, ENABLE, 0x0); 339 wl1271_spi_write32(wl, ENABLE, 0x0);
311 340
312 /* disable auto calibration on start*/ 341 /* disable auto calibration on start*/
313 wl1271_reg_write32(wl, SPARE_A2, 0xffff); 342 wl1271_spi_write32(wl, SPARE_A2, 0xffff);
314 343
315 return 0; 344 return 0;
316} 345}
@@ -322,7 +351,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
322 351
323 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); 352 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
324 353
325 chip_id = wl1271_reg_read32(wl, CHIP_ID_B); 354 chip_id = wl1271_spi_read32(wl, CHIP_ID_B);
326 355
327 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); 356 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
328 357
@@ -335,7 +364,8 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
335 loop = 0; 364 loop = 0;
336 while (loop++ < INIT_LOOP) { 365 while (loop++ < INIT_LOOP) {
337 udelay(INIT_LOOP_DELAY); 366 udelay(INIT_LOOP_DELAY);
338 interrupt = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 367 interrupt = wl1271_spi_read32(wl,
368 ACX_REG_INTERRUPT_NO_CLEAR);
339 369
340 if (interrupt == 0xffffffff) { 370 if (interrupt == 0xffffffff) {
341 wl1271_error("error reading hardware complete " 371 wl1271_error("error reading hardware complete "
@@ -344,7 +374,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
344 } 374 }
345 /* check that ACX_INTR_INIT_COMPLETE is enabled */ 375 /* check that ACX_INTR_INIT_COMPLETE is enabled */
346 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) { 376 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
347 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK, 377 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
348 WL1271_ACX_INTR_INIT_COMPLETE); 378 WL1271_ACX_INTR_INIT_COMPLETE);
349 break; 379 break;
350 } 380 }
@@ -357,17 +387,13 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
357 } 387 }
358 388
359 /* get hardware config command mail box */ 389 /* get hardware config command mail box */
360 wl->cmd_box_addr = wl1271_reg_read32(wl, REG_COMMAND_MAILBOX_PTR); 390 wl->cmd_box_addr = wl1271_spi_read32(wl, REG_COMMAND_MAILBOX_PTR);
361 391
362 /* get hardware config event mail box */ 392 /* get hardware config event mail box */
363 wl->event_box_addr = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR); 393 wl->event_box_addr = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
364 394
365 /* set the working partition to its "running" mode offset */ 395 /* set the working partition to its "running" mode offset */
366 wl1271_set_partition(wl, 396 wl1271_set_partition(wl, &part_table[PART_WORK]);
367 part_table[PART_WORK].mem.start,
368 part_table[PART_WORK].mem.size,
369 part_table[PART_WORK].reg.start,
370 part_table[PART_WORK].reg.size);
371 397
372 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x", 398 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
373 wl->cmd_box_addr, wl->event_box_addr); 399 wl->cmd_box_addr, wl->event_box_addr);
@@ -379,11 +405,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
379 * ready to receive event from the command mailbox 405 * ready to receive event from the command mailbox
380 */ 406 */
381 407
382 /* enable gpio interrupts */ 408 /* unmask required mbox events */
383 wl1271_boot_enable_interrupts(wl); 409 wl->event_mask = BSS_LOSE_EVENT_ID |
384 410 SCAN_COMPLETE_EVENT_ID;
385 /* unmask all mbox events */
386 wl->event_mask = 0xffffffff;
387 411
388 ret = wl1271_event_unmask(wl); 412 ret = wl1271_event_unmask(wl);
389 if (ret < 0) { 413 if (ret < 0) {
@@ -399,34 +423,13 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
399 423
400static int wl1271_boot_write_irq_polarity(struct wl1271 *wl) 424static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
401{ 425{
402 u32 polarity, status, i; 426 u32 polarity;
403
404 wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
405 wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_READ);
406
407 /* Wait until the command is complete (ie. bit 18 is set) */
408 for (i = 0; i < OCP_CMD_LOOP; i++) {
409 polarity = wl1271_reg_read32(wl, OCP_DATA_READ);
410 if (polarity & OCP_READY_MASK)
411 break;
412 }
413 if (i == OCP_CMD_LOOP) {
414 wl1271_error("OCP command timeout!");
415 return -EIO;
416 }
417 427
418 status = polarity & OCP_STATUS_MASK; 428 polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY);
419 if (status != OCP_STATUS_OK) {
420 wl1271_error("OCP command failed (%d)", status);
421 return -EIO;
422 }
423 429
424 /* We use HIGH polarity, so unset the LOW bit */ 430 /* We use HIGH polarity, so unset the LOW bit */
425 polarity &= ~POLARITY_LOW; 431 polarity &= ~POLARITY_LOW;
426 432 wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity);
427 wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
428 wl1271_reg_write32(wl, OCP_DATA_WRITE, polarity);
429 wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_WRITE);
430 433
431 return 0; 434 return 0;
432} 435}
@@ -436,16 +439,32 @@ int wl1271_boot(struct wl1271 *wl)
436 int ret = 0; 439 int ret = 0;
437 u32 tmp, clk, pause; 440 u32 tmp, clk, pause;
438 441
439 if (REF_CLOCK == 0 || REF_CLOCK == 2) 442 if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4)
440 /* ref clk: 19.2/38.4 */ 443 /* ref clk: 19.2/38.4/38.4-XTAL */
441 clk = 0x3; 444 clk = 0x3;
442 else if (REF_CLOCK == 1 || REF_CLOCK == 3) 445 else if (REF_CLOCK == 1 || REF_CLOCK == 3)
443 /* ref clk: 26/52 */ 446 /* ref clk: 26/52 */
444 clk = 0x5; 447 clk = 0x5;
445 448
446 wl1271_reg_write32(wl, PLL_PARAMETERS, clk); 449 if (REF_CLOCK != 0) {
450 u16 val;
451 /* Set clock type */
452 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
453 val &= FREF_CLK_TYPE_BITS;
454 val |= CLK_REQ_PRCM;
455 wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
456 } else {
457 u16 val;
458 /* Set clock polarity */
459 val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY);
460 val &= FREF_CLK_POLARITY_BITS;
461 val |= CLK_REQ_OUTN_SEL;
462 wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
463 }
464
465 wl1271_spi_write32(wl, PLL_PARAMETERS, clk);
447 466
448 pause = wl1271_reg_read32(wl, PLL_PARAMETERS); 467 pause = wl1271_spi_read32(wl, PLL_PARAMETERS);
449 468
450 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); 469 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
451 470
@@ -454,39 +473,31 @@ int wl1271_boot(struct wl1271 *wl)
454 * 0x3ff (magic number ). How does 473 * 0x3ff (magic number ). How does
455 * this work?! */ 474 * this work?! */
456 pause |= WU_COUNTER_PAUSE_VAL; 475 pause |= WU_COUNTER_PAUSE_VAL;
457 wl1271_reg_write32(wl, WU_COUNTER_PAUSE, pause); 476 wl1271_spi_write32(wl, WU_COUNTER_PAUSE, pause);
458 477
459 /* Continue the ELP wake up sequence */ 478 /* Continue the ELP wake up sequence */
460 wl1271_reg_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); 479 wl1271_spi_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
461 udelay(500); 480 udelay(500);
462 481
463 wl1271_set_partition(wl, 482 wl1271_set_partition(wl, &part_table[PART_DRPW]);
464 part_table[PART_DRPW].mem.start,
465 part_table[PART_DRPW].mem.size,
466 part_table[PART_DRPW].reg.start,
467 part_table[PART_DRPW].reg.size);
468 483
469 /* Read-modify-write DRPW_SCRATCH_START register (see next state) 484 /* Read-modify-write DRPW_SCRATCH_START register (see next state)
470 to be used by DRPw FW. The RTRIM value will be added by the FW 485 to be used by DRPw FW. The RTRIM value will be added by the FW
471 before taking DRPw out of reset */ 486 before taking DRPw out of reset */
472 487
473 wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START); 488 wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
474 clk = wl1271_reg_read32(wl, DRPW_SCRATCH_START); 489 clk = wl1271_spi_read32(wl, DRPW_SCRATCH_START);
475 490
476 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 491 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
477 492
478 /* 2 */ 493 /* 2 */
479 clk |= (REF_CLOCK << 1) << 4; 494 clk |= (REF_CLOCK << 1) << 4;
480 wl1271_reg_write32(wl, DRPW_SCRATCH_START, clk); 495 wl1271_spi_write32(wl, DRPW_SCRATCH_START, clk);
481 496
482 wl1271_set_partition(wl, 497 wl1271_set_partition(wl, &part_table[PART_WORK]);
483 part_table[PART_WORK].mem.start,
484 part_table[PART_WORK].mem.size,
485 part_table[PART_WORK].reg.start,
486 part_table[PART_WORK].reg.size);
487 498
488 /* Disable interrupts */ 499 /* Disable interrupts */
489 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 500 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
490 501
491 ret = wl1271_boot_soft_reset(wl); 502 ret = wl1271_boot_soft_reset(wl);
492 if (ret < 0) 503 if (ret < 0)
@@ -501,21 +512,22 @@ int wl1271_boot(struct wl1271 *wl)
501 * ACX_EEPROMLESS_IND_REG */ 512 * ACX_EEPROMLESS_IND_REG */
502 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); 513 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
503 514
504 wl1271_reg_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG); 515 wl1271_spi_write32(wl, ACX_EEPROMLESS_IND_REG,
516 ACX_EEPROMLESS_IND_REG);
505 517
506 tmp = wl1271_reg_read32(wl, CHIP_ID_B); 518 tmp = wl1271_spi_read32(wl, CHIP_ID_B);
507 519
508 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); 520 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
509 521
510 /* 6. read the EEPROM parameters */ 522 /* 6. read the EEPROM parameters */
511 tmp = wl1271_reg_read32(wl, SCR_PAD2); 523 tmp = wl1271_spi_read32(wl, SCR_PAD2);
512 524
513 ret = wl1271_boot_write_irq_polarity(wl); 525 ret = wl1271_boot_write_irq_polarity(wl);
514 if (ret < 0) 526 if (ret < 0)
515 goto out; 527 goto out;
516 528
517 /* FIXME: Need to check whether this is really what we want */ 529 /* FIXME: Need to check whether this is really what we want */
518 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 530 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
519 WL1271_ACX_ALL_EVENTS_VECTOR); 531 WL1271_ACX_ALL_EVENTS_VECTOR);
520 532
521 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly 533 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly
@@ -530,6 +542,9 @@ int wl1271_boot(struct wl1271 *wl)
530 if (ret < 0) 542 if (ret < 0)
531 goto out; 543 goto out;
532 544
545 /* Enable firmware interrupts now */
546 wl1271_boot_enable_interrupts(wl);
547
533 /* set the wl1271 default filters */ 548 /* set the wl1271 default filters */
534 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 549 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
535 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 550 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index b0d8fb46a439..412443ee655a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -50,23 +50,17 @@ struct wl1271_static_data {
50#define WU_COUNTER_PAUSE_VAL 0x3FF 50#define WU_COUNTER_PAUSE_VAL 0x3FF
51#define WELP_ARM_COMMAND_VAL 0x4 51#define WELP_ARM_COMMAND_VAL 0x4
52 52
53#define OCP_CMD_LOOP 32 53#define OCP_REG_POLARITY 0x0064
54 54#define OCP_REG_CLK_TYPE 0x0448
55#define OCP_CMD_WRITE 0x1 55#define OCP_REG_CLK_POLARITY 0x0cb2
56#define OCP_CMD_READ 0x2
57
58#define OCP_READY_MASK BIT(18)
59#define OCP_STATUS_MASK (BIT(16) | BIT(17))
60
61#define OCP_STATUS_NO_RESP 0x00000
62#define OCP_STATUS_OK 0x10000
63#define OCP_STATUS_REQ_FAILED 0x20000
64#define OCP_STATUS_RESP_ERROR 0x30000
65
66#define OCP_REG_POLARITY 0x30032
67 56
68#define CMD_MBOX_ADDRESS 0x407B4 57#define CMD_MBOX_ADDRESS 0x407B4
69 58
70#define POLARITY_LOW BIT(1) 59#define POLARITY_LOW BIT(1)
71 60
61#define FREF_CLK_TYPE_BITS 0xfffffe7f
62#define CLK_REQ_PRCM 0x100
63#define FREF_CLK_POLARITY_BITS 0xfffff8ff
64#define CLK_REQ_OUTN_SEL 0x700
65
72#endif 66#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 2a4351ff54dc..0666328ce9ab 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -50,18 +50,18 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len)
50 int ret = 0; 50 int ret = 0;
51 51
52 cmd = buf; 52 cmd = buf;
53 cmd->id = id; 53 cmd->id = cpu_to_le16(id);
54 cmd->status = 0; 54 cmd->status = 0;
55 55
56 WARN_ON(len % 4 != 0); 56 WARN_ON(len % 4 != 0);
57 57
58 wl1271_spi_mem_write(wl, wl->cmd_box_addr, buf, len); 58 wl1271_spi_write(wl, wl->cmd_box_addr, buf, len, false);
59 59
60 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); 60 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
61 61
62 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); 62 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
63 63
64 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 64 intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
65 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { 65 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
66 if (time_after(jiffies, timeout)) { 66 if (time_after(jiffies, timeout)) {
67 wl1271_error("command complete timeout"); 67 wl1271_error("command complete timeout");
@@ -71,17 +71,17 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len)
71 71
72 msleep(1); 72 msleep(1);
73 73
74 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 74 intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
75 } 75 }
76 76
77 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK, 77 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
78 WL1271_ACX_INTR_CMD_COMPLETE); 78 WL1271_ACX_INTR_CMD_COMPLETE);
79 79
80out: 80out:
81 return ret; 81 return ret;
82} 82}
83 83
84int wl1271_cmd_cal_channel_tune(struct wl1271 *wl) 84static int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
85{ 85{
86 struct wl1271_cmd_cal_channel_tune *cmd; 86 struct wl1271_cmd_cal_channel_tune *cmd;
87 int ret = 0; 87 int ret = 0;
@@ -104,7 +104,7 @@ int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
104 return ret; 104 return ret;
105} 105}
106 106
107int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl) 107static int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
108{ 108{
109 struct wl1271_cmd_cal_update_ref_point *cmd; 109 struct wl1271_cmd_cal_update_ref_point *cmd;
110 int ret = 0; 110 int ret = 0;
@@ -129,7 +129,7 @@ int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
129 return ret; 129 return ret;
130} 130}
131 131
132int wl1271_cmd_cal_p2g(struct wl1271 *wl) 132static int wl1271_cmd_cal_p2g(struct wl1271 *wl)
133{ 133{
134 struct wl1271_cmd_cal_p2g *cmd; 134 struct wl1271_cmd_cal_p2g *cmd;
135 int ret = 0; 135 int ret = 0;
@@ -150,7 +150,7 @@ int wl1271_cmd_cal_p2g(struct wl1271 *wl)
150 return ret; 150 return ret;
151} 151}
152 152
153int wl1271_cmd_cal(struct wl1271 *wl) 153static int wl1271_cmd_cal(struct wl1271 *wl)
154{ 154{
155 /* 155 /*
156 * FIXME: we must make sure that we're not sleeping when calibration 156 * FIXME: we must make sure that we're not sleeping when calibration
@@ -175,11 +175,9 @@ int wl1271_cmd_cal(struct wl1271 *wl)
175 return ret; 175 return ret;
176} 176}
177 177
178int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval, 178int wl1271_cmd_join(struct wl1271 *wl)
179 u16 beacon_interval, u8 wait)
180{ 179{
181 static bool do_cal = true; 180 static bool do_cal = true;
182 unsigned long timeout;
183 struct wl1271_cmd_join *join; 181 struct wl1271_cmd_join *join;
184 int ret, i; 182 int ret, i;
185 u8 *bssid; 183 u8 *bssid;
@@ -193,6 +191,18 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
193 do_cal = false; 191 do_cal = false;
194 } 192 }
195 193
194 /* FIXME: This is a workaround, because with the current stack, we
195 * cannot know when we have disassociated. So, if we have already
196 * joined, we disconnect before joining again. */
197 if (wl->joined) {
198 ret = wl1271_cmd_disconnect(wl);
199 if (ret < 0) {
200 wl1271_error("failed to disconnect before rejoining");
201 goto out;
202 }
203
204 wl->joined = false;
205 }
196 206
197 join = kzalloc(sizeof(*join), GFP_KERNEL); 207 join = kzalloc(sizeof(*join), GFP_KERNEL);
198 if (!join) { 208 if (!join) {
@@ -207,15 +217,34 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
207 for (i = 0; i < ETH_ALEN; i++) 217 for (i = 0; i < ETH_ALEN; i++)
208 bssid[i] = wl->bssid[ETH_ALEN - i - 1]; 218 bssid[i] = wl->bssid[ETH_ALEN - i - 1];
209 219
210 join->rx_config_options = wl->rx_config; 220 join->rx_config_options = cpu_to_le32(wl->rx_config);
211 join->rx_filter_options = wl->rx_filter; 221 join->rx_filter_options = cpu_to_le32(wl->rx_filter);
222 join->bss_type = wl->bss_type;
212 223
213 join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS | 224 /*
214 RATE_MASK_5_5MBPS | RATE_MASK_11MBPS; 225 * FIXME: disable temporarily all filters because after commit
226 * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
227 * association. The filter logic needs to be implemented properly
228 * and once that is done, this hack can be removed.
229 */
230 join->rx_config_options = cpu_to_le32(0);
231 join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
232
233 if (wl->band == IEEE80211_BAND_2GHZ)
234 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS |
235 CONF_HW_BIT_RATE_2MBPS |
236 CONF_HW_BIT_RATE_5_5MBPS |
237 CONF_HW_BIT_RATE_11MBPS);
238 else {
239 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
240 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS |
241 CONF_HW_BIT_RATE_12MBPS |
242 CONF_HW_BIT_RATE_24MBPS);
243 }
244
245 join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT);
246 join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
215 247
216 join->beacon_interval = beacon_interval;
217 join->dtim_interval = dtim_interval;
218 join->bss_type = bss_type;
219 join->channel = wl->channel; 248 join->channel = wl->channel;
220 join->ssid_len = wl->ssid_len; 249 join->ssid_len = wl->ssid_len;
221 memcpy(join->ssid, wl->ssid, wl->ssid_len); 250 memcpy(join->ssid, wl->ssid, wl->ssid_len);
@@ -228,6 +257,10 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
228 257
229 join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET; 258 join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
230 259
260 /* reset TX security counters */
261 wl->tx_security_last_seq = 0;
262 wl->tx_security_seq_16 = 0;
263 wl->tx_security_seq_32 = 0;
231 264
232 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join)); 265 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join));
233 if (ret < 0) { 266 if (ret < 0) {
@@ -235,14 +268,13 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
235 goto out_free; 268 goto out_free;
236 } 269 }
237 270
238 timeout = msecs_to_jiffies(JOIN_TIMEOUT); 271 wl->joined = true;
239 272
240 /* 273 /*
241 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to 274 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
242 * simplify locking we just sleep instead, for now 275 * simplify locking we just sleep instead, for now
243 */ 276 */
244 if (wait) 277 msleep(10);
245 msleep(10);
246 278
247out_free: 279out_free:
248 kfree(join); 280 kfree(join);
@@ -274,19 +306,20 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
274 306
275 if (answer) { 307 if (answer) {
276 struct wl1271_command *cmd_answer; 308 struct wl1271_command *cmd_answer;
309 u16 status;
277 310
278 /* 311 /*
279 * The test command got in, we can read the answer. 312 * The test command got in, we can read the answer.
280 * The answer would be a wl1271_command, where the 313 * The answer would be a wl1271_command, where the
281 * parameter array contains the actual answer. 314 * parameter array contains the actual answer.
282 */ 315 */
283 wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, buf_len); 316 wl1271_spi_read(wl, wl->cmd_box_addr, buf, buf_len, false);
284 317
285 cmd_answer = buf; 318 cmd_answer = buf;
319 status = le16_to_cpu(cmd_answer->header.status);
286 320
287 if (cmd_answer->header.status != CMD_STATUS_SUCCESS) 321 if (status != CMD_STATUS_SUCCESS)
288 wl1271_error("TEST command answer error: %d", 322 wl1271_error("TEST command answer error: %d", status);
289 cmd_answer->header.status);
290 } 323 }
291 324
292 return 0; 325 return 0;
@@ -307,10 +340,10 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
307 340
308 wl1271_debug(DEBUG_CMD, "cmd interrogate"); 341 wl1271_debug(DEBUG_CMD, "cmd interrogate");
309 342
310 acx->id = id; 343 acx->id = cpu_to_le16(id);
311 344
312 /* payload length, does not include any headers */ 345 /* payload length, does not include any headers */
313 acx->len = len - sizeof(*acx); 346 acx->len = cpu_to_le16(len - sizeof(*acx));
314 347
315 ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx)); 348 ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx));
316 if (ret < 0) { 349 if (ret < 0) {
@@ -319,12 +352,12 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
319 } 352 }
320 353
321 /* the interrogate command got in, we can read the answer */ 354 /* the interrogate command got in, we can read the answer */
322 wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, len); 355 wl1271_spi_read(wl, wl->cmd_box_addr, buf, len, false);
323 356
324 acx = buf; 357 acx = buf;
325 if (acx->cmd.status != CMD_STATUS_SUCCESS) 358 if (le16_to_cpu(acx->cmd.status) != CMD_STATUS_SUCCESS)
326 wl1271_error("INTERROGATE command error: %d", 359 wl1271_error("INTERROGATE command error: %d",
327 acx->cmd.status); 360 le16_to_cpu(acx->cmd.status));
328 361
329out: 362out:
330 return ret; 363 return ret;
@@ -345,10 +378,10 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
345 378
346 wl1271_debug(DEBUG_CMD, "cmd configure"); 379 wl1271_debug(DEBUG_CMD, "cmd configure");
347 380
348 acx->id = id; 381 acx->id = cpu_to_le16(id);
349 382
350 /* payload length, does not include any headers */ 383 /* payload length, does not include any headers */
351 acx->len = len - sizeof(*acx); 384 acx->len = cpu_to_le16(len - sizeof(*acx));
352 385
353 ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len); 386 ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len);
354 if (ret < 0) { 387 if (ret < 0) {
@@ -414,8 +447,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
414 int ret = 0; 447 int ret = 0;
415 448
416 /* FIXME: this should be in ps.c */ 449 /* FIXME: this should be in ps.c */
417 ret = wl1271_acx_wake_up_conditions(wl, WAKE_UP_EVENT_DTIM_BITMAP, 450 ret = wl1271_acx_wake_up_conditions(wl);
418 wl->listen_int);
419 if (ret < 0) { 451 if (ret < 0) {
420 wl1271_error("couldn't set wake up conditions"); 452 wl1271_error("couldn't set wake up conditions");
421 goto out; 453 goto out;
@@ -433,7 +465,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
433 ps_params->send_null_data = 1; 465 ps_params->send_null_data = 1;
434 ps_params->retries = 5; 466 ps_params->retries = 5;
435 ps_params->hang_over_period = 128; 467 ps_params->hang_over_period = 128;
436 ps_params->null_data_rate = 1; /* 1 Mbps */ 468 ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
437 469
438 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 470 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
439 sizeof(*ps_params)); 471 sizeof(*ps_params));
@@ -464,8 +496,8 @@ int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
464 WARN_ON(len > MAX_READ_SIZE); 496 WARN_ON(len > MAX_READ_SIZE);
465 len = min_t(size_t, len, MAX_READ_SIZE); 497 len = min_t(size_t, len, MAX_READ_SIZE);
466 498
467 cmd->addr = addr; 499 cmd->addr = cpu_to_le32(addr);
468 cmd->size = len; 500 cmd->size = cpu_to_le32(len);
469 501
470 ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd)); 502 ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd));
471 if (ret < 0) { 503 if (ret < 0) {
@@ -474,11 +506,11 @@ int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
474 } 506 }
475 507
476 /* the read command got in, we can now read the answer */ 508 /* the read command got in, we can now read the answer */
477 wl1271_spi_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd)); 509 wl1271_spi_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd), false);
478 510
479 if (cmd->header.status != CMD_STATUS_SUCCESS) 511 if (le16_to_cpu(cmd->header.status) != CMD_STATUS_SUCCESS)
480 wl1271_error("error in read command result: %d", 512 wl1271_error("error in read command result: %d",
481 cmd->header.status); 513 le16_to_cpu(cmd->header.status));
482 514
483 memcpy(answer, cmd->value, len); 515 memcpy(answer, cmd->value, len);
484 516
@@ -488,14 +520,31 @@ out:
488} 520}
489 521
490int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 522int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
491 u8 active_scan, u8 high_prio, u8 num_channels, 523 u8 active_scan, u8 high_prio, u8 band,
492 u8 probe_requests) 524 u8 probe_requests)
493{ 525{
494 526
495 struct wl1271_cmd_trigger_scan_to *trigger = NULL; 527 struct wl1271_cmd_trigger_scan_to *trigger = NULL;
496 struct wl1271_cmd_scan *params = NULL; 528 struct wl1271_cmd_scan *params = NULL;
497 int i, ret; 529 struct ieee80211_channel *channels;
530 int i, j, n_ch, ret;
498 u16 scan_options = 0; 531 u16 scan_options = 0;
532 u8 ieee_band;
533
534 if (band == WL1271_SCAN_BAND_2_4_GHZ)
535 ieee_band = IEEE80211_BAND_2GHZ;
536 else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled())
537 ieee_band = IEEE80211_BAND_2GHZ;
538 else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled())
539 ieee_band = IEEE80211_BAND_5GHZ;
540 else
541 return -EINVAL;
542
543 if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
544 return -EINVAL;
545
546 channels = wl->hw->wiphy->bands[ieee_band]->channels;
547 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
499 548
500 if (wl->scanning) 549 if (wl->scanning)
501 return -EINVAL; 550 return -EINVAL;
@@ -512,32 +561,43 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
512 scan_options |= WL1271_SCAN_OPT_PASSIVE; 561 scan_options |= WL1271_SCAN_OPT_PASSIVE;
513 if (high_prio) 562 if (high_prio)
514 scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH; 563 scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH;
515 params->params.scan_options = scan_options; 564 params->params.scan_options = cpu_to_le16(scan_options);
516 565
517 params->params.num_channels = num_channels;
518 params->params.num_probe_requests = probe_requests; 566 params->params.num_probe_requests = probe_requests;
519 params->params.tx_rate = cpu_to_le32(RATE_MASK_2MBPS); 567 /* Let the fw autodetect suitable tx_rate for probes */
568 params->params.tx_rate = 0;
520 params->params.tid_trigger = 0; 569 params->params.tid_trigger = 0;
521 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; 570 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
522 571
523 for (i = 0; i < num_channels; i++) { 572 if (band == WL1271_SCAN_BAND_DUAL)
524 params->channels[i].min_duration = 573 params->params.band = WL1271_SCAN_BAND_2_4_GHZ;
525 cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION); 574 else
526 params->channels[i].max_duration = 575 params->params.band = band;
527 cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION); 576
528 memset(&params->channels[i].bssid_lsb, 0xff, 4); 577 for (i = 0, j = 0; i < n_ch && i < WL1271_SCAN_MAX_CHANNELS; i++) {
529 memset(&params->channels[i].bssid_msb, 0xff, 2); 578 if (!(channels[i].flags & IEEE80211_CHAN_DISABLED)) {
530 params->channels[i].early_termination = 0; 579 params->channels[j].min_duration =
531 params->channels[i].tx_power_att = WL1271_SCAN_CURRENT_TX_PWR; 580 cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION);
532 params->channels[i].channel = i + 1; 581 params->channels[j].max_duration =
582 cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION);
583 memset(&params->channels[j].bssid_lsb, 0xff, 4);
584 memset(&params->channels[j].bssid_msb, 0xff, 2);
585 params->channels[j].early_termination = 0;
586 params->channels[j].tx_power_att =
587 WL1271_SCAN_CURRENT_TX_PWR;
588 params->channels[j].channel = channels[i].hw_value;
589 j++;
590 }
533 } 591 }
534 592
593 params->params.num_channels = j;
594
535 if (len && ssid) { 595 if (len && ssid) {
536 params->params.ssid_len = len; 596 params->params.ssid_len = len;
537 memcpy(params->params.ssid, ssid, len); 597 memcpy(params->params.ssid, ssid, len);
538 } 598 }
539 599
540 ret = wl1271_cmd_build_probe_req(wl, ssid, len); 600 ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band);
541 if (ret < 0) { 601 if (ret < 0) {
542 wl1271_error("PROBE request template failed"); 602 wl1271_error("PROBE request template failed");
543 goto out; 603 goto out;
@@ -562,6 +622,19 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
562 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); 622 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
563 623
564 wl->scanning = true; 624 wl->scanning = true;
625 if (wl1271_11a_enabled()) {
626 wl->scan.state = band;
627 if (band == WL1271_SCAN_BAND_DUAL) {
628 wl->scan.active = active_scan;
629 wl->scan.high_prio = high_prio;
630 wl->scan.probe_requests = probe_requests;
631 if (len && ssid) {
632 wl->scan.ssid_len = len;
633 memcpy(wl->scan.ssid, ssid, len);
634 } else
635 wl->scan.ssid_len = 0;
636 }
637 }
565 638
566 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params)); 639 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params));
567 if (ret < 0) { 640 if (ret < 0) {
@@ -569,11 +642,12 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
569 goto out; 642 goto out;
570 } 643 }
571 644
572 wl1271_spi_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params)); 645 wl1271_spi_read(wl, wl->cmd_box_addr, params, sizeof(*params),
646 false);
573 647
574 if (params->header.status != CMD_STATUS_SUCCESS) { 648 if (le16_to_cpu(params->header.status) != CMD_STATUS_SUCCESS) {
575 wl1271_error("Scan command error: %d", 649 wl1271_error("Scan command error: %d",
576 params->header.status); 650 le16_to_cpu(params->header.status));
577 wl->scanning = false; 651 wl->scanning = false;
578 ret = -EIO; 652 ret = -EIO;
579 goto out; 653 goto out;
@@ -603,9 +677,9 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
603 677
604 cmd->len = cpu_to_le16(buf_len); 678 cmd->len = cpu_to_le16(buf_len);
605 cmd->template_type = template_id; 679 cmd->template_type = template_id;
606 cmd->enabled_rates = ACX_RATE_MASK_UNSPECIFIED; 680 cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates);
607 cmd->short_retry_limit = ACX_RATE_RETRY_LIMIT; 681 cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
608 cmd->long_retry_limit = ACX_RATE_RETRY_LIMIT; 682 cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
609 683
610 if (buf) 684 if (buf)
611 memcpy(cmd->template_data, buf, buf_len); 685 memcpy(cmd->template_data, buf, buf_len);
@@ -623,30 +697,62 @@ out:
623 return ret; 697 return ret;
624} 698}
625 699
626static int wl1271_build_basic_rates(char *rates) 700static int wl1271_build_basic_rates(char *rates, u8 band)
627{ 701{
628 u8 index = 0; 702 u8 index = 0;
629 703
630 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; 704 if (band == IEEE80211_BAND_2GHZ) {
631 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; 705 rates[index++] =
632 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB; 706 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
633 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB; 707 rates[index++] =
708 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
709 rates[index++] =
710 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
711 rates[index++] =
712 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
713 } else if (band == IEEE80211_BAND_5GHZ) {
714 rates[index++] =
715 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
716 rates[index++] =
717 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
718 rates[index++] =
719 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
720 } else {
721 wl1271_error("build_basic_rates invalid band: %d", band);
722 }
634 723
635 return index; 724 return index;
636} 725}
637 726
638static int wl1271_build_extended_rates(char *rates) 727static int wl1271_build_extended_rates(char *rates, u8 band)
639{ 728{
640 u8 index = 0; 729 u8 index = 0;
641 730
642 rates[index++] = IEEE80211_OFDM_RATE_6MB; 731 if (band == IEEE80211_BAND_2GHZ) {
643 rates[index++] = IEEE80211_OFDM_RATE_9MB; 732 rates[index++] = IEEE80211_OFDM_RATE_6MB;
644 rates[index++] = IEEE80211_OFDM_RATE_12MB; 733 rates[index++] = IEEE80211_OFDM_RATE_9MB;
645 rates[index++] = IEEE80211_OFDM_RATE_18MB; 734 rates[index++] = IEEE80211_OFDM_RATE_12MB;
646 rates[index++] = IEEE80211_OFDM_RATE_24MB; 735 rates[index++] = IEEE80211_OFDM_RATE_18MB;
647 rates[index++] = IEEE80211_OFDM_RATE_36MB; 736 rates[index++] = IEEE80211_OFDM_RATE_24MB;
648 rates[index++] = IEEE80211_OFDM_RATE_48MB; 737 rates[index++] = IEEE80211_OFDM_RATE_36MB;
649 rates[index++] = IEEE80211_OFDM_RATE_54MB; 738 rates[index++] = IEEE80211_OFDM_RATE_48MB;
739 rates[index++] = IEEE80211_OFDM_RATE_54MB;
740 } else if (band == IEEE80211_BAND_5GHZ) {
741 rates[index++] =
742 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
743 rates[index++] =
744 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
745 rates[index++] =
746 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
747 rates[index++] =
748 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
749 rates[index++] =
750 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
751 rates[index++] =
752 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
753 } else {
754 wl1271_error("build_basic_rates invalid band: %d", band);
755 }
650 756
651 return index; 757 return index;
652} 758}
@@ -665,7 +771,8 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
665 771
666 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); 772 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
667 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | 773 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
668 IEEE80211_STYPE_NULLFUNC); 774 IEEE80211_STYPE_NULLFUNC |
775 IEEE80211_FCTL_TODS);
669 776
670 return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template, 777 return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template,
671 sizeof(template)); 778 sizeof(template));
@@ -678,7 +785,10 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
678 785
679 memcpy(template.bssid, wl->bssid, ETH_ALEN); 786 memcpy(template.bssid, wl->bssid, ETH_ALEN);
680 memcpy(template.ta, wl->mac_addr, ETH_ALEN); 787 memcpy(template.ta, wl->mac_addr, ETH_ALEN);
681 template.aid = aid; 788
789 /* aid in PS-Poll has its two MSBs each set to 1 */
790 template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
791
682 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 792 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
683 793
684 return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template, 794 return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template,
@@ -686,12 +796,14 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
686 796
687} 797}
688 798
689int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len) 799int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
800 u8 band)
690{ 801{
691 struct wl12xx_probe_req_template template; 802 struct wl12xx_probe_req_template template;
692 struct wl12xx_ie_rates *rates; 803 struct wl12xx_ie_rates *rates;
693 char *ptr; 804 char *ptr;
694 u16 size; 805 u16 size;
806 int ret;
695 807
696 ptr = (char *)&template; 808 ptr = (char *)&template;
697 size = sizeof(struct ieee80211_header); 809 size = sizeof(struct ieee80211_header);
@@ -713,20 +825,25 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len)
713 /* Basic Rates */ 825 /* Basic Rates */
714 rates = (struct wl12xx_ie_rates *)ptr; 826 rates = (struct wl12xx_ie_rates *)ptr;
715 rates->header.id = WLAN_EID_SUPP_RATES; 827 rates->header.id = WLAN_EID_SUPP_RATES;
716 rates->header.len = wl1271_build_basic_rates(rates->rates); 828 rates->header.len = wl1271_build_basic_rates(rates->rates, band);
717 size += sizeof(struct wl12xx_ie_header) + rates->header.len; 829 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
718 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len; 830 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
719 831
720 /* Extended rates */ 832 /* Extended rates */
721 rates = (struct wl12xx_ie_rates *)ptr; 833 rates = (struct wl12xx_ie_rates *)ptr;
722 rates->header.id = WLAN_EID_EXT_SUPP_RATES; 834 rates->header.id = WLAN_EID_EXT_SUPP_RATES;
723 rates->header.len = wl1271_build_extended_rates(rates->rates); 835 rates->header.len = wl1271_build_extended_rates(rates->rates, band);
724 size += sizeof(struct wl12xx_ie_header) + rates->header.len; 836 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
725 837
726 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size); 838 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
727 839
728 return wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 840 if (band == IEEE80211_BAND_2GHZ)
729 &template, size); 841 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
842 &template, size);
843 else
844 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
845 &template, size);
846 return ret;
730} 847}
731 848
732int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) 849int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
@@ -743,7 +860,7 @@ int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
743 } 860 }
744 861
745 cmd->id = id; 862 cmd->id = id;
746 cmd->key_action = KEY_SET_ID; 863 cmd->key_action = cpu_to_le16(KEY_SET_ID);
747 cmd->key_type = KEY_WEP; 864 cmd->key_type = KEY_WEP;
748 865
749 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd)); 866 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd));
@@ -759,7 +876,8 @@ out:
759} 876}
760 877
761int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 878int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
762 u8 key_size, const u8 *key, const u8 *addr) 879 u8 key_size, const u8 *key, const u8 *addr,
880 u32 tx_seq_32, u16 tx_seq_16)
763{ 881{
764 struct wl1271_cmd_set_keys *cmd; 882 struct wl1271_cmd_set_keys *cmd;
765 int ret = 0; 883 int ret = 0;
@@ -773,16 +891,18 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
773 if (key_type != KEY_WEP) 891 if (key_type != KEY_WEP)
774 memcpy(cmd->addr, addr, ETH_ALEN); 892 memcpy(cmd->addr, addr, ETH_ALEN);
775 893
776 cmd->key_action = action; 894 cmd->key_action = cpu_to_le16(action);
777 cmd->key_size = key_size; 895 cmd->key_size = key_size;
778 cmd->key_type = key_type; 896 cmd->key_type = key_type;
779 897
898 cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
899 cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
900
780 /* we have only one SSID profile */ 901 /* we have only one SSID profile */
781 cmd->ssid_profile = 0; 902 cmd->ssid_profile = 0;
782 903
783 cmd->id = id; 904 cmd->id = id;
784 905
785 /* FIXME: this is from wl1251, needs to be checked */
786 if (key_type == KEY_TKIP) { 906 if (key_type == KEY_TKIP) {
787 /* 907 /*
788 * We get the key in the following form: 908 * We get the key in the following form:
@@ -811,3 +931,34 @@ out:
811 931
812 return ret; 932 return ret;
813} 933}
934
935int wl1271_cmd_disconnect(struct wl1271 *wl)
936{
937 struct wl1271_cmd_disconnect *cmd;
938 int ret = 0;
939
940 wl1271_debug(DEBUG_CMD, "cmd disconnect");
941
942 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
943 if (!cmd) {
944 ret = -ENOMEM;
945 goto out;
946 }
947
948 cmd->rx_config_options = cpu_to_le32(wl->rx_config);
949 cmd->rx_filter_options = cpu_to_le32(wl->rx_filter);
950 /* disconnect reason is not used in immediate disconnections */
951 cmd->type = DISCONNECT_IMMEDIATE;
952
953 ret = wl1271_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd));
954 if (ret < 0) {
955 wl1271_error("failed to send disconnect command");
956 goto out_free;
957 }
958
959out_free:
960 kfree(cmd);
961
962out:
963 return ret;
964}
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 951a8447a516..174b8209dbf3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -30,8 +30,7 @@
30struct acx_header; 30struct acx_header;
31 31
32int wl1271_cmd_send(struct wl1271 *wl, u16 type, void *buf, size_t buf_len); 32int wl1271_cmd_send(struct wl1271 *wl, u16 type, void *buf, size_t buf_len);
33int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval, 33int wl1271_cmd_join(struct wl1271 *wl);
34 u16 beacon_interval, u8 wait);
35int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 34int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
36int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 35int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
37int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 36int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -40,16 +39,19 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
40int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 39int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
41 size_t len); 40 size_t len);
42int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 41int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
43 u8 active_scan, u8 high_prio, u8 num_channels, 42 u8 active_scan, u8 high_prio, u8 band,
44 u8 probe_requests); 43 u8 probe_requests);
45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 44int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
46 void *buf, size_t buf_len); 45 void *buf, size_t buf_len);
47int wl1271_cmd_build_null_data(struct wl1271 *wl); 46int wl1271_cmd_build_null_data(struct wl1271 *wl);
48int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); 47int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
49int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len); 48int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
49 u8 band);
50int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); 50int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
51int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 51int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
52 u8 key_size, const u8 *key, const u8 *addr); 52 u8 key_size, const u8 *key, const u8 *addr,
53 u32 tx_seq_32, u16 tx_seq_16);
54int wl1271_cmd_disconnect(struct wl1271 *wl);
53 55
54enum wl1271_commands { 56enum wl1271_commands {
55 CMD_INTERROGATE = 1, /*use this to read information elements*/ 57 CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -118,8 +120,8 @@ enum cmd_templ {
118#define WL1271_CMD_TEMPL_MAX_SIZE 252 120#define WL1271_CMD_TEMPL_MAX_SIZE 252
119 121
120struct wl1271_cmd_header { 122struct wl1271_cmd_header {
121 u16 id; 123 __le16 id;
122 u16 status; 124 __le16 status;
123 /* payload */ 125 /* payload */
124 u8 data[0]; 126 u8 data[0];
125} __attribute__ ((packed)); 127} __attribute__ ((packed));
@@ -172,17 +174,17 @@ struct cmd_read_write_memory {
172 struct wl1271_cmd_header header; 174 struct wl1271_cmd_header header;
173 175
174 /* The address of the memory to read from or write to.*/ 176 /* The address of the memory to read from or write to.*/
175 u32 addr; 177 __le32 addr;
176 178
177 /* The amount of data in bytes to read from or write to the WiLink 179 /* The amount of data in bytes to read from or write to the WiLink
178 * device.*/ 180 * device.*/
179 u32 size; 181 __le32 size;
180 182
181 /* The actual value read from or written to the Wilink. The source 183 /* The actual value read from or written to the Wilink. The source
182 of this field is the Host in WRITE command or the Wilink in READ 184 of this field is the Host in WRITE command or the Wilink in READ
183 command. */ 185 command. */
184 u8 value[MAX_READ_SIZE]; 186 u8 value[MAX_READ_SIZE];
185}; 187} __attribute__ ((packed));
186 188
187#define CMDMBOX_HEADER_LEN 4 189#define CMDMBOX_HEADER_LEN 4
188#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 190#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -196,22 +198,23 @@ enum {
196 198
197#define WL1271_JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */ 199#define WL1271_JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */
198#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1 200#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1
201#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10
199 202
200struct wl1271_cmd_join { 203struct wl1271_cmd_join {
201 struct wl1271_cmd_header header; 204 struct wl1271_cmd_header header;
202 205
203 u32 bssid_lsb; 206 __le32 bssid_lsb;
204 u16 bssid_msb; 207 __le16 bssid_msb;
205 u16 beacon_interval; /* in TBTTs */ 208 __le16 beacon_interval; /* in TBTTs */
206 u32 rx_config_options; 209 __le32 rx_config_options;
207 u32 rx_filter_options; 210 __le32 rx_filter_options;
208 211
209 /* 212 /*
210 * The target uses this field to determine the rate at 213 * The target uses this field to determine the rate at
211 * which to transmit control frame responses (such as 214 * which to transmit control frame responses (such as
212 * ACK or CTS frames). 215 * ACK or CTS frames).
213 */ 216 */
214 u32 basic_rate_set; 217 __le32 basic_rate_set;
215 u8 dtim_interval; 218 u8 dtim_interval;
216 /* 219 /*
217 * bits 0-2: This bitwise field specifies the type 220 * bits 0-2: This bitwise field specifies the type
@@ -240,10 +243,10 @@ struct cmd_enabledisable_path {
240struct wl1271_cmd_template_set { 243struct wl1271_cmd_template_set {
241 struct wl1271_cmd_header header; 244 struct wl1271_cmd_header header;
242 245
243 u16 len; 246 __le16 len;
244 u8 template_type; 247 u8 template_type;
245 u8 index; /* relevant only for KLV_TEMPLATE type */ 248 u8 index; /* relevant only for KLV_TEMPLATE type */
246 u32 enabled_rates; 249 __le32 enabled_rates;
247 u8 short_retry_limit; 250 u8 short_retry_limit;
248 u8 long_retry_limit; 251 u8 long_retry_limit;
249 u8 aflags; 252 u8 aflags;
@@ -280,18 +283,13 @@ struct wl1271_cmd_ps_params {
280 * to power save mode. 283 * to power save mode.
281 */ 284 */
282 u8 hang_over_period; 285 u8 hang_over_period;
283 u32 null_data_rate; 286 __le32 null_data_rate;
284} __attribute__ ((packed)); 287} __attribute__ ((packed));
285 288
286/* HW encryption keys */ 289/* HW encryption keys */
287#define NUM_ACCESS_CATEGORIES_COPY 4 290#define NUM_ACCESS_CATEGORIES_COPY 4
288#define MAX_KEY_SIZE 32 291#define MAX_KEY_SIZE 32
289 292
290/* When set, disable HW encryption */
291#define DF_ENCRYPTION_DISABLE 0x01
292/* When set, disable HW decryption */
293#define DF_SNIFF_MODE_ENABLE 0x80
294
295enum wl1271_cmd_key_action { 293enum wl1271_cmd_key_action {
296 KEY_ADD_OR_REPLACE = 1, 294 KEY_ADD_OR_REPLACE = 1,
297 KEY_REMOVE = 2, 295 KEY_REMOVE = 2,
@@ -316,9 +314,9 @@ struct wl1271_cmd_set_keys {
316 u8 addr[ETH_ALEN]; 314 u8 addr[ETH_ALEN];
317 315
318 /* key_action_e */ 316 /* key_action_e */
319 u16 key_action; 317 __le16 key_action;
320 318
321 u16 reserved_1; 319 __le16 reserved_1;
322 320
323 /* key size in bytes */ 321 /* key size in bytes */
324 u8 key_size; 322 u8 key_size;
@@ -334,8 +332,8 @@ struct wl1271_cmd_set_keys {
334 u8 id; 332 u8 id;
335 u8 reserved_2[6]; 333 u8 reserved_2[6];
336 u8 key[MAX_KEY_SIZE]; 334 u8 key[MAX_KEY_SIZE];
337 u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; 335 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
338 u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; 336 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
339} __attribute__ ((packed)); 337} __attribute__ ((packed));
340 338
341 339
@@ -347,19 +345,22 @@ struct wl1271_cmd_set_keys {
347#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 345#define WL1271_SCAN_OPT_PRIORITY_HIGH 4
348#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */ 346#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */
349#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */ 347#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */
348#define WL1271_SCAN_BAND_2_4_GHZ 0
349#define WL1271_SCAN_BAND_5_GHZ 1
350#define WL1271_SCAN_BAND_DUAL 2
350 351
351struct basic_scan_params { 352struct basic_scan_params {
352 u32 rx_config_options; 353 __le32 rx_config_options;
353 u32 rx_filter_options; 354 __le32 rx_filter_options;
354 /* Scan option flags (WL1271_SCAN_OPT_*) */ 355 /* Scan option flags (WL1271_SCAN_OPT_*) */
355 u16 scan_options; 356 __le16 scan_options;
356 /* Number of scan channels in the list (maximum 30) */ 357 /* Number of scan channels in the list (maximum 30) */
357 u8 num_channels; 358 u8 num_channels;
358 /* This field indicates the number of probe requests to send 359 /* This field indicates the number of probe requests to send
359 per channel for an active scan */ 360 per channel for an active scan */
360 u8 num_probe_requests; 361 u8 num_probe_requests;
361 /* Rate bit field for sending the probes */ 362 /* Rate bit field for sending the probes */
362 u32 tx_rate; 363 __le32 tx_rate;
363 u8 tid_trigger; 364 u8 tid_trigger;
364 u8 ssid_len; 365 u8 ssid_len;
365 /* in order to align */ 366 /* in order to align */
@@ -374,10 +375,10 @@ struct basic_scan_params {
374 375
375struct basic_scan_channel_params { 376struct basic_scan_channel_params {
376 /* Duration in TU to wait for frames on a channel for active scan */ 377 /* Duration in TU to wait for frames on a channel for active scan */
377 u32 min_duration; 378 __le32 min_duration;
378 u32 max_duration; 379 __le32 max_duration;
379 u32 bssid_lsb; 380 __le32 bssid_lsb;
380 u16 bssid_msb; 381 __le16 bssid_msb;
381 u8 early_termination; 382 u8 early_termination;
382 u8 tx_power_att; 383 u8 tx_power_att;
383 u8 channel; 384 u8 channel;
@@ -397,13 +398,13 @@ struct wl1271_cmd_scan {
397struct wl1271_cmd_trigger_scan_to { 398struct wl1271_cmd_trigger_scan_to {
398 struct wl1271_cmd_header header; 399 struct wl1271_cmd_header header;
399 400
400 u32 timeout; 401 __le32 timeout;
401}; 402} __attribute__ ((packed));
402 403
403struct wl1271_cmd_test_header { 404struct wl1271_cmd_test_header {
404 u8 id; 405 u8 id;
405 u8 padding[3]; 406 u8 padding[3];
406}; 407} __attribute__ ((packed));
407 408
408enum wl1271_channel_tune_bands { 409enum wl1271_channel_tune_bands {
409 WL1271_CHANNEL_TUNE_BAND_2_4, 410 WL1271_CHANNEL_TUNE_BAND_2_4,
@@ -425,7 +426,7 @@ struct wl1271_cmd_cal_channel_tune {
425 u8 band; 426 u8 band;
426 u8 channel; 427 u8 channel;
427 428
428 u16 radio_status; 429 __le16 radio_status;
429} __attribute__ ((packed)); 430} __attribute__ ((packed));
430 431
431struct wl1271_cmd_cal_update_ref_point { 432struct wl1271_cmd_cal_update_ref_point {
@@ -433,8 +434,8 @@ struct wl1271_cmd_cal_update_ref_point {
433 434
434 struct wl1271_cmd_test_header test; 435 struct wl1271_cmd_test_header test;
435 436
436 s32 ref_power; 437 __le32 ref_power;
437 s32 ref_detector; 438 __le32 ref_detector;
438 u8 sub_band; 439 u8 sub_band;
439 u8 padding[3]; 440 u8 padding[3];
440} __attribute__ ((packed)); 441} __attribute__ ((packed));
@@ -449,16 +450,42 @@ struct wl1271_cmd_cal_p2g {
449 450
450 struct wl1271_cmd_test_header test; 451 struct wl1271_cmd_test_header test;
451 452
452 u16 len; 453 __le16 len;
453 u8 buf[MAX_TLV_LENGTH]; 454 u8 buf[MAX_TLV_LENGTH];
454 u8 type; 455 u8 type;
455 u8 padding; 456 u8 padding;
456 457
457 s16 radio_status; 458 __le16 radio_status;
458 u8 nvs_version[MAX_NVS_VERSION_LENGTH]; 459 u8 nvs_version[MAX_NVS_VERSION_LENGTH];
459 460
460 u8 sub_band_mask; 461 u8 sub_band_mask;
461 u8 padding2; 462 u8 padding2;
462} __attribute__ ((packed)); 463} __attribute__ ((packed));
463 464
465
466/*
467 * There are three types of disconnections:
468 *
469 * DISCONNECT_IMMEDIATE: the fw doesn't send any frames
470 * DISCONNECT_DEAUTH: the fw generates a DEAUTH request with the reason
471 * we have passed
472 * DISCONNECT_DISASSOC: the fw generates a DESASSOC request with the reason
473 * we have passed
474 */
475enum wl1271_disconnect_type {
476 DISCONNECT_IMMEDIATE,
477 DISCONNECT_DEAUTH,
478 DISCONNECT_DISASSOC
479};
480
481struct wl1271_cmd_disconnect {
482 __le32 rx_config_options;
483 __le32 rx_filter_options;
484
485 __le16 reason;
486 u8 type;
487
488 u8 padding;
489} __attribute__ ((packed));
490
464#endif /* __WL1271_CMD_H__ */ 491#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
new file mode 100644
index 000000000000..061d47520a32
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -0,0 +1,911 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __WL1271_CONF_H__
25#define __WL1271_CONF_H__
26
27enum {
28 CONF_HW_BIT_RATE_1MBPS = BIT(0),
29 CONF_HW_BIT_RATE_2MBPS = BIT(1),
30 CONF_HW_BIT_RATE_5_5MBPS = BIT(2),
31 CONF_HW_BIT_RATE_6MBPS = BIT(3),
32 CONF_HW_BIT_RATE_9MBPS = BIT(4),
33 CONF_HW_BIT_RATE_11MBPS = BIT(5),
34 CONF_HW_BIT_RATE_12MBPS = BIT(6),
35 CONF_HW_BIT_RATE_18MBPS = BIT(7),
36 CONF_HW_BIT_RATE_22MBPS = BIT(8),
37 CONF_HW_BIT_RATE_24MBPS = BIT(9),
38 CONF_HW_BIT_RATE_36MBPS = BIT(10),
39 CONF_HW_BIT_RATE_48MBPS = BIT(11),
40 CONF_HW_BIT_RATE_54MBPS = BIT(12),
41 CONF_HW_BIT_RATE_MCS_0 = BIT(13),
42 CONF_HW_BIT_RATE_MCS_1 = BIT(14),
43 CONF_HW_BIT_RATE_MCS_2 = BIT(15),
44 CONF_HW_BIT_RATE_MCS_3 = BIT(16),
45 CONF_HW_BIT_RATE_MCS_4 = BIT(17),
46 CONF_HW_BIT_RATE_MCS_5 = BIT(18),
47 CONF_HW_BIT_RATE_MCS_6 = BIT(19),
48 CONF_HW_BIT_RATE_MCS_7 = BIT(20)
49};
50
51enum {
52 CONF_HW_RATE_INDEX_1MBPS = 0,
53 CONF_HW_RATE_INDEX_2MBPS = 1,
54 CONF_HW_RATE_INDEX_5_5MBPS = 2,
55 CONF_HW_RATE_INDEX_6MBPS = 3,
56 CONF_HW_RATE_INDEX_9MBPS = 4,
57 CONF_HW_RATE_INDEX_11MBPS = 5,
58 CONF_HW_RATE_INDEX_12MBPS = 6,
59 CONF_HW_RATE_INDEX_18MBPS = 7,
60 CONF_HW_RATE_INDEX_22MBPS = 8,
61 CONF_HW_RATE_INDEX_24MBPS = 9,
62 CONF_HW_RATE_INDEX_36MBPS = 10,
63 CONF_HW_RATE_INDEX_48MBPS = 11,
64 CONF_HW_RATE_INDEX_54MBPS = 12,
65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
66};
67
68struct conf_sg_settings {
69 /*
70 * Defines the PER threshold in PPM of the BT voice of which reaching
71 * this value will trigger raising the priority of the BT voice by
72 * the BT IP until next NFS sample interval time as defined in
73 * nfs_sample_interval.
74 *
75 * Unit: PER value in PPM (parts per million)
76 * #Error_packets / #Total_packets
77
78 * Range: u32
79 */
80 u32 per_threshold;
81
82 /*
83 * This value is an absolute time in micro-seconds to limit the
84 * maximum scan duration compensation while in SG
85 */
86 u32 max_scan_compensation_time;
87
88 /* Defines the PER threshold of the BT voice of which reaching this
89 * value will trigger raising the priority of the BT voice until next
90 * NFS sample interval time as defined in sample_interval.
91 *
92 * Unit: msec
93 * Range: 1-65000
94 */
95 u16 nfs_sample_interval;
96
97 /*
98 * Defines the load ratio for the BT.
99 * The WLAN ratio is: 100 - load_ratio
100 *
101 * Unit: Percent
102 * Range: 0-100
103 */
104 u8 load_ratio;
105
106 /*
107 * true - Co-ex is allowed to enter/exit P.S automatically and
108 * transparently to the host
109 *
110 * false - Co-ex is disallowed to enter/exit P.S and will trigger an
111 * event to the host to notify for the need to enter/exit P.S
112 * due to BT change state
113 *
114 */
115 u8 auto_ps_mode;
116
117 /*
118 * This parameter defines the compensation percentage of num of probe
119 * requests in case scan is initiated during BT voice/BT ACL
120 * guaranteed link.
121 *
122 * Unit: Percent
123 * Range: 0-255 (0 - No compensation)
124 */
125 u8 probe_req_compensation;
126
127 /*
128 * This parameter defines the compensation percentage of scan window
129 * size in case scan is initiated during BT voice/BT ACL Guaranteed
130 * link.
131 *
132 * Unit: Percent
133 * Range: 0-255 (0 - No compensation)
134 */
135 u8 scan_window_compensation;
136
137 /*
138 * Defines the antenna configuration.
139 *
140 * Range: 0 - Single Antenna; 1 - Dual Antenna
141 */
142 u8 antenna_config;
143
144 /*
145 * The percent out of the Max consecutive beacon miss roaming trigger
146 * which is the threshold for raising the priority of beacon
147 * reception.
148 *
149 * Range: 1-100
150 * N = MaxConsecutiveBeaconMiss
151 * P = coexMaxConsecutiveBeaconMissPrecent
152 * Threshold = MIN( N-1, round(N * P / 100))
153 */
154 u8 beacon_miss_threshold;
155
156 /*
157 * The RX rate threshold below which rate adaptation is assumed to be
158 * occurring at the AP which will raise priority for ACTIVE_RX and RX
159 * SP.
160 *
161 * Range: HW_BIT_RATE_*
162 */
163 u32 rate_adaptation_threshold;
164
165 /*
166 * The SNR above which the RX rate threshold indicating AP rate
167 * adaptation is valid
168 *
169 * Range: -128 - 127
170 */
171 s8 rate_adaptation_snr;
172};
173
174enum conf_rx_queue_type {
175 CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */
176 CONF_RX_QUEUE_TYPE_HIGH_PRIORITY, /* Management and voice packets */
177};
178
179struct conf_rx_settings {
180 /*
181 * The maximum amount of time, in TU, before the
182 * firmware discards the MSDU.
183 *
184 * Range: 0 - 0xFFFFFFFF
185 */
186 u32 rx_msdu_life_time;
187
188 /*
189 * Packet detection threshold in the PHY.
190 *
191 * FIXME: details unknown.
192 */
193 u32 packet_detection_threshold;
194
195 /*
196 * The longest time the STA will wait to receive traffic from the AP
197 * after a PS-poll has been transmitted.
198 *
199 * Range: 0 - 200000
200 */
201 u16 ps_poll_timeout;
202 /*
203 * The longest time the STA will wait to receive traffic from the AP
204 * after a frame has been sent from an UPSD enabled queue.
205 *
206 * Range: 0 - 200000
207 */
208 u16 upsd_timeout;
209
210 /*
211 * The number of octets in an MPDU, below which an RTS/CTS
212 * handshake is not performed.
213 *
214 * Range: 0 - 4096
215 */
216 u16 rts_threshold;
217
218 /*
219 * The RX Clear Channel Assessment threshold in the PHY
220 * (the energy threshold).
221 *
222 * Range: ENABLE_ENERGY_D == 0x140A
223 * DISABLE_ENERGY_D == 0xFFEF
224 */
225 u16 rx_cca_threshold;
226
227 /*
228 * Occupied Rx mem-blocks number which requires interrupting the host
229 * (0 = no buffering, 0xffff = disabled).
230 *
231 * Range: u16
232 */
233 u16 irq_blk_threshold;
234
235 /*
236 * Rx packets number which requires interrupting the host
237 * (0 = no buffering).
238 *
239 * Range: u16
240 */
241 u16 irq_pkt_threshold;
242
243 /*
244 * Max time in msec the FW may delay RX-Complete interrupt.
245 *
246 * Range: 1 - 100
247 */
248 u16 irq_timeout;
249
250 /*
251 * The RX queue type.
252 *
253 * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY,
254 */
255 u8 queue_type;
256};
257
258#define CONF_TX_MAX_RATE_CLASSES 8
259
260#define CONF_TX_RATE_MASK_UNSPECIFIED 0
261#define CONF_TX_RATE_MASK_ALL 0x1eff
262#define CONF_TX_RATE_RETRY_LIMIT 10
263
264struct conf_tx_rate_class {
265
266 /*
267 * The rates enabled for this rate class.
268 *
269 * Range: CONF_HW_BIT_RATE_* bit mask
270 */
271 u32 enabled_rates;
272
273 /*
274 * The dot11 short retry limit used for TX retries.
275 *
276 * Range: u8
277 */
278 u8 short_retry_limit;
279
280 /*
281 * The dot11 long retry limit used for TX retries.
282 *
283 * Range: u8
284 */
285 u8 long_retry_limit;
286
287 /*
288 * Flags controlling the attributes of TX transmission.
289 *
290 * Range: bit 0: Truncate - when set, FW attempts to send a frame stop
291 * when the total valid per-rate attempts have
292 * been exhausted; otherwise transmissions
293 * will continue at the lowest available rate
294 * until the appropriate one of the
295 * short_retry_limit, long_retry_limit,
296 * dot11_max_transmit_msdu_life_time, or
297 * max_tx_life_time, is exhausted.
298 * 1: Preamble Override - indicates if the preamble type
299 * should be used in TX.
300 * 2: Preamble Type - the type of the preamble to be used by
301 * the policy (0 - long preamble, 1 - short preamble.
302 */
303 u8 aflags;
304};
305
306#define CONF_TX_MAX_AC_COUNT 4
307
308/* Slot number setting to start transmission at PIFS interval */
309#define CONF_TX_AIFS_PIFS 1
310/* Slot number setting to start transmission at DIFS interval normal
311 * DCF access */
312#define CONF_TX_AIFS_DIFS 2
313
314
315enum conf_tx_ac {
316 CONF_TX_AC_BE = 0, /* best effort / legacy */
317 CONF_TX_AC_BK = 1, /* background */
318 CONF_TX_AC_VI = 2, /* video */
319 CONF_TX_AC_VO = 3, /* voice */
320 CONF_TX_AC_CTS2SELF = 4, /* fictious AC, follows AC_VO */
321 CONF_TX_AC_ANY_TID = 0x1f
322};
323
324struct conf_tx_ac_category {
325 /*
326 * The AC class identifier.
327 *
328 * Range: enum conf_tx_ac
329 */
330 u8 ac;
331
332 /*
333 * The contention window minimum size (in slots) for the access
334 * class.
335 *
336 * Range: u8
337 */
338 u8 cw_min;
339
340 /*
341 * The contention window maximum size (in slots) for the access
342 * class.
343 *
344 * Range: u8
345 */
346 u16 cw_max;
347
348 /*
349 * The AIF value (in slots) for the access class.
350 *
351 * Range: u8
352 */
353 u8 aifsn;
354
355 /*
356 * The TX Op Limit (in microseconds) for the access class.
357 *
358 * Range: u16
359 */
360 u16 tx_op_limit;
361};
362
363#define CONF_TX_MAX_TID_COUNT 7
364
365enum {
366 CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/
367 CONF_CHANNEL_TYPE_EDCF = 1, /* EDCA*/
368 CONF_CHANNEL_TYPE_HCCA = 2, /* HCCA*/
369};
370
371enum {
372 CONF_PS_SCHEME_LEGACY = 0,
373 CONF_PS_SCHEME_UPSD_TRIGGER = 1,
374 CONF_PS_SCHEME_LEGACY_PSPOLL = 2,
375 CONF_PS_SCHEME_SAPSD = 3,
376};
377
378enum {
379 CONF_ACK_POLICY_LEGACY = 0,
380 CONF_ACK_POLICY_NO_ACK = 1,
381 CONF_ACK_POLICY_BLOCK = 2,
382};
383
384
385struct conf_tx_tid {
386 u8 queue_id;
387 u8 channel_type;
388 u8 tsid;
389 u8 ps_scheme;
390 u8 ack_policy;
391 u32 apsd_conf[2];
392};
393
394struct conf_tx_settings {
395 /*
396 * The TX ED value for TELEC Enable/Disable.
397 *
398 * Range: 0, 1
399 */
400 u8 tx_energy_detection;
401
402 /*
403 * Configuration for rate classes for TX (currently only one
404 * rate class supported.)
405 */
406 struct conf_tx_rate_class rc_conf;
407
408 /*
409 * Configuration for access categories for TX rate control.
410 */
411 u8 ac_conf_count;
412 struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
413
414 /*
415 * Configuration for TID parameters.
416 */
417 u8 tid_conf_count;
418 struct conf_tx_tid tid_conf[CONF_TX_MAX_TID_COUNT];
419
420 /*
421 * The TX fragmentation threshold.
422 *
423 * Range: u16
424 */
425 u16 frag_threshold;
426
427 /*
428 * Max time in msec the FW may delay frame TX-Complete interrupt.
429 *
430 * Range: u16
431 */
432 u16 tx_compl_timeout;
433
434 /*
435 * Completed TX packet count which requires to issue the TX-Complete
436 * interrupt.
437 *
438 * Range: u16
439 */
440 u16 tx_compl_threshold;
441
442};
443
444enum {
445 CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/
446 CONF_WAKE_UP_EVENT_DTIM = 0x02, /* Wake on every DTIM*/
447 CONF_WAKE_UP_EVENT_N_DTIM = 0x04, /* Wake every Nth DTIM */
448 CONF_WAKE_UP_EVENT_N_BEACONS = 0x08, /* Wake every Nth beacon */
449 CONF_WAKE_UP_EVENT_BITS_MASK = 0x0F
450};
451
452#define CONF_MAX_BCN_FILT_IE_COUNT 32
453
454#define CONF_BCN_RULE_PASS_ON_CHANGE BIT(0)
455#define CONF_BCN_RULE_PASS_ON_APPEARANCE BIT(1)
456
457#define CONF_BCN_IE_OUI_LEN 3
458#define CONF_BCN_IE_VER_LEN 2
459
460struct conf_bcn_filt_rule {
461 /*
462 * IE number to which to associate a rule.
463 *
464 * Range: u8
465 */
466 u8 ie;
467
468 /*
469 * Rule to associate with the specific ie.
470 *
471 * Range: CONF_BCN_RULE_PASS_ON_*
472 */
473 u8 rule;
474
475 /*
476 * OUI for the vendor specifie IE (221)
477 */
478 u8 oui[CONF_BCN_IE_OUI_LEN];
479
480 /*
481 * Type for the vendor specifie IE (221)
482 */
483 u8 type;
484
485 /*
486 * Version for the vendor specifie IE (221)
487 */
488 u8 version[CONF_BCN_IE_VER_LEN];
489};
490
491#define CONF_MAX_RSSI_SNR_TRIGGERS 8
492
493enum {
494 CONF_TRIG_METRIC_RSSI_BEACON = 0,
495 CONF_TRIG_METRIC_RSSI_DATA,
496 CONF_TRIG_METRIC_SNR_BEACON,
497 CONF_TRIG_METRIC_SNR_DATA
498};
499
500enum {
501 CONF_TRIG_EVENT_TYPE_LEVEL = 0,
502 CONF_TRIG_EVENT_TYPE_EDGE
503};
504
505enum {
506 CONF_TRIG_EVENT_DIR_LOW = 0,
507 CONF_TRIG_EVENT_DIR_HIGH,
508 CONF_TRIG_EVENT_DIR_BIDIR
509};
510
511
512struct conf_sig_trigger {
513 /*
514 * The RSSI / SNR threshold value.
515 *
516 * FIXME: what is the range?
517 */
518 s16 threshold;
519
520 /*
521 * Minimum delay between two trigger events for this trigger in ms.
522 *
523 * Range: 0 - 60000
524 */
525 u16 pacing;
526
527 /*
528 * The measurement data source for this trigger.
529 *
530 * Range: CONF_TRIG_METRIC_*
531 */
532 u8 metric;
533
534 /*
535 * The trigger type of this trigger.
536 *
537 * Range: CONF_TRIG_EVENT_TYPE_*
538 */
539 u8 type;
540
541 /*
542 * The direction of the trigger.
543 *
544 * Range: CONF_TRIG_EVENT_DIR_*
545 */
546 u8 direction;
547
548 /*
549 * Hysteresis range of the trigger around the threshold (in dB)
550 *
551 * Range: u8
552 */
553 u8 hysteresis;
554
555 /*
556 * Index of the trigger rule.
557 *
558 * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
559 */
560 u8 index;
561
562 /*
563 * Enable / disable this rule (to use for clearing rules.)
564 *
565 * Range: 1 - Enabled, 2 - Not enabled
566 */
567 u8 enable;
568};
569
570struct conf_sig_weights {
571
572 /*
573 * RSSI from beacons average weight.
574 *
575 * Range: u8
576 */
577 u8 rssi_bcn_avg_weight;
578
579 /*
580 * RSSI from data average weight.
581 *
582 * Range: u8
583 */
584 u8 rssi_pkt_avg_weight;
585
586 /*
587 * SNR from beacons average weight.
588 *
589 * Range: u8
590 */
591 u8 snr_bcn_avg_weight;
592
593 /*
594 * SNR from data average weight.
595 *
596 * Range: u8
597 */
598 u8 snr_pkt_avg_weight;
599};
600
601enum conf_bcn_filt_mode {
602 CONF_BCN_FILT_MODE_DISABLED = 0,
603 CONF_BCN_FILT_MODE_ENABLED = 1
604};
605
606enum conf_bet_mode {
607 CONF_BET_MODE_DISABLE = 0,
608 CONF_BET_MODE_ENABLE = 1,
609};
610
611struct conf_conn_settings {
612 /*
613 * Firmware wakeup conditions configuration. The host may set only
614 * one bit.
615 *
616 * Range: CONF_WAKE_UP_EVENT_*
617 */
618 u8 wake_up_event;
619
620 /*
621 * Listen interval for beacons or Dtims.
622 *
623 * Range: 0 for beacon and Dtim wakeup
624 * 1-10 for x Dtims
625 * 1-255 for x beacons
626 */
627 u8 listen_interval;
628
629 /*
630 * Enable or disable the beacon filtering.
631 *
632 * Range: CONF_BCN_FILT_MODE_*
633 */
634 enum conf_bcn_filt_mode bcn_filt_mode;
635
636 /*
637 * Configure Beacon filter pass-thru rules.
638 */
639 u8 bcn_filt_ie_count;
640 struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT];
641
642 /*
643 * The number of consequtive beacons to lose, before the firmware
644 * becomes out of synch.
645 *
646 * Range: u32
647 */
648 u32 synch_fail_thold;
649
650 /*
651 * After out-of-synch, the number of TU's to wait without a further
652 * received beacon (or probe response) before issuing the BSS_EVENT_LOSE
653 * event.
654 *
655 * Range: u32
656 */
657 u32 bss_lose_timeout;
658
659 /*
660 * Beacon receive timeout.
661 *
662 * Range: u32
663 */
664 u32 beacon_rx_timeout;
665
666 /*
667 * Broadcast receive timeout.
668 *
669 * Range: u32
670 */
671 u32 broadcast_timeout;
672
673 /*
674 * Enable/disable reception of broadcast packets in power save mode
675 *
676 * Range: 1 - enable, 0 - disable
677 */
678 u8 rx_broadcast_in_ps;
679
680 /*
681 * Consequtive PS Poll failures before sending event to driver
682 *
683 * Range: u8
684 */
685 u8 ps_poll_threshold;
686
687 /*
688 * Configuration of signal (rssi/snr) triggers.
689 */
690 u8 sig_trigger_count;
691 struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];
692
693 /*
694 * Configuration of signal average weights.
695 */
696 struct conf_sig_weights sig_weights;
697
698 /*
699 * Specifies if beacon early termination procedure is enabled or
700 * disabled.
701 *
702 * Range: CONF_BET_MODE_*
703 */
704 u8 bet_enable;
705
706 /*
707 * Specifies the maximum number of consecutive beacons that may be
708 * early terminated. After this number is reached at least one full
709 * beacon must be correctly received in FW before beacon ET
710 * resumes.
711 *
712 * Range 0 - 255
713 */
714 u8 bet_max_consecutive;
715};
716
717#define CONF_SR_ERR_TBL_MAX_VALUES 14
718
719struct conf_mart_reflex_err_table {
720 /*
721 * Length of the error table values table.
722 *
723 * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
724 */
725 u8 len;
726
727 /*
728 * Smart Reflex error table upper limit.
729 *
730 * Range: s8
731 */
732 s8 upper_limit;
733
734 /*
735 * Smart Reflex error table values.
736 *
737 * Range: s8
738 */
739 s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
740};
741
742enum {
743 CONF_REF_CLK_19_2_E,
744 CONF_REF_CLK_26_E,
745 CONF_REF_CLK_38_4_E,
746 CONF_REF_CLK_52_E
747};
748
749enum single_dual_band_enum {
750 CONF_SINGLE_BAND,
751 CONF_DUAL_BAND
752};
753
754struct conf_general_parms {
755 /*
756 * RF Reference Clock type / speed
757 *
758 * Range: CONF_REF_CLK_*
759 */
760 u8 ref_clk;
761
762 /*
763 * Settling time of the reference clock after boot.
764 *
765 * Range: u8
766 */
767 u8 settling_time;
768
769 /*
770 * Flag defining whether clock is valid on wakeup.
771 *
772 * Range: 0 - not valid on wakeup, 1 - valid on wakeup
773 */
774 u8 clk_valid_on_wakeup;
775
776 /*
777 * DC-to-DC mode.
778 *
779 * Range: Unknown
780 */
781 u8 dc2dcmode;
782
783 /*
784 * Flag defining whether used as single or dual-band.
785 *
786 * Range: CONF_SINGLE_BAND, CONF_DUAL_BAND
787 */
788 u8 single_dual_band;
789
790 /*
791 * TX bip fem autodetect flag.
792 *
793 * Range: Unknown
794 */
795 u8 tx_bip_fem_autodetect;
796
797 /*
798 * TX bip gem manufacturer.
799 *
800 * Range: Unknown
801 */
802 u8 tx_bip_fem_manufacturer;
803
804 /*
805 * Settings flags.
806 *
807 * Range: Unknown
808 */
809 u8 settings;
810};
811
812#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
813#define CONF_NUMBER_OF_SUB_BANDS_5 7
814#define CONF_NUMBER_OF_RATE_GROUPS 6
815#define CONF_NUMBER_OF_CHANNELS_2_4 14
816#define CONF_NUMBER_OF_CHANNELS_5 35
817
818struct conf_radio_parms {
819 /*
820 * Static radio parameters for 2.4GHz
821 *
822 * Range: unknown
823 */
824 u8 rx_trace_loss;
825 u8 tx_trace_loss;
826 s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
827
828 /*
829 * Static radio parameters for 5GHz
830 *
831 * Range: unknown
832 */
833 u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
834 u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
835 s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
836
837 /*
838 * Dynamic radio parameters for 2.4GHz
839 *
840 * Range: unknown
841 */
842 s16 tx_ref_pd_voltage;
843 s8 tx_ref_power;
844 s8 tx_offset_db;
845
846 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
847 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
848
849 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
850 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
851 s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
852
853 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
854 u8 rx_fem_insertion_loss;
855
856 /*
857 * Dynamic radio parameters for 5GHz
858 *
859 * Range: unknown
860 */
861 s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
862 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
863 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
864
865 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
866 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
867
868 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
869 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
870
871 /* FIXME: this is inconsistent with the types for 2.4GHz */
872 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
873 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
874};
875
876#define CONF_SR_ERR_TBL_COUNT 3
877
878struct conf_init_settings {
879 /*
880 * Configure Smart Reflex error table values.
881 */
882 struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];
883
884 /*
885 * Smart Reflex enable flag.
886 *
887 * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
888 */
889 u8 sr_enable;
890
891 /*
892 * Configure general parameters.
893 */
894 struct conf_general_parms genparam;
895
896 /*
897 * Configure radio parameters.
898 */
899 struct conf_radio_parms radioparam;
900
901};
902
903struct conf_drv_settings {
904 struct conf_sg_settings sg;
905 struct conf_rx_settings rx;
906 struct conf_tx_settings tx;
907 struct conf_conn_settings conn;
908 struct conf_init_settings init;
909};
910
911#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index f3afd4a6ff33..31d396ba9188 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -26,20 +26,45 @@
26#include "wl1271_spi.h" 26#include "wl1271_spi.h"
27#include "wl1271_event.h" 27#include "wl1271_event.h"
28#include "wl1271_ps.h" 28#include "wl1271_ps.h"
29#include "wl12xx_80211.h"
29 30
30static int wl1271_event_scan_complete(struct wl1271 *wl, 31static int wl1271_event_scan_complete(struct wl1271 *wl,
31 struct event_mailbox *mbox) 32 struct event_mailbox *mbox)
32{ 33{
34 int size = sizeof(struct wl12xx_probe_req_template);
33 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 35 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
34 mbox->scheduled_scan_status); 36 mbox->scheduled_scan_status);
35 37
36 if (wl->scanning) { 38 if (wl->scanning) {
37 mutex_unlock(&wl->mutex); 39 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
38 ieee80211_scan_completed(wl->hw, false); 40 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
39 mutex_lock(&wl->mutex); 41 NULL, size);
40 wl->scanning = false; 42 /* 2.4 GHz band scanned, scan 5 GHz band, pretend
43 * to the wl1271_cmd_scan function that we are not
44 * scanning as it checks that.
45 */
46 wl->scanning = false;
47 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
48 wl->scan.active,
49 wl->scan.high_prio,
50 WL1271_SCAN_BAND_5_GHZ,
51 wl->scan.probe_requests);
52 } else {
53 if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
54 wl1271_cmd_template_set(wl,
55 CMD_TEMPL_CFG_PROBE_REQ_2_4,
56 NULL, size);
57 else
58 wl1271_cmd_template_set(wl,
59 CMD_TEMPL_CFG_PROBE_REQ_5,
60 NULL, size);
61
62 mutex_unlock(&wl->mutex);
63 ieee80211_scan_completed(wl->hw, false);
64 mutex_lock(&wl->mutex);
65 wl->scanning = false;
66 }
41 } 67 }
42
43 return 0; 68 return 0;
44} 69}
45 70
@@ -57,7 +82,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
57 82
58 wl1271_event_mbox_dump(mbox); 83 wl1271_event_mbox_dump(mbox);
59 84
60 vector = mbox->events_vector & ~(mbox->events_mask); 85 vector = le32_to_cpu(mbox->events_vector);
86 vector &= ~(le32_to_cpu(mbox->events_mask));
61 wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector); 87 wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector);
62 88
63 if (vector & SCAN_COMPLETE_EVENT_ID) { 89 if (vector & SCAN_COMPLETE_EVENT_ID) {
@@ -66,14 +92,16 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
66 return ret; 92 return ret;
67 } 93 }
68 94
69 if (vector & BSS_LOSE_EVENT_ID) { 95 /*
96 * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
97 * filtering) is enabled. Without PSM, the stack will receive all
98 * beacons and can detect beacon loss by itself.
99 */
100 if (vector & BSS_LOSE_EVENT_ID && wl->psm) {
70 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); 101 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
71 102
72 if (wl->psm_requested && wl->psm) { 103 /* indicate to the stack, that beacons have been lost */
73 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE); 104 ieee80211_beacon_loss(wl->vif);
74 if (ret < 0)
75 return ret;
76 }
77 } 105 }
78 106
79 return 0; 107 return 0;
@@ -92,14 +120,14 @@ int wl1271_event_unmask(struct wl1271 *wl)
92 120
93void wl1271_event_mbox_config(struct wl1271 *wl) 121void wl1271_event_mbox_config(struct wl1271 *wl)
94{ 122{
95 wl->mbox_ptr[0] = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR); 123 wl->mbox_ptr[0] = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
96 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); 124 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
97 125
98 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x", 126 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
99 wl->mbox_ptr[0], wl->mbox_ptr[1]); 127 wl->mbox_ptr[0], wl->mbox_ptr[1]);
100} 128}
101 129
102int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) 130int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
103{ 131{
104 struct event_mailbox mbox; 132 struct event_mailbox mbox;
105 int ret; 133 int ret;
@@ -110,8 +138,8 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
110 return -EINVAL; 138 return -EINVAL;
111 139
112 /* first we read the mbox descriptor */ 140 /* first we read the mbox descriptor */
113 wl1271_spi_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox, 141 wl1271_spi_read(wl, wl->mbox_ptr[mbox_num], &mbox,
114 sizeof(struct event_mailbox)); 142 sizeof(struct event_mailbox), false);
115 143
116 /* process the descriptor */ 144 /* process the descriptor */
117 ret = wl1271_event_process(wl, &mbox); 145 ret = wl1271_event_process(wl, &mbox);
@@ -119,7 +147,9 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
119 return ret; 147 return ret;
120 148
121 /* then we let the firmware know it can go on...*/ 149 /* then we let the firmware know it can go on...*/
122 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); 150 if (do_ack)
151 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
152 INTR_TRIG_EVENT_ACK);
123 153
124 return 0; 154 return 0;
125} 155}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 2cdce7c34bf0..3ab53d331f15 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -66,33 +66,33 @@ enum {
66struct event_debug_report { 66struct event_debug_report {
67 u8 debug_event_id; 67 u8 debug_event_id;
68 u8 num_params; 68 u8 num_params;
69 u16 pad; 69 __le16 pad;
70 u32 report_1; 70 __le32 report_1;
71 u32 report_2; 71 __le32 report_2;
72 u32 report_3; 72 __le32 report_3;
73} __attribute__ ((packed)); 73} __attribute__ ((packed));
74 74
75#define NUM_OF_RSSI_SNR_TRIGGERS 8 75#define NUM_OF_RSSI_SNR_TRIGGERS 8
76 76
77struct event_mailbox { 77struct event_mailbox {
78 u32 events_vector; 78 __le32 events_vector;
79 u32 events_mask; 79 __le32 events_mask;
80 u32 reserved_1; 80 __le32 reserved_1;
81 u32 reserved_2; 81 __le32 reserved_2;
82 82
83 u8 dbg_event_id; 83 u8 dbg_event_id;
84 u8 num_relevant_params; 84 u8 num_relevant_params;
85 u16 reserved_3; 85 __le16 reserved_3;
86 u32 event_report_p1; 86 __le32 event_report_p1;
87 u32 event_report_p2; 87 __le32 event_report_p2;
88 u32 event_report_p3; 88 __le32 event_report_p3;
89 89
90 u8 number_of_scan_results; 90 u8 number_of_scan_results;
91 u8 scan_tag; 91 u8 scan_tag;
92 u8 reserved_4[2]; 92 u8 reserved_4[2];
93 u32 compl_scheduled_scan_status; 93 __le32 compl_scheduled_scan_status;
94 94
95 u16 scheduled_scan_attended_channels; 95 __le16 scheduled_scan_attended_channels;
96 u8 soft_gemini_sense_info; 96 u8 soft_gemini_sense_info;
97 u8 soft_gemini_protective_info; 97 u8 soft_gemini_protective_info;
98 s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS]; 98 s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
@@ -105,6 +105,6 @@ struct event_mailbox {
105 105
106int wl1271_event_unmask(struct wl1271 *wl); 106int wl1271_event_unmask(struct wl1271 *wl);
107void wl1271_event_mbox_config(struct wl1271 *wl); 107void wl1271_event_mbox_config(struct wl1271 *wl);
108int wl1271_event_handle(struct wl1271 *wl, u8 mbox); 108int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack);
109 109
110#endif 110#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 490df217605a..417b4152feb1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -59,6 +59,14 @@ static int wl1271_init_templates_config(struct wl1271 *wl)
59 if (ret < 0) 59 if (ret < 0)
60 return ret; 60 return ret;
61 61
62 if (wl1271_11a_enabled()) {
63 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
64 NULL,
65 sizeof(struct wl12xx_probe_req_template));
66 if (ret < 0)
67 return ret;
68 }
69
62 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, 70 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
63 sizeof(struct wl12xx_null_data_template)); 71 sizeof(struct wl12xx_null_data_template));
64 if (ret < 0) 72 if (ret < 0)
@@ -94,7 +102,7 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
94{ 102{
95 int ret; 103 int ret;
96 104
97 ret = wl1271_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF); 105 ret = wl1271_acx_rx_msdu_life_time(wl);
98 if (ret < 0) 106 if (ret < 0)
99 return ret; 107 return ret;
100 108
@@ -117,7 +125,7 @@ static int wl1271_init_phy_config(struct wl1271 *wl)
117 if (ret < 0) 125 if (ret < 0)
118 return ret; 126 return ret;
119 127
120 ret = wl1271_acx_group_address_tbl(wl); 128 ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
121 if (ret < 0) 129 if (ret < 0)
122 return ret; 130 return ret;
123 131
@@ -125,7 +133,7 @@ static int wl1271_init_phy_config(struct wl1271 *wl)
125 if (ret < 0) 133 if (ret < 0)
126 return ret; 134 return ret;
127 135
128 ret = wl1271_acx_rts_threshold(wl, RTS_THRESHOLD_DEF); 136 ret = wl1271_acx_rts_threshold(wl, wl->conf.rx.rts_threshold);
129 if (ret < 0) 137 if (ret < 0)
130 return ret; 138 return ret;
131 139
@@ -136,7 +144,8 @@ static int wl1271_init_beacon_filter(struct wl1271 *wl)
136{ 144{
137 int ret; 145 int ret;
138 146
139 ret = wl1271_acx_beacon_filter_opt(wl); 147 /* disable beacon filtering at this stage */
148 ret = wl1271_acx_beacon_filter_opt(wl, false);
140 if (ret < 0) 149 if (ret < 0)
141 return ret; 150 return ret;
142 151
@@ -187,6 +196,7 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
187static int wl1271_init_general_parms(struct wl1271 *wl) 196static int wl1271_init_general_parms(struct wl1271 *wl)
188{ 197{
189 struct wl1271_general_parms *gen_parms; 198 struct wl1271_general_parms *gen_parms;
199 struct conf_general_parms *g = &wl->conf.init.genparam;
190 int ret; 200 int ret;
191 201
192 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); 202 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
@@ -195,15 +205,14 @@ static int wl1271_init_general_parms(struct wl1271 *wl)
195 205
196 gen_parms->id = TEST_CMD_INI_FILE_GENERAL_PARAM; 206 gen_parms->id = TEST_CMD_INI_FILE_GENERAL_PARAM;
197 207
198 gen_parms->ref_clk = REF_CLK_38_4_E; 208 gen_parms->ref_clk = g->ref_clk;
199 /* FIXME: magic numbers */ 209 gen_parms->settling_time = g->settling_time;
200 gen_parms->settling_time = 5; 210 gen_parms->clk_valid_on_wakeup = g->clk_valid_on_wakeup;
201 gen_parms->clk_valid_on_wakeup = 0; 211 gen_parms->dc2dcmode = g->dc2dcmode;
202 gen_parms->dc2dcmode = 0; 212 gen_parms->single_dual_band = g->single_dual_band;
203 gen_parms->single_dual_band = 0; 213 gen_parms->tx_bip_fem_autodetect = g->tx_bip_fem_autodetect;
204 gen_parms->tx_bip_fem_autodetect = 1; 214 gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
205 gen_parms->tx_bip_fem_manufacturer = 1; 215 gen_parms->settings = g->settings;
206 gen_parms->settings = 1;
207 216
208 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); 217 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
209 if (ret < 0) { 218 if (ret < 0) {
@@ -217,32 +226,9 @@ static int wl1271_init_general_parms(struct wl1271 *wl)
217 226
218static int wl1271_init_radio_parms(struct wl1271 *wl) 227static int wl1271_init_radio_parms(struct wl1271 *wl)
219{ 228{
220 /*
221 * FIXME: All these magic numbers should be moved to some place where
222 * they can be configured (separate file?)
223 */
224
225 struct wl1271_radio_parms *radio_parms; 229 struct wl1271_radio_parms *radio_parms;
226 int ret; 230 struct conf_radio_parms *r = &wl->conf.init.radioparam;
227 u8 compensation[] = { 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8, 0xfc, 0x00, 231 int i, ret;
228 0x08, 0x10, 0xf0, 0xf8, 0x00, 0x0a, 0x14 };
229
230 u8 tx_rate_limits_normal[] = { 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 };
231 u8 tx_rate_limits_degraded[] = { 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 };
232
233 u8 tx_channel_limits_11b[] = { 0x22, 0x50, 0x50, 0x50,
234 0x50, 0x50, 0x50, 0x50,
235 0x50, 0x50, 0x22, 0x50,
236 0x22, 0x50 };
237
238 u8 tx_channel_limits_ofdm[] = { 0x20, 0x50, 0x50, 0x50,
239 0x50, 0x50, 0x50, 0x50,
240 0x50, 0x50, 0x20, 0x50,
241 0x20, 0x50 };
242
243 u8 tx_pdv_rate_offsets[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
244
245 u8 tx_ibias[] = { 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 };
246 232
247 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); 233 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
248 if (!radio_parms) 234 if (!radio_parms)
@@ -251,33 +237,59 @@ static int wl1271_init_radio_parms(struct wl1271 *wl)
251 radio_parms->id = TEST_CMD_INI_FILE_RADIO_PARAM; 237 radio_parms->id = TEST_CMD_INI_FILE_RADIO_PARAM;
252 238
253 /* Static radio parameters */ 239 /* Static radio parameters */
254 radio_parms->rx_trace_loss = 10; 240 radio_parms->rx_trace_loss = r->rx_trace_loss;
255 radio_parms->tx_trace_loss = 10; 241 radio_parms->tx_trace_loss = r->tx_trace_loss;
256 memcpy(radio_parms->rx_rssi_and_proc_compens, compensation, 242 memcpy(radio_parms->rx_rssi_and_proc_compens,
257 sizeof(compensation)); 243 r->rx_rssi_and_proc_compens,
258 244 CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
259 /* We don't set the 5GHz -- N/A */ 245
246 memcpy(radio_parms->rx_trace_loss_5, r->rx_trace_loss_5,
247 CONF_NUMBER_OF_SUB_BANDS_5);
248 memcpy(radio_parms->tx_trace_loss_5, r->tx_trace_loss_5,
249 CONF_NUMBER_OF_SUB_BANDS_5);
250 memcpy(radio_parms->rx_rssi_and_proc_compens_5,
251 r->rx_rssi_and_proc_compens_5,
252 CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
260 253
261 /* Dynamic radio parameters */ 254 /* Dynamic radio parameters */
262 radio_parms->tx_ref_pd_voltage = cpu_to_le16(0x24e); 255 radio_parms->tx_ref_pd_voltage = cpu_to_le16(r->tx_ref_pd_voltage);
263 radio_parms->tx_ref_power = 0x78; 256 radio_parms->tx_ref_power = r->tx_ref_power;
264 radio_parms->tx_offset_db = 0x0; 257 radio_parms->tx_offset_db = r->tx_offset_db;
265 258
266 memcpy(radio_parms->tx_rate_limits_normal, tx_rate_limits_normal, 259 memcpy(radio_parms->tx_rate_limits_normal, r->tx_rate_limits_normal,
267 sizeof(tx_rate_limits_normal)); 260 CONF_NUMBER_OF_RATE_GROUPS);
268 memcpy(radio_parms->tx_rate_limits_degraded, tx_rate_limits_degraded, 261 memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
269 sizeof(tx_rate_limits_degraded)); 262 CONF_NUMBER_OF_RATE_GROUPS);
270 263
271 memcpy(radio_parms->tx_channel_limits_11b, tx_channel_limits_11b, 264 memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
272 sizeof(tx_channel_limits_11b)); 265 CONF_NUMBER_OF_CHANNELS_2_4);
273 memcpy(radio_parms->tx_channel_limits_ofdm, tx_channel_limits_ofdm, 266 memcpy(radio_parms->tx_channel_limits_ofdm, r->tx_channel_limits_ofdm,
274 sizeof(tx_channel_limits_ofdm)); 267 CONF_NUMBER_OF_CHANNELS_2_4);
275 memcpy(radio_parms->tx_pdv_rate_offsets, tx_pdv_rate_offsets, 268 memcpy(radio_parms->tx_pdv_rate_offsets, r->tx_pdv_rate_offsets,
276 sizeof(tx_pdv_rate_offsets)); 269 CONF_NUMBER_OF_RATE_GROUPS);
277 memcpy(radio_parms->tx_ibias, tx_ibias, 270 memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
278 sizeof(tx_ibias)); 271
279 272 radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
280 radio_parms->rx_fem_insertion_loss = 0x14; 273
274 for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
275 radio_parms->tx_ref_pd_voltage_5[i] =
276 cpu_to_le16(r->tx_ref_pd_voltage_5[i]);
277 memcpy(radio_parms->tx_ref_power_5, r->tx_ref_power_5,
278 CONF_NUMBER_OF_SUB_BANDS_5);
279 memcpy(radio_parms->tx_offset_db_5, r->tx_offset_db_5,
280 CONF_NUMBER_OF_SUB_BANDS_5);
281 memcpy(radio_parms->tx_rate_limits_normal_5,
282 r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
283 memcpy(radio_parms->tx_rate_limits_degraded_5,
284 r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
285 memcpy(radio_parms->tx_channel_limits_ofdm_5,
286 r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
287 memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
288 CONF_NUMBER_OF_RATE_GROUPS);
289 memcpy(radio_parms->tx_ibias_5, r->tx_ibias_5,
290 CONF_NUMBER_OF_RATE_GROUPS);
291 memcpy(radio_parms->rx_fem_insertion_loss_5,
292 r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
281 293
282 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0); 294 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
283 if (ret < 0) 295 if (ret < 0)
@@ -311,8 +323,8 @@ int wl1271_hw_init(struct wl1271 *wl)
311 323
312 /* RX config */ 324 /* RX config */
313 ret = wl1271_init_rx_config(wl, 325 ret = wl1271_init_rx_config(wl,
314 RX_CFG_PROMISCUOUS | RX_CFG_TSF, 326 RX_CFG_PROMISCUOUS | RX_CFG_TSF,
315 RX_FILTER_OPTION_DEF); 327 RX_FILTER_OPTION_DEF);
316 /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS, 328 /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
317 RX_FILTER_OPTION_FILTER_ALL); */ 329 RX_FILTER_OPTION_FILTER_ALL); */
318 if (ret < 0) 330 if (ret < 0)
@@ -323,6 +335,11 @@ int wl1271_hw_init(struct wl1271 *wl)
323 if (ret < 0) 335 if (ret < 0)
324 goto out_free_memmap; 336 goto out_free_memmap;
325 337
338 /* Initialize connection monitoring thresholds */
339 ret = wl1271_acx_conn_monit_params(wl);
340 if (ret < 0)
341 goto out_free_memmap;
342
326 /* Beacon filtering */ 343 /* Beacon filtering */
327 ret = wl1271_init_beacon_filter(wl); 344 ret = wl1271_init_beacon_filter(wl);
328 if (ret < 0) 345 if (ret < 0)
@@ -369,7 +386,7 @@ int wl1271_hw_init(struct wl1271 *wl)
369 goto out_free_memmap; 386 goto out_free_memmap;
370 387
371 /* Configure TX rate classes */ 388 /* Configure TX rate classes */
372 ret = wl1271_acx_rate_policies(wl); 389 ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL);
373 if (ret < 0) 390 if (ret < 0)
374 goto out_free_memmap; 391 goto out_free_memmap;
375 392
@@ -388,10 +405,16 @@ int wl1271_hw_init(struct wl1271 *wl)
388 if (ret < 0) 405 if (ret < 0)
389 goto out_free_memmap; 406 goto out_free_memmap;
390 407
408 /* Configure smart reflex */
409 ret = wl1271_acx_smart_reflex(wl);
410 if (ret < 0)
411 goto out_free_memmap;
412
391 return 0; 413 return 0;
392 414
393 out_free_memmap: 415 out_free_memmap:
394 kfree(wl->target_mem_map); 416 kfree(wl->target_mem_map);
417 wl->target_mem_map = NULL;
395 418
396 return ret; 419 return ret;
397} 420}
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/wl1271_init.h
index bd8ff0fa2272..6e21ceee76a6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/wl1271_init.h
@@ -48,19 +48,6 @@ struct wl1271_general_parms {
48 u8 settings; 48 u8 settings;
49} __attribute__ ((packed)); 49} __attribute__ ((packed));
50 50
51enum ref_clk_enum {
52 REF_CLK_19_2_E,
53 REF_CLK_26_E,
54 REF_CLK_38_4_E,
55 REF_CLK_52_E
56};
57
58#define RSSI_AND_PROCESS_COMPENSATION_SIZE 15
59#define NUMBER_OF_SUB_BANDS_5 7
60#define NUMBER_OF_RATE_GROUPS 6
61#define NUMBER_OF_CHANNELS_2_4 14
62#define NUMBER_OF_CHANNELS_5 35
63
64struct wl1271_radio_parms { 51struct wl1271_radio_parms {
65 u8 id; 52 u8 id;
66 u8 padding[3]; 53 u8 padding[3];
@@ -69,45 +56,45 @@ struct wl1271_radio_parms {
69 /* 2.4GHz */ 56 /* 2.4GHz */
70 u8 rx_trace_loss; 57 u8 rx_trace_loss;
71 u8 tx_trace_loss; 58 u8 tx_trace_loss;
72 s8 rx_rssi_and_proc_compens[RSSI_AND_PROCESS_COMPENSATION_SIZE]; 59 s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
73 60
74 /* 5GHz */ 61 /* 5GHz */
75 u8 rx_trace_loss_5[NUMBER_OF_SUB_BANDS_5]; 62 u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
76 u8 tx_trace_loss_5[NUMBER_OF_SUB_BANDS_5]; 63 u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
77 s8 rx_rssi_and_proc_compens_5[RSSI_AND_PROCESS_COMPENSATION_SIZE]; 64 s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
78 65
79 /* Dynamic radio parameters */ 66 /* Dynamic radio parameters */
80 /* 2.4GHz */ 67 /* 2.4GHz */
81 s16 tx_ref_pd_voltage; 68 __le16 tx_ref_pd_voltage;
82 s8 tx_ref_power; 69 s8 tx_ref_power;
83 s8 tx_offset_db; 70 s8 tx_offset_db;
84 71
85 s8 tx_rate_limits_normal[NUMBER_OF_RATE_GROUPS]; 72 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
86 s8 tx_rate_limits_degraded[NUMBER_OF_RATE_GROUPS]; 73 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
87 74
88 s8 tx_channel_limits_11b[NUMBER_OF_CHANNELS_2_4]; 75 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
89 s8 tx_channel_limits_ofdm[NUMBER_OF_CHANNELS_2_4]; 76 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
90 s8 tx_pdv_rate_offsets[NUMBER_OF_RATE_GROUPS]; 77 s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
91 78
92 u8 tx_ibias[NUMBER_OF_RATE_GROUPS]; 79 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
93 u8 rx_fem_insertion_loss; 80 u8 rx_fem_insertion_loss;
94 81
95 u8 padding2; 82 u8 padding2;
96 83
97 /* 5GHz */ 84 /* 5GHz */
98 s16 tx_ref_pd_voltage_5[NUMBER_OF_SUB_BANDS_5]; 85 __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
99 s8 tx_ref_power_5[NUMBER_OF_SUB_BANDS_5]; 86 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
100 s8 tx_offset_db_5[NUMBER_OF_SUB_BANDS_5]; 87 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
101 88
102 s8 tx_rate_limits_normal_5[NUMBER_OF_RATE_GROUPS]; 89 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
103 s8 tx_rate_limits_degraded_5[NUMBER_OF_RATE_GROUPS]; 90 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
104 91
105 s8 tx_channel_limits_ofdm_5[NUMBER_OF_CHANNELS_5]; 92 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
106 s8 tx_pdv_rate_offsets_5[NUMBER_OF_RATE_GROUPS]; 93 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
107 94
108 /* FIXME: this is inconsistent with the types for 2.4GHz */ 95 /* FIXME: this is inconsistent with the types for 2.4GHz */
109 s8 tx_ibias_5[NUMBER_OF_RATE_GROUPS]; 96 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
110 s8 rx_fem_insertion_loss_5[NUMBER_OF_SUB_BANDS_5]; 97 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
111 98
112 u8 padding3[2]; 99 u8 padding3[2];
113} __attribute__ ((packed)); 100} __attribute__ ((packed));
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 27298b19d5bd..86132bb00787 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -30,7 +30,9 @@
30#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
31#include <linux/crc32.h> 31#include <linux/crc32.h>
32#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
33#include <linux/vmalloc.h>
33#include <linux/spi/wl12xx.h> 34#include <linux/spi/wl12xx.h>
35#include <linux/inetdevice.h>
34 36
35#include "wl1271.h" 37#include "wl1271.h"
36#include "wl12xx_80211.h" 38#include "wl12xx_80211.h"
@@ -45,6 +47,308 @@
45#include "wl1271_cmd.h" 47#include "wl1271_cmd.h"
46#include "wl1271_boot.h" 48#include "wl1271_boot.h"
47 49
50static struct conf_drv_settings default_conf = {
51 .sg = {
52 .per_threshold = 7500,
53 .max_scan_compensation_time = 120000,
54 .nfs_sample_interval = 400,
55 .load_ratio = 50,
56 .auto_ps_mode = 0,
57 .probe_req_compensation = 170,
58 .scan_window_compensation = 50,
59 .antenna_config = 0,
60 .beacon_miss_threshold = 60,
61 .rate_adaptation_threshold = CONF_HW_BIT_RATE_12MBPS,
62 .rate_adaptation_snr = 0
63 },
64 .rx = {
65 .rx_msdu_life_time = 512000,
66 .packet_detection_threshold = 0,
67 .ps_poll_timeout = 15,
68 .upsd_timeout = 15,
69 .rts_threshold = 2347,
70 .rx_cca_threshold = 0xFFEF,
71 .irq_blk_threshold = 0,
72 .irq_pkt_threshold = USHORT_MAX,
73 .irq_timeout = 5,
74 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
75 },
76 .tx = {
77 .tx_energy_detection = 0,
78 .rc_conf = {
79 .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED,
80 .short_retry_limit = 10,
81 .long_retry_limit = 10,
82 .aflags = 0
83 },
84 .ac_conf_count = 4,
85 .ac_conf = {
86 [0] = {
87 .ac = CONF_TX_AC_BE,
88 .cw_min = 15,
89 .cw_max = 63,
90 .aifsn = 3,
91 .tx_op_limit = 0,
92 },
93 [1] = {
94 .ac = CONF_TX_AC_BK,
95 .cw_min = 15,
96 .cw_max = 63,
97 .aifsn = 7,
98 .tx_op_limit = 0,
99 },
100 [2] = {
101 .ac = CONF_TX_AC_VI,
102 .cw_min = 15,
103 .cw_max = 63,
104 .aifsn = CONF_TX_AIFS_PIFS,
105 .tx_op_limit = 3008,
106 },
107 [3] = {
108 .ac = CONF_TX_AC_VO,
109 .cw_min = 15,
110 .cw_max = 63,
111 .aifsn = CONF_TX_AIFS_PIFS,
112 .tx_op_limit = 1504,
113 },
114 },
115 .tid_conf_count = 7,
116 .tid_conf = {
117 [0] = {
118 .queue_id = 0,
119 .channel_type = CONF_CHANNEL_TYPE_DCF,
120 .tsid = CONF_TX_AC_BE,
121 .ps_scheme = CONF_PS_SCHEME_LEGACY,
122 .ack_policy = CONF_ACK_POLICY_LEGACY,
123 .apsd_conf = {0, 0},
124 },
125 [1] = {
126 .queue_id = 1,
127 .channel_type = CONF_CHANNEL_TYPE_DCF,
128 .tsid = CONF_TX_AC_BE,
129 .ps_scheme = CONF_PS_SCHEME_LEGACY,
130 .ack_policy = CONF_ACK_POLICY_LEGACY,
131 .apsd_conf = {0, 0},
132 },
133 [2] = {
134 .queue_id = 2,
135 .channel_type = CONF_CHANNEL_TYPE_DCF,
136 .tsid = CONF_TX_AC_BE,
137 .ps_scheme = CONF_PS_SCHEME_LEGACY,
138 .ack_policy = CONF_ACK_POLICY_LEGACY,
139 .apsd_conf = {0, 0},
140 },
141 [3] = {
142 .queue_id = 3,
143 .channel_type = CONF_CHANNEL_TYPE_DCF,
144 .tsid = CONF_TX_AC_BE,
145 .ps_scheme = CONF_PS_SCHEME_LEGACY,
146 .ack_policy = CONF_ACK_POLICY_LEGACY,
147 .apsd_conf = {0, 0},
148 },
149 [4] = {
150 .queue_id = 4,
151 .channel_type = CONF_CHANNEL_TYPE_DCF,
152 .tsid = CONF_TX_AC_BE,
153 .ps_scheme = CONF_PS_SCHEME_LEGACY,
154 .ack_policy = CONF_ACK_POLICY_LEGACY,
155 .apsd_conf = {0, 0},
156 },
157 [5] = {
158 .queue_id = 5,
159 .channel_type = CONF_CHANNEL_TYPE_DCF,
160 .tsid = CONF_TX_AC_BE,
161 .ps_scheme = CONF_PS_SCHEME_LEGACY,
162 .ack_policy = CONF_ACK_POLICY_LEGACY,
163 .apsd_conf = {0, 0},
164 },
165 [6] = {
166 .queue_id = 6,
167 .channel_type = CONF_CHANNEL_TYPE_DCF,
168 .tsid = CONF_TX_AC_BE,
169 .ps_scheme = CONF_PS_SCHEME_LEGACY,
170 .ack_policy = CONF_ACK_POLICY_LEGACY,
171 .apsd_conf = {0, 0},
172 }
173 },
174 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
175 .tx_compl_timeout = 5,
176 .tx_compl_threshold = 5
177 },
178 .conn = {
179 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
180 .listen_interval = 0,
181 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
182 .bcn_filt_ie_count = 1,
183 .bcn_filt_ie = {
184 [0] = {
185 .ie = WLAN_EID_CHANNEL_SWITCH,
186 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
187 }
188 },
189 .synch_fail_thold = 5,
190 .bss_lose_timeout = 100,
191 .beacon_rx_timeout = 10000,
192 .broadcast_timeout = 20000,
193 .rx_broadcast_in_ps = 1,
194 .ps_poll_threshold = 4,
195 .sig_trigger_count = 2,
196 .sig_trigger = {
197 [0] = {
198 .threshold = -75,
199 .pacing = 500,
200 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
201 .type = CONF_TRIG_EVENT_TYPE_EDGE,
202 .direction = CONF_TRIG_EVENT_DIR_LOW,
203 .hysteresis = 2,
204 .index = 0,
205 .enable = 1
206 },
207 [1] = {
208 .threshold = -75,
209 .pacing = 500,
210 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
211 .type = CONF_TRIG_EVENT_TYPE_EDGE,
212 .direction = CONF_TRIG_EVENT_DIR_HIGH,
213 .hysteresis = 2,
214 .index = 1,
215 .enable = 1
216 }
217 },
218 .sig_weights = {
219 .rssi_bcn_avg_weight = 10,
220 .rssi_pkt_avg_weight = 10,
221 .snr_bcn_avg_weight = 10,
222 .snr_pkt_avg_weight = 10
223 },
224 .bet_enable = CONF_BET_MODE_ENABLE,
225 .bet_max_consecutive = 100
226 },
227 .init = {
228 .sr_err_tbl = {
229 [0] = {
230 .len = 7,
231 .upper_limit = 0x03,
232 .values = {
233 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
234 0x00 }
235 },
236 [1] = {
237 .len = 7,
238 .upper_limit = 0x03,
239 .values = {
240 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
241 0x00 }
242 },
243 [2] = {
244 .len = 7,
245 .upper_limit = 0x03,
246 .values = {
247 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
248 0x00 }
249 }
250 },
251 .sr_enable = 1,
252 .genparam = {
253 /*
254 * FIXME: The correct value CONF_REF_CLK_38_4_E
255 * causes the firmware to crash on boot.
256 * The value 5 apparently is an
257 * unnoficial XTAL configuration of the
258 * same frequency, which appears to work.
259 */
260 .ref_clk = 5,
261 .settling_time = 5,
262 .clk_valid_on_wakeup = 0,
263 .dc2dcmode = 0,
264 .single_dual_band = CONF_SINGLE_BAND,
265 .tx_bip_fem_autodetect = 0,
266 .tx_bip_fem_manufacturer = 1,
267 .settings = 1,
268 },
269 .radioparam = {
270 .rx_trace_loss = 10,
271 .tx_trace_loss = 10,
272 .rx_rssi_and_proc_compens = {
273 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
274 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
275 0x00, 0x0a, 0x14 },
276 .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
277 .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
278 .rx_rssi_and_proc_compens_5 = {
279 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00 },
282 .tx_ref_pd_voltage = 0x24e,
283 .tx_ref_power = 0x78,
284 .tx_offset_db = 0x0,
285 .tx_rate_limits_normal = {
286 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
287 .tx_rate_limits_degraded = {
288 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
289 .tx_channel_limits_11b = {
290 0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
291 0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
292 0x22, 0x50 },
293 .tx_channel_limits_ofdm = {
294 0x20, 0x50, 0x50, 0x50, 0x50, 0x50,
295 0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
296 0x20, 0x50 },
297 .tx_pdv_rate_offsets = {
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
299 .tx_ibias = {
300 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
301 .rx_fem_insertion_loss = 0x14,
302 .tx_ref_pd_voltage_5 = {
303 0x0190, 0x01a4, 0x01c3, 0x01d8,
304 0x020a, 0x021c },
305 .tx_ref_power_5 = {
306 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 },
307 .tx_offset_db_5 = {
308 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
309 .tx_rate_limits_normal_5 = {
310 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
311 .tx_rate_limits_degraded_5 = {
312 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
313 .tx_channel_limits_ofdm_5 = {
314 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
315 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
316 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
317 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
318 0x50, 0x50, 0x50 },
319 .tx_pdv_rate_offsets_5 = {
320 0x01, 0x02, 0x02, 0x02, 0x02, 0x00 },
321 .tx_ibias_5 = {
322 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
323 .rx_fem_insertion_loss_5 = {
324 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
325 }
326 }
327};
328
329static LIST_HEAD(wl_list);
330
331static void wl1271_conf_init(struct wl1271 *wl)
332{
333
334 /*
335 * This function applies the default configuration to the driver. This
336 * function is invoked upon driver load (spi probe.)
337 *
338 * The configuration is stored in a run-time structure in order to
339 * facilitate for run-time adjustment of any of the parameters. Making
340 * changes to the configuration structure will apply the new values on
341 * the next interface up (wl1271_op_start.)
342 */
343
344 /* apply driver default configuration */
345 memcpy(&wl->conf, &default_conf, sizeof(default_conf));
346
347 if (wl1271_11a_enabled())
348 wl->conf.init.genparam.single_dual_band = CONF_DUAL_BAND;
349}
350
351
48static int wl1271_plt_init(struct wl1271 *wl) 352static int wl1271_plt_init(struct wl1271 *wl)
49{ 353{
50 int ret; 354 int ret;
@@ -75,20 +379,14 @@ static void wl1271_power_on(struct wl1271 *wl)
75 wl->set_power(true); 379 wl->set_power(true);
76} 380}
77 381
78static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status) 382static void wl1271_fw_status(struct wl1271 *wl,
383 struct wl1271_fw_status *status)
79{ 384{
80 u32 total = 0; 385 u32 total = 0;
81 int i; 386 int i;
82 387
83 /* 388 wl1271_spi_read(wl, FW_STATUS_ADDR, status,
84 * FIXME: Reading the FW status directly from the registers seems to 389 sizeof(*status), false);
85 * be the right thing to do, but it doesn't work. And in the
86 * reference driver, there is a workaround called
87 * USE_SDIO_24M_WORKAROUND, which reads the status from memory
88 * instead, so we do the same here.
89 */
90
91 wl1271_spi_mem_read(wl, STATUS_MEM_ADDRESS, status, sizeof(*status));
92 390
93 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 391 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
94 "drv_rx_counter = %d, tx_results_counter = %d)", 392 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -99,25 +397,28 @@ static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status)
99 397
100 /* update number of available TX blocks */ 398 /* update number of available TX blocks */
101 for (i = 0; i < NUM_TX_QUEUES; i++) { 399 for (i = 0; i < NUM_TX_QUEUES; i++) {
102 u32 cnt = status->tx_released_blks[i] - wl->tx_blocks_freed[i]; 400 u32 cnt = le32_to_cpu(status->tx_released_blks[i]) -
103 wl->tx_blocks_freed[i] = status->tx_released_blks[i]; 401 wl->tx_blocks_freed[i];
402
403 wl->tx_blocks_freed[i] =
404 le32_to_cpu(status->tx_released_blks[i]);
104 wl->tx_blocks_available += cnt; 405 wl->tx_blocks_available += cnt;
105 total += cnt; 406 total += cnt;
106 } 407 }
107 408
108 /* if more blocks are available now, schedule some tx work */ 409 /* if more blocks are available now, schedule some tx work */
109 if (total && !skb_queue_empty(&wl->tx_queue)) 410 if (total && !skb_queue_empty(&wl->tx_queue))
110 schedule_work(&wl->tx_work); 411 ieee80211_queue_work(wl->hw, &wl->tx_work);
111 412
112 /* update the host-chipset time offset */ 413 /* update the host-chipset time offset */
113 wl->time_offset = jiffies_to_usecs(jiffies) - status->fw_localtime; 414 wl->time_offset = jiffies_to_usecs(jiffies) -
415 le32_to_cpu(status->fw_localtime);
114} 416}
115 417
116#define WL1271_IRQ_MAX_LOOPS 10
117static void wl1271_irq_work(struct work_struct *work) 418static void wl1271_irq_work(struct work_struct *work)
118{ 419{
119 u32 intr, ctr = WL1271_IRQ_MAX_LOOPS;
120 int ret; 420 int ret;
421 u32 intr;
121 struct wl1271 *wl = 422 struct wl1271 *wl =
122 container_of(work, struct wl1271, irq_work); 423 container_of(work, struct wl1271, irq_work);
123 424
@@ -132,9 +433,10 @@ static void wl1271_irq_work(struct work_struct *work)
132 if (ret < 0) 433 if (ret < 0)
133 goto out; 434 goto out;
134 435
135 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 436 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
136 437
137 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); 438 wl1271_fw_status(wl, wl->fw_status);
439 intr = le32_to_cpu(wl->fw_status->intr);
138 if (!intr) { 440 if (!intr) {
139 wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); 441 wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
140 goto out_sleep; 442 goto out_sleep;
@@ -142,46 +444,39 @@ static void wl1271_irq_work(struct work_struct *work)
142 444
143 intr &= WL1271_INTR_MASK; 445 intr &= WL1271_INTR_MASK;
144 446
145 do { 447 if (intr & WL1271_ACX_INTR_EVENT_A) {
146 wl1271_fw_status(wl, wl->fw_status); 448 bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
147 449 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
148 450 wl1271_event_handle(wl, 0, do_ack);
149 if (intr & (WL1271_ACX_INTR_EVENT_A | 451 }
150 WL1271_ACX_INTR_EVENT_B)) {
151 wl1271_debug(DEBUG_IRQ,
152 "WL1271_ACX_INTR_EVENT (0x%x)", intr);
153 if (intr & WL1271_ACX_INTR_EVENT_A)
154 wl1271_event_handle(wl, 0);
155 else
156 wl1271_event_handle(wl, 1);
157 }
158 452
159 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 453 if (intr & WL1271_ACX_INTR_EVENT_B) {
160 wl1271_debug(DEBUG_IRQ, 454 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
161 "WL1271_ACX_INTR_INIT_COMPLETE"); 455 wl1271_event_handle(wl, 1, true);
456 }
162 457
163 if (intr & WL1271_ACX_INTR_HW_AVAILABLE) 458 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
164 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); 459 wl1271_debug(DEBUG_IRQ,
460 "WL1271_ACX_INTR_INIT_COMPLETE");
165 461
166 if (intr & WL1271_ACX_INTR_DATA) { 462 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
167 u8 tx_res_cnt = wl->fw_status->tx_results_counter - 463 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
168 wl->tx_results_count;
169 464
170 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 465 if (intr & WL1271_ACX_INTR_DATA) {
466 u8 tx_res_cnt = wl->fw_status->tx_results_counter -
467 wl->tx_results_count;
171 468
172 /* check for tx results */ 469 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
173 if (tx_res_cnt)
174 wl1271_tx_complete(wl, tx_res_cnt);
175 470
176 wl1271_rx(wl, wl->fw_status); 471 /* check for tx results */
177 } 472 if (tx_res_cnt)
473 wl1271_tx_complete(wl, tx_res_cnt);
178 474
179 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); 475 wl1271_rx(wl, wl->fw_status);
180 intr &= WL1271_INTR_MASK; 476 }
181 } while (intr && --ctr);
182 477
183out_sleep: 478out_sleep:
184 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 479 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
185 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 480 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
186 wl1271_ps_elp_sleep(wl); 481 wl1271_ps_elp_sleep(wl);
187 482
@@ -205,7 +500,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
205 wl->elp_compl = NULL; 500 wl->elp_compl = NULL;
206 } 501 }
207 502
208 schedule_work(&wl->irq_work); 503 ieee80211_queue_work(wl->hw, &wl->irq_work);
209 spin_unlock_irqrestore(&wl->wl_lock, flags); 504 spin_unlock_irqrestore(&wl->wl_lock, flags);
210 505
211 return IRQ_HANDLED; 506 return IRQ_HANDLED;
@@ -231,7 +526,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
231 } 526 }
232 527
233 wl->fw_len = fw->size; 528 wl->fw_len = fw->size;
234 wl->fw = kmalloc(wl->fw_len, GFP_KERNEL); 529 wl->fw = vmalloc(wl->fw_len);
235 530
236 if (!wl->fw) { 531 if (!wl->fw) {
237 wl1271_error("could not allocate memory for the firmware"); 532 wl1271_error("could not allocate memory for the firmware");
@@ -292,7 +587,7 @@ static void wl1271_fw_wakeup(struct wl1271 *wl)
292 u32 elp_reg; 587 u32 elp_reg;
293 588
294 elp_reg = ELPCTRL_WAKE_UP; 589 elp_reg = ELPCTRL_WAKE_UP;
295 wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); 590 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
296} 591}
297 592
298static int wl1271_setup(struct wl1271 *wl) 593static int wl1271_setup(struct wl1271 *wl)
@@ -314,6 +609,7 @@ static int wl1271_setup(struct wl1271 *wl)
314 609
315static int wl1271_chip_wakeup(struct wl1271 *wl) 610static int wl1271_chip_wakeup(struct wl1271 *wl)
316{ 611{
612 struct wl1271_partition_set partition;
317 int ret = 0; 613 int ret = 0;
318 614
319 wl1271_power_on(wl); 615 wl1271_power_on(wl);
@@ -323,11 +619,10 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
323 619
324 /* We don't need a real memory partition here, because we only want 620 /* We don't need a real memory partition here, because we only want
325 * to use the registers at this point. */ 621 * to use the registers at this point. */
326 wl1271_set_partition(wl, 622 memset(&partition, 0, sizeof(partition));
327 0x00000000, 623 partition.reg.start = REGISTERS_BASE;
328 0x00000000, 624 partition.reg.size = REGISTERS_DOWN_SIZE;
329 REGISTERS_BASE, 625 wl1271_set_partition(wl, &partition);
330 REGISTERS_DOWN_SIZE);
331 626
332 /* ELP module wake up */ 627 /* ELP module wake up */
333 wl1271_fw_wakeup(wl); 628 wl1271_fw_wakeup(wl);
@@ -335,7 +630,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
335 /* whal_FwCtrl_BootSm() */ 630 /* whal_FwCtrl_BootSm() */
336 631
337 /* 0. read chip id from CHIP_ID */ 632 /* 0. read chip id from CHIP_ID */
338 wl->chip.id = wl1271_reg_read32(wl, CHIP_ID_B); 633 wl->chip.id = wl1271_spi_read32(wl, CHIP_ID_B);
339 634
340 /* 1. check if chip id is valid */ 635 /* 1. check if chip id is valid */
341 636
@@ -346,7 +641,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
346 641
347 ret = wl1271_setup(wl); 642 ret = wl1271_setup(wl);
348 if (ret < 0) 643 if (ret < 0)
349 goto out; 644 goto out_power_off;
350 break; 645 break;
351 case CHIP_ID_1271_PG20: 646 case CHIP_ID_1271_PG20:
352 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 647 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -354,56 +649,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
354 649
355 ret = wl1271_setup(wl); 650 ret = wl1271_setup(wl);
356 if (ret < 0) 651 if (ret < 0)
357 goto out; 652 goto out_power_off;
358 break; 653 break;
359 default: 654 default:
360 wl1271_error("unsupported chip id: 0x%x", wl->chip.id); 655 wl1271_error("unsupported chip id: 0x%x", wl->chip.id);
361 ret = -ENODEV; 656 ret = -ENODEV;
362 goto out; 657 goto out_power_off;
363 } 658 }
364 659
365 if (wl->fw == NULL) { 660 if (wl->fw == NULL) {
366 ret = wl1271_fetch_firmware(wl); 661 ret = wl1271_fetch_firmware(wl);
367 if (ret < 0) 662 if (ret < 0)
368 goto out; 663 goto out_power_off;
369 } 664 }
370 665
371 /* No NVS from netlink, try to get it from the filesystem */ 666 /* No NVS from netlink, try to get it from the filesystem */
372 if (wl->nvs == NULL) { 667 if (wl->nvs == NULL) {
373 ret = wl1271_fetch_nvs(wl); 668 ret = wl1271_fetch_nvs(wl);
374 if (ret < 0) 669 if (ret < 0)
375 goto out; 670 goto out_power_off;
376 } 671 }
377 672
378out: 673 goto out;
379 return ret;
380}
381
382static void wl1271_filter_work(struct work_struct *work)
383{
384 struct wl1271 *wl =
385 container_of(work, struct wl1271, filter_work);
386 int ret;
387
388 mutex_lock(&wl->mutex);
389
390 if (wl->state == WL1271_STATE_OFF)
391 goto out;
392
393 ret = wl1271_ps_elp_wakeup(wl, false);
394 if (ret < 0)
395 goto out;
396
397 /* FIXME: replace the magic numbers with proper definitions */
398 ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
399 if (ret < 0)
400 goto out_sleep;
401 674
402out_sleep: 675out_power_off:
403 wl1271_ps_elp_sleep(wl); 676 wl1271_power_off(wl);
404 677
405out: 678out:
406 mutex_unlock(&wl->mutex); 679 return ret;
407} 680}
408 681
409int wl1271_plt_start(struct wl1271 *wl) 682int wl1271_plt_start(struct wl1271 *wl)
@@ -429,13 +702,26 @@ int wl1271_plt_start(struct wl1271 *wl)
429 702
430 ret = wl1271_boot(wl); 703 ret = wl1271_boot(wl);
431 if (ret < 0) 704 if (ret < 0)
432 goto out; 705 goto out_power_off;
433 706
434 wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver); 707 wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
435 708
436 ret = wl1271_plt_init(wl); 709 ret = wl1271_plt_init(wl);
437 if (ret < 0) 710 if (ret < 0)
438 goto out; 711 goto out_irq_disable;
712
713 /* Make sure power saving is disabled */
714 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
715 if (ret < 0)
716 goto out_irq_disable;
717
718 goto out;
719
720out_irq_disable:
721 wl1271_disable_interrupts(wl);
722
723out_power_off:
724 wl1271_power_off(wl);
439 725
440out: 726out:
441 mutex_unlock(&wl->mutex); 727 mutex_unlock(&wl->mutex);
@@ -462,6 +748,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
462 wl1271_power_off(wl); 748 wl1271_power_off(wl);
463 749
464 wl->state = WL1271_STATE_OFF; 750 wl->state = WL1271_STATE_OFF;
751 wl->rx_counter = 0;
465 752
466out: 753out:
467 mutex_unlock(&wl->mutex); 754 mutex_unlock(&wl->mutex);
@@ -481,7 +768,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
481 * before that, the tx_work will not be initialized! 768 * before that, the tx_work will not be initialized!
482 */ 769 */
483 770
484 schedule_work(&wl->tx_work); 771 ieee80211_queue_work(wl->hw, &wl->tx_work);
485 772
486 /* 773 /*
487 * The workqueue is slow to process the tx_queue and we need stop 774 * The workqueue is slow to process the tx_queue and we need stop
@@ -501,6 +788,93 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
501 return NETDEV_TX_OK; 788 return NETDEV_TX_OK;
502} 789}
503 790
791static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
792 void *arg)
793{
794 struct net_device *dev;
795 struct wireless_dev *wdev;
796 struct wiphy *wiphy;
797 struct ieee80211_hw *hw;
798 struct wl1271 *wl;
799 struct wl1271 *wl_temp;
800 struct in_device *idev;
801 struct in_ifaddr *ifa = arg;
802 int ret = 0;
803
804 /* FIXME: this ugly function should probably be implemented in the
805 * mac80211, and here should only be a simple callback handling actual
806 * setting of the filters. Now we need to dig up references to
807 * various structures to gain access to what we need.
808 * Also, because of this, there is no "initial" setting of the filter
809 * in "op_start", because we don't want to dig up struct net_device
810 * there - the filter will be set upon first change of the interface
811 * IP address. */
812
813 dev = ifa->ifa_dev->dev;
814
815 wdev = dev->ieee80211_ptr;
816 if (wdev == NULL)
817 return -ENODEV;
818
819 wiphy = wdev->wiphy;
820 if (wiphy == NULL)
821 return -ENODEV;
822
823 hw = wiphy_priv(wiphy);
824 if (hw == NULL)
825 return -ENODEV;
826
827 /* Check that the interface is one supported by this driver. */
828 wl_temp = hw->priv;
829 list_for_each_entry(wl, &wl_list, list) {
830 if (wl == wl_temp)
831 break;
832 }
833 if (wl == NULL)
834 return -ENODEV;
835
836 /* Get the interface IP address for the device. "ifa" will become
837 NULL if:
838 - there is no IPV4 protocol address configured
839 - there are multiple (virtual) IPV4 addresses configured
840 When "ifa" is NULL, filtering will be disabled.
841 */
842 ifa = NULL;
843 idev = dev->ip_ptr;
844 if (idev)
845 ifa = idev->ifa_list;
846
847 if (ifa && ifa->ifa_next)
848 ifa = NULL;
849
850 mutex_lock(&wl->mutex);
851
852 if (wl->state == WL1271_STATE_OFF)
853 goto out;
854
855 ret = wl1271_ps_elp_wakeup(wl, false);
856 if (ret < 0)
857 goto out;
858 if (ifa)
859 ret = wl1271_acx_arp_ip_filter(wl, true,
860 (u8 *)&ifa->ifa_address,
861 ACX_IPV4_VERSION);
862 else
863 ret = wl1271_acx_arp_ip_filter(wl, false, NULL,
864 ACX_IPV4_VERSION);
865 wl1271_ps_elp_sleep(wl);
866
867out:
868 mutex_unlock(&wl->mutex);
869
870 return ret;
871}
872
873static struct notifier_block wl1271_dev_notifier = {
874 .notifier_call = wl1271_dev_notify,
875};
876
877
504static int wl1271_op_start(struct ieee80211_hw *hw) 878static int wl1271_op_start(struct ieee80211_hw *hw)
505{ 879{
506 struct wl1271 *wl = hw->priv; 880 struct wl1271 *wl = hw->priv;
@@ -523,22 +897,32 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
523 897
524 ret = wl1271_boot(wl); 898 ret = wl1271_boot(wl);
525 if (ret < 0) 899 if (ret < 0)
526 goto out; 900 goto out_power_off;
527 901
528 ret = wl1271_hw_init(wl); 902 ret = wl1271_hw_init(wl);
529 if (ret < 0) 903 if (ret < 0)
530 goto out; 904 goto out_irq_disable;
531 905
532 wl->state = WL1271_STATE_ON; 906 wl->state = WL1271_STATE_ON;
533 907
534 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 908 wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
535 909
536out: 910 goto out;
537 if (ret < 0) 911
538 wl1271_power_off(wl); 912out_irq_disable:
913 wl1271_disable_interrupts(wl);
539 914
915out_power_off:
916 wl1271_power_off(wl);
917
918out:
540 mutex_unlock(&wl->mutex); 919 mutex_unlock(&wl->mutex);
541 920
921 if (!ret) {
922 list_add(&wl->list, &wl_list);
923 register_inetaddr_notifier(&wl1271_dev_notifier);
924 }
925
542 return ret; 926 return ret;
543} 927}
544 928
@@ -551,6 +935,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
551 935
552 wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); 936 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
553 937
938 unregister_inetaddr_notifier(&wl1271_dev_notifier);
939 list_del(&wl->list);
940
554 mutex_lock(&wl->mutex); 941 mutex_lock(&wl->mutex);
555 942
556 WARN_ON(wl->state != WL1271_STATE_ON); 943 WARN_ON(wl->state != WL1271_STATE_ON);
@@ -570,7 +957,6 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
570 957
571 cancel_work_sync(&wl->irq_work); 958 cancel_work_sync(&wl->irq_work);
572 cancel_work_sync(&wl->tx_work); 959 cancel_work_sync(&wl->tx_work);
573 cancel_work_sync(&wl->filter_work);
574 960
575 mutex_lock(&wl->mutex); 961 mutex_lock(&wl->mutex);
576 962
@@ -581,8 +967,8 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
581 memset(wl->bssid, 0, ETH_ALEN); 967 memset(wl->bssid, 0, ETH_ALEN);
582 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1); 968 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
583 wl->ssid_len = 0; 969 wl->ssid_len = 0;
584 wl->listen_int = 1;
585 wl->bss_type = MAX_BSS_TYPE; 970 wl->bss_type = MAX_BSS_TYPE;
971 wl->band = IEEE80211_BAND_2GHZ;
586 972
587 wl->rx_counter = 0; 973 wl->rx_counter = 0;
588 wl->elp = false; 974 wl->elp = false;
@@ -592,8 +978,13 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
592 wl->tx_blocks_available = 0; 978 wl->tx_blocks_available = 0;
593 wl->tx_results_count = 0; 979 wl->tx_results_count = 0;
594 wl->tx_packets_count = 0; 980 wl->tx_packets_count = 0;
981 wl->tx_security_last_seq = 0;
982 wl->tx_security_seq_16 = 0;
983 wl->tx_security_seq_32 = 0;
595 wl->time_offset = 0; 984 wl->time_offset = 0;
596 wl->session_counter = 0; 985 wl->session_counter = 0;
986 wl->joined = false;
987
597 for (i = 0; i < NUM_TX_QUEUES; i++) 988 for (i = 0; i < NUM_TX_QUEUES; i++)
598 wl->tx_blocks_freed[i] = 0; 989 wl->tx_blocks_freed[i] = 0;
599 990
@@ -611,6 +1002,12 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
611 conf->type, conf->mac_addr); 1002 conf->type, conf->mac_addr);
612 1003
613 mutex_lock(&wl->mutex); 1004 mutex_lock(&wl->mutex);
1005 if (wl->vif) {
1006 ret = -EBUSY;
1007 goto out;
1008 }
1009
1010 wl->vif = conf->vif;
614 1011
615 switch (conf->type) { 1012 switch (conf->type) {
616 case NL80211_IFTYPE_STATION: 1013 case NL80211_IFTYPE_STATION:
@@ -634,7 +1031,12 @@ out:
634static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 1031static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
635 struct ieee80211_if_init_conf *conf) 1032 struct ieee80211_if_init_conf *conf)
636{ 1033{
1034 struct wl1271 *wl = hw->priv;
1035
1036 mutex_lock(&wl->mutex);
637 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 1037 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1038 wl->vif = NULL;
1039 mutex_unlock(&wl->mutex);
638} 1040}
639 1041
640#if 0 1042#if 0
@@ -657,7 +1059,15 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
657 if (ret < 0) 1059 if (ret < 0)
658 goto out; 1060 goto out;
659 1061
660 memcpy(wl->bssid, conf->bssid, ETH_ALEN); 1062 if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) {
1063 wl1271_debug(DEBUG_MAC80211, "bssid changed");
1064
1065 memcpy(wl->bssid, conf->bssid, ETH_ALEN);
1066
1067 ret = wl1271_cmd_join(wl);
1068 if (ret < 0)
1069 goto out_sleep;
1070 }
661 1071
662 ret = wl1271_cmd_build_null_data(wl); 1072 ret = wl1271_cmd_build_null_data(wl);
663 if (ret < 0) 1073 if (ret < 0)
@@ -667,13 +1077,6 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
667 if (wl->ssid_len) 1077 if (wl->ssid_len)
668 memcpy(wl->ssid, conf->ssid, wl->ssid_len); 1078 memcpy(wl->ssid, conf->ssid, wl->ssid_len);
669 1079
670 if (wl->bss_type != BSS_TYPE_IBSS) {
671 /* FIXME: replace the magic numbers with proper definitions */
672 ret = wl1271_cmd_join(wl, wl->bss_type, 5, 100, 1);
673 if (ret < 0)
674 goto out_sleep;
675 }
676
677 if (conf->changed & IEEE80211_IFCC_BEACON) { 1080 if (conf->changed & IEEE80211_IFCC_BEACON) {
678 beacon = ieee80211_beacon_get(hw, vif); 1081 beacon = ieee80211_beacon_get(hw, vif);
679 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, 1082 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
@@ -691,12 +1094,6 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
691 1094
692 if (ret < 0) 1095 if (ret < 0)
693 goto out_sleep; 1096 goto out_sleep;
694
695 /* FIXME: replace the magic numbers with proper definitions */
696 ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
697
698 if (ret < 0)
699 goto out_sleep;
700 } 1097 }
701 1098
702out_sleep: 1099out_sleep:
@@ -724,20 +1121,20 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
724 1121
725 mutex_lock(&wl->mutex); 1122 mutex_lock(&wl->mutex);
726 1123
1124 wl->band = conf->channel->band;
1125
727 ret = wl1271_ps_elp_wakeup(wl, false); 1126 ret = wl1271_ps_elp_wakeup(wl, false);
728 if (ret < 0) 1127 if (ret < 0)
729 goto out; 1128 goto out;
730 1129
731 if (channel != wl->channel) { 1130 if (channel != wl->channel) {
732 u8 old_channel = wl->channel; 1131 /*
1132 * We assume that the stack will configure the right channel
1133 * before associating, so we don't need to send a join
1134 * command here. We will join the right channel when the
1135 * BSSID changes
1136 */
733 wl->channel = channel; 1137 wl->channel = channel;
734
735 /* FIXME: use beacon interval provided by mac80211 */
736 ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
737 if (ret < 0) {
738 wl->channel = old_channel;
739 goto out_sleep;
740 }
741 } 1138 }
742 1139
743 ret = wl1271_cmd_build_null_data(wl); 1140 ret = wl1271_cmd_build_null_data(wl);
@@ -782,6 +1179,45 @@ out:
782 return ret; 1179 return ret;
783} 1180}
784 1181
1182struct wl1271_filter_params {
1183 bool enabled;
1184 int mc_list_length;
1185 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
1186};
1187
1188static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
1189 struct dev_addr_list *mc_list)
1190{
1191 struct wl1271_filter_params *fp;
1192 int i;
1193
1194 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
1195 if (!fp) {
1196 wl1271_error("Out of memory setting filters.");
1197 return 0;
1198 }
1199
1200 /* update multicast filtering parameters */
1201 fp->enabled = true;
1202 if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
1203 mc_count = 0;
1204 fp->enabled = false;
1205 }
1206
1207 fp->mc_list_length = 0;
1208 for (i = 0; i < mc_count; i++) {
1209 if (mc_list->da_addrlen == ETH_ALEN) {
1210 memcpy(fp->mc_list[fp->mc_list_length],
1211 mc_list->da_addr, ETH_ALEN);
1212 fp->mc_list_length++;
1213 } else
1214 wl1271_warning("Unknown mc address length.");
1215 mc_list = mc_list->next;
1216 }
1217
1218 return (u64)(unsigned long)fp;
1219}
1220
785#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \ 1221#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
786 FIF_ALLMULTI | \ 1222 FIF_ALLMULTI | \
787 FIF_FCSFAIL | \ 1223 FIF_FCSFAIL | \
@@ -791,28 +1227,53 @@ out:
791 1227
792static void wl1271_op_configure_filter(struct ieee80211_hw *hw, 1228static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
793 unsigned int changed, 1229 unsigned int changed,
794 unsigned int *total,u64 multicast) 1230 unsigned int *total, u64 multicast)
795{ 1231{
1232 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
796 struct wl1271 *wl = hw->priv; 1233 struct wl1271 *wl = hw->priv;
1234 int ret;
797 1235
798 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter"); 1236 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
799 1237
1238 mutex_lock(&wl->mutex);
1239
1240 if (wl->state == WL1271_STATE_OFF)
1241 goto out;
1242
1243 ret = wl1271_ps_elp_wakeup(wl, false);
1244 if (ret < 0)
1245 goto out;
1246
800 *total &= WL1271_SUPPORTED_FILTERS; 1247 *total &= WL1271_SUPPORTED_FILTERS;
801 changed &= WL1271_SUPPORTED_FILTERS; 1248 changed &= WL1271_SUPPORTED_FILTERS;
802 1249
1250 if (*total & FIF_ALLMULTI)
1251 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
1252 else if (fp)
1253 ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
1254 fp->mc_list,
1255 fp->mc_list_length);
1256 if (ret < 0)
1257 goto out_sleep;
1258
1259 kfree(fp);
1260
1261 /* FIXME: We still need to set our filters properly */
1262
1263 /* determine, whether supported filter values have changed */
803 if (changed == 0) 1264 if (changed == 0)
804 return; 1265 goto out_sleep;
805 1266
806 /* FIXME: wl->rx_config and wl->rx_filter are not protected */ 1267 /* apply configured filters */
807 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1268 ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
808 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 1269 if (ret < 0)
1270 goto out_sleep;
809 1271
810 /* 1272out_sleep:
811 * FIXME: workqueues need to be properly cancelled on stop(), for 1273 wl1271_ps_elp_sleep(wl);
812 * now let's just disable changing the filter settings. They will 1274
813 * be updated any on config(). 1275out:
814 */ 1276 mutex_unlock(&wl->mutex);
815 /* schedule_work(&wl->filter_work); */
816} 1277}
817 1278
818static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1279static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -823,6 +1284,8 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
823 struct wl1271 *wl = hw->priv; 1284 struct wl1271 *wl = hw->priv;
824 const u8 *addr; 1285 const u8 *addr;
825 int ret; 1286 int ret;
1287 u32 tx_seq_32 = 0;
1288 u16 tx_seq_16 = 0;
826 u8 key_type; 1289 u8 key_type;
827 1290
828 static const u8 bcast_addr[ETH_ALEN] = 1291 static const u8 bcast_addr[ETH_ALEN] =
@@ -861,11 +1324,15 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
861 key_type = KEY_TKIP; 1324 key_type = KEY_TKIP;
862 1325
863 key_conf->hw_key_idx = key_conf->keyidx; 1326 key_conf->hw_key_idx = key_conf->keyidx;
1327 tx_seq_32 = wl->tx_security_seq_32;
1328 tx_seq_16 = wl->tx_security_seq_16;
864 break; 1329 break;
865 case ALG_CCMP: 1330 case ALG_CCMP:
866 key_type = KEY_AES; 1331 key_type = KEY_AES;
867 1332
868 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1333 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1334 tx_seq_32 = wl->tx_security_seq_32;
1335 tx_seq_16 = wl->tx_security_seq_16;
869 break; 1336 break;
870 default: 1337 default:
871 wl1271_error("Unknown key algo 0x%x", key_conf->alg); 1338 wl1271_error("Unknown key algo 0x%x", key_conf->alg);
@@ -879,7 +1346,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
879 ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE, 1346 ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
880 key_conf->keyidx, key_type, 1347 key_conf->keyidx, key_type,
881 key_conf->keylen, key_conf->key, 1348 key_conf->keylen, key_conf->key,
882 addr); 1349 addr, tx_seq_32, tx_seq_16);
883 if (ret < 0) { 1350 if (ret < 0) {
884 wl1271_error("Could not add or replace key"); 1351 wl1271_error("Could not add or replace key");
885 goto out_sleep; 1352 goto out_sleep;
@@ -890,7 +1357,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
890 ret = wl1271_cmd_set_key(wl, KEY_REMOVE, 1357 ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
891 key_conf->keyidx, key_type, 1358 key_conf->keyidx, key_type,
892 key_conf->keylen, key_conf->key, 1359 key_conf->keylen, key_conf->key,
893 addr); 1360 addr, 0, 0);
894 if (ret < 0) { 1361 if (ret < 0) {
895 wl1271_error("Could not remove key"); 1362 wl1271_error("Could not remove key");
896 goto out_sleep; 1363 goto out_sleep;
@@ -921,13 +1388,13 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
921 struct wl1271 *wl = hw->priv; 1388 struct wl1271 *wl = hw->priv;
922 int ret; 1389 int ret;
923 u8 *ssid = NULL; 1390 u8 *ssid = NULL;
924 size_t ssid_len = 0; 1391 size_t len = 0;
925 1392
926 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan"); 1393 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
927 1394
928 if (req->n_ssids) { 1395 if (req->n_ssids) {
929 ssid = req->ssids[0].ssid; 1396 ssid = req->ssids[0].ssid;
930 ssid_len = req->ssids[0].ssid_len; 1397 len = req->ssids[0].ssid_len;
931 } 1398 }
932 1399
933 mutex_lock(&wl->mutex); 1400 mutex_lock(&wl->mutex);
@@ -936,7 +1403,12 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
936 if (ret < 0) 1403 if (ret < 0)
937 goto out; 1404 goto out;
938 1405
939 ret = wl1271_cmd_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3); 1406 if (wl1271_11a_enabled())
1407 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
1408 WL1271_SCAN_BAND_DUAL, 3);
1409 else
1410 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
1411 WL1271_SCAN_BAND_2_4_GHZ, 3);
940 1412
941 wl1271_ps_elp_sleep(wl); 1413 wl1271_ps_elp_sleep(wl);
942 1414
@@ -969,6 +1441,22 @@ out:
969 return ret; 1441 return ret;
970} 1442}
971 1443
1444static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
1445{
1446 struct ieee80211_supported_band *band;
1447 u32 enabled_rates = 0;
1448 int bit;
1449
1450 band = wl->hw->wiphy->bands[wl->band];
1451 for (bit = 0; bit < band->n_bitrates; bit++) {
1452 if (basic_rate_set & 0x1)
1453 enabled_rates |= band->bitrates[bit].hw_value;
1454 basic_rate_set >>= 1;
1455 }
1456
1457 return enabled_rates;
1458}
1459
972static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 1460static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
973 struct ieee80211_vif *vif, 1461 struct ieee80211_vif *vif,
974 struct ieee80211_bss_conf *bss_conf, 1462 struct ieee80211_bss_conf *bss_conf,
@@ -990,6 +1478,12 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
990 if (bss_conf->assoc) { 1478 if (bss_conf->assoc) {
991 wl->aid = bss_conf->aid; 1479 wl->aid = bss_conf->aid;
992 1480
1481 /*
1482 * with wl1271, we don't need to update the
1483 * beacon_int and dtim_period, because the firmware
1484 * updates it by itself when the first beacon is
1485 * received after a join.
1486 */
993 ret = wl1271_cmd_build_ps_poll(wl, wl->aid); 1487 ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
994 if (ret < 0) 1488 if (ret < 0)
995 goto out_sleep; 1489 goto out_sleep;
@@ -1005,8 +1499,14 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1005 if (ret < 0) 1499 if (ret < 0)
1006 goto out_sleep; 1500 goto out_sleep;
1007 } 1501 }
1502 } else {
1503 /* use defaults when not associated */
1504 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
1505 wl->aid = 0;
1008 } 1506 }
1507
1009 } 1508 }
1509
1010 if (changed & BSS_CHANGED_ERP_SLOT) { 1510 if (changed & BSS_CHANGED_ERP_SLOT) {
1011 if (bss_conf->use_short_slot) 1511 if (bss_conf->use_short_slot)
1012 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); 1512 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
@@ -1036,6 +1536,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1036 } 1536 }
1037 } 1537 }
1038 1538
1539 if (changed & BSS_CHANGED_BASIC_RATES) {
1540 wl->basic_rate_set = wl1271_enabled_rates_get(
1541 wl, bss_conf->basic_rates);
1542
1543 ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
1544 if (ret < 0) {
1545 wl1271_warning("Set rate policies failed %d", ret);
1546 goto out_sleep;
1547 }
1548 }
1549
1039out_sleep: 1550out_sleep:
1040 wl1271_ps_elp_sleep(wl); 1551 wl1271_ps_elp_sleep(wl);
1041 1552
@@ -1047,44 +1558,44 @@ out:
1047/* can't be const, mac80211 writes to this */ 1558/* can't be const, mac80211 writes to this */
1048static struct ieee80211_rate wl1271_rates[] = { 1559static struct ieee80211_rate wl1271_rates[] = {
1049 { .bitrate = 10, 1560 { .bitrate = 10,
1050 .hw_value = 0x1, 1561 .hw_value = CONF_HW_BIT_RATE_1MBPS,
1051 .hw_value_short = 0x1, }, 1562 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
1052 { .bitrate = 20, 1563 { .bitrate = 20,
1053 .hw_value = 0x2, 1564 .hw_value = CONF_HW_BIT_RATE_2MBPS,
1054 .hw_value_short = 0x2, 1565 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
1055 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 1566 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
1056 { .bitrate = 55, 1567 { .bitrate = 55,
1057 .hw_value = 0x4, 1568 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
1058 .hw_value_short = 0x4, 1569 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
1059 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 1570 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
1060 { .bitrate = 110, 1571 { .bitrate = 110,
1061 .hw_value = 0x20, 1572 .hw_value = CONF_HW_BIT_RATE_11MBPS,
1062 .hw_value_short = 0x20, 1573 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
1063 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 1574 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
1064 { .bitrate = 60, 1575 { .bitrate = 60,
1065 .hw_value = 0x8, 1576 .hw_value = CONF_HW_BIT_RATE_6MBPS,
1066 .hw_value_short = 0x8, }, 1577 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
1067 { .bitrate = 90, 1578 { .bitrate = 90,
1068 .hw_value = 0x10, 1579 .hw_value = CONF_HW_BIT_RATE_9MBPS,
1069 .hw_value_short = 0x10, }, 1580 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
1070 { .bitrate = 120, 1581 { .bitrate = 120,
1071 .hw_value = 0x40, 1582 .hw_value = CONF_HW_BIT_RATE_12MBPS,
1072 .hw_value_short = 0x40, }, 1583 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
1073 { .bitrate = 180, 1584 { .bitrate = 180,
1074 .hw_value = 0x80, 1585 .hw_value = CONF_HW_BIT_RATE_18MBPS,
1075 .hw_value_short = 0x80, }, 1586 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
1076 { .bitrate = 240, 1587 { .bitrate = 240,
1077 .hw_value = 0x200, 1588 .hw_value = CONF_HW_BIT_RATE_24MBPS,
1078 .hw_value_short = 0x200, }, 1589 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
1079 { .bitrate = 360, 1590 { .bitrate = 360,
1080 .hw_value = 0x400, 1591 .hw_value = CONF_HW_BIT_RATE_36MBPS,
1081 .hw_value_short = 0x400, }, 1592 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
1082 { .bitrate = 480, 1593 { .bitrate = 480,
1083 .hw_value = 0x800, 1594 .hw_value = CONF_HW_BIT_RATE_48MBPS,
1084 .hw_value_short = 0x800, }, 1595 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
1085 { .bitrate = 540, 1596 { .bitrate = 540,
1086 .hw_value = 0x1000, 1597 .hw_value = CONF_HW_BIT_RATE_54MBPS,
1087 .hw_value_short = 0x1000, }, 1598 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
1088}; 1599};
1089 1600
1090/* can't be const, mac80211 writes to this */ 1601/* can't be const, mac80211 writes to this */
@@ -1112,6 +1623,88 @@ static struct ieee80211_supported_band wl1271_band_2ghz = {
1112 .n_bitrates = ARRAY_SIZE(wl1271_rates), 1623 .n_bitrates = ARRAY_SIZE(wl1271_rates),
1113}; 1624};
1114 1625
1626/* 5 GHz data rates for WL1273 */
1627static struct ieee80211_rate wl1271_rates_5ghz[] = {
1628 { .bitrate = 60,
1629 .hw_value = CONF_HW_BIT_RATE_6MBPS,
1630 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
1631 { .bitrate = 90,
1632 .hw_value = CONF_HW_BIT_RATE_9MBPS,
1633 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
1634 { .bitrate = 120,
1635 .hw_value = CONF_HW_BIT_RATE_12MBPS,
1636 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
1637 { .bitrate = 180,
1638 .hw_value = CONF_HW_BIT_RATE_18MBPS,
1639 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
1640 { .bitrate = 240,
1641 .hw_value = CONF_HW_BIT_RATE_24MBPS,
1642 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
1643 { .bitrate = 360,
1644 .hw_value = CONF_HW_BIT_RATE_36MBPS,
1645 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
1646 { .bitrate = 480,
1647 .hw_value = CONF_HW_BIT_RATE_48MBPS,
1648 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
1649 { .bitrate = 540,
1650 .hw_value = CONF_HW_BIT_RATE_54MBPS,
1651 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
1652};
1653
1654/* 5 GHz band channels for WL1273 */
1655static struct ieee80211_channel wl1271_channels_5ghz[] = {
1656 { .hw_value = 183, .center_freq = 4915},
1657 { .hw_value = 184, .center_freq = 4920},
1658 { .hw_value = 185, .center_freq = 4925},
1659 { .hw_value = 187, .center_freq = 4935},
1660 { .hw_value = 188, .center_freq = 4940},
1661 { .hw_value = 189, .center_freq = 4945},
1662 { .hw_value = 192, .center_freq = 4960},
1663 { .hw_value = 196, .center_freq = 4980},
1664 { .hw_value = 7, .center_freq = 5035},
1665 { .hw_value = 8, .center_freq = 5040},
1666 { .hw_value = 9, .center_freq = 5045},
1667 { .hw_value = 11, .center_freq = 5055},
1668 { .hw_value = 12, .center_freq = 5060},
1669 { .hw_value = 16, .center_freq = 5080},
1670 { .hw_value = 34, .center_freq = 5170},
1671 { .hw_value = 36, .center_freq = 5180},
1672 { .hw_value = 38, .center_freq = 5190},
1673 { .hw_value = 40, .center_freq = 5200},
1674 { .hw_value = 42, .center_freq = 5210},
1675 { .hw_value = 44, .center_freq = 5220},
1676 { .hw_value = 46, .center_freq = 5230},
1677 { .hw_value = 48, .center_freq = 5240},
1678 { .hw_value = 52, .center_freq = 5260},
1679 { .hw_value = 56, .center_freq = 5280},
1680 { .hw_value = 60, .center_freq = 5300},
1681 { .hw_value = 64, .center_freq = 5320},
1682 { .hw_value = 100, .center_freq = 5500},
1683 { .hw_value = 104, .center_freq = 5520},
1684 { .hw_value = 108, .center_freq = 5540},
1685 { .hw_value = 112, .center_freq = 5560},
1686 { .hw_value = 116, .center_freq = 5580},
1687 { .hw_value = 120, .center_freq = 5600},
1688 { .hw_value = 124, .center_freq = 5620},
1689 { .hw_value = 128, .center_freq = 5640},
1690 { .hw_value = 132, .center_freq = 5660},
1691 { .hw_value = 136, .center_freq = 5680},
1692 { .hw_value = 140, .center_freq = 5700},
1693 { .hw_value = 149, .center_freq = 5745},
1694 { .hw_value = 153, .center_freq = 5765},
1695 { .hw_value = 157, .center_freq = 5785},
1696 { .hw_value = 161, .center_freq = 5805},
1697 { .hw_value = 165, .center_freq = 5825},
1698};
1699
1700
1701static struct ieee80211_supported_band wl1271_band_5ghz = {
1702 .channels = wl1271_channels_5ghz,
1703 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
1704 .bitrates = wl1271_rates_5ghz,
1705 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
1706};
1707
1115static const struct ieee80211_ops wl1271_ops = { 1708static const struct ieee80211_ops wl1271_ops = {
1116 .start = wl1271_op_start, 1709 .start = wl1271_op_start,
1117 .stop = wl1271_op_stop, 1710 .stop = wl1271_op_stop,
@@ -1119,6 +1712,7 @@ static const struct ieee80211_ops wl1271_ops = {
1119 .remove_interface = wl1271_op_remove_interface, 1712 .remove_interface = wl1271_op_remove_interface,
1120 .config = wl1271_op_config, 1713 .config = wl1271_op_config,
1121/* .config_interface = wl1271_op_config_interface, */ 1714/* .config_interface = wl1271_op_config_interface, */
1715 .prepare_multicast = wl1271_op_prepare_multicast,
1122 .configure_filter = wl1271_op_configure_filter, 1716 .configure_filter = wl1271_op_configure_filter,
1123 .tx = wl1271_op_tx, 1717 .tx = wl1271_op_tx,
1124 .set_key = wl1271_op_set_key, 1718 .set_key = wl1271_op_set_key,
@@ -1151,24 +1745,25 @@ static int wl1271_register_hw(struct wl1271 *wl)
1151 1745
1152static int wl1271_init_ieee80211(struct wl1271 *wl) 1746static int wl1271_init_ieee80211(struct wl1271 *wl)
1153{ 1747{
1154 /* 1748 /* The tx descriptor buffer and the TKIP space. */
1155 * The tx descriptor buffer and the TKIP space. 1749 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
1156 * 1750 sizeof(struct wl1271_tx_hw_descr);
1157 * FIXME: add correct 1271 descriptor size
1158 */
1159 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE;
1160 1751
1161 /* unit us */ 1752 /* unit us */
1162 /* FIXME: find a proper value */ 1753 /* FIXME: find a proper value */
1163 wl->hw->channel_change_time = 10000; 1754 wl->hw->channel_change_time = 10000;
1164 1755
1165 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1756 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1166 IEEE80211_HW_NOISE_DBM; 1757 IEEE80211_HW_NOISE_DBM |
1758 IEEE80211_HW_BEACON_FILTER;
1167 1759
1168 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1760 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1169 wl->hw->wiphy->max_scan_ssids = 1; 1761 wl->hw->wiphy->max_scan_ssids = 1;
1170 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 1762 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
1171 1763
1764 if (wl1271_11a_enabled())
1765 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
1766
1172 SET_IEEE80211_DEV(wl->hw, &wl->spi->dev); 1767 SET_IEEE80211_DEV(wl->hw, &wl->spi->dev);
1173 1768
1174 return 0; 1769 return 0;
@@ -1213,17 +1808,18 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1213 wl = hw->priv; 1808 wl = hw->priv;
1214 memset(wl, 0, sizeof(*wl)); 1809 memset(wl, 0, sizeof(*wl));
1215 1810
1811 INIT_LIST_HEAD(&wl->list);
1812
1216 wl->hw = hw; 1813 wl->hw = hw;
1217 dev_set_drvdata(&spi->dev, wl); 1814 dev_set_drvdata(&spi->dev, wl);
1218 wl->spi = spi; 1815 wl->spi = spi;
1219 1816
1220 skb_queue_head_init(&wl->tx_queue); 1817 skb_queue_head_init(&wl->tx_queue);
1221 1818
1222 INIT_WORK(&wl->filter_work, wl1271_filter_work); 1819 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
1223 wl->channel = WL1271_DEFAULT_CHANNEL; 1820 wl->channel = WL1271_DEFAULT_CHANNEL;
1224 wl->scanning = false; 1821 wl->scanning = false;
1225 wl->default_key = 0; 1822 wl->default_key = 0;
1226 wl->listen_int = 1;
1227 wl->rx_counter = 0; 1823 wl->rx_counter = 0;
1228 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1824 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1229 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 1825 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
@@ -1232,10 +1828,12 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1232 wl->psm_requested = false; 1828 wl->psm_requested = false;
1233 wl->tx_queue_stopped = false; 1829 wl->tx_queue_stopped = false;
1234 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1830 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1831 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
1832 wl->band = IEEE80211_BAND_2GHZ;
1833 wl->vif = NULL;
1834 wl->joined = false;
1235 1835
1236 /* We use the default power on sleep time until we know which chip 1836 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
1237 * we're using */
1238 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
1239 wl->tx_frames[i] = NULL; 1837 wl->tx_frames[i] = NULL;
1240 1838
1241 spin_lock_init(&wl->wl_lock); 1839 spin_lock_init(&wl->wl_lock);
@@ -1250,13 +1848,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1250 wl->state = WL1271_STATE_OFF; 1848 wl->state = WL1271_STATE_OFF;
1251 mutex_init(&wl->mutex); 1849 mutex_init(&wl->mutex);
1252 1850
1253 wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL);
1254 if (!wl->rx_descriptor) {
1255 wl1271_error("could not allocate memory for rx descriptor");
1256 ret = -ENOMEM;
1257 goto out_free;
1258 }
1259
1260 /* This is the only SPI value that we need to set here, the rest 1851 /* This is the only SPI value that we need to set here, the rest
1261 * comes from the board-peripherals file */ 1852 * comes from the board-peripherals file */
1262 spi->bits_per_word = 32; 1853 spi->bits_per_word = 32;
@@ -1298,6 +1889,9 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1298 } 1889 }
1299 dev_set_drvdata(&wl1271_device.dev, wl); 1890 dev_set_drvdata(&wl1271_device.dev, wl);
1300 1891
1892 /* Apply default driver configuration. */
1893 wl1271_conf_init(wl);
1894
1301 ret = wl1271_init_ieee80211(wl); 1895 ret = wl1271_init_ieee80211(wl);
1302 if (ret) 1896 if (ret)
1303 goto out_platform; 1897 goto out_platform;
@@ -1319,9 +1913,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1319 free_irq(wl->irq, wl); 1913 free_irq(wl->irq, wl);
1320 1914
1321 out_free: 1915 out_free:
1322 kfree(wl->rx_descriptor);
1323 wl->rx_descriptor = NULL;
1324
1325 ieee80211_free_hw(hw); 1916 ieee80211_free_hw(hw);
1326 1917
1327 return ret; 1918 return ret;
@@ -1337,14 +1928,11 @@ static int __devexit wl1271_remove(struct spi_device *spi)
1337 platform_device_unregister(&wl1271_device); 1928 platform_device_unregister(&wl1271_device);
1338 free_irq(wl->irq, wl); 1929 free_irq(wl->irq, wl);
1339 kfree(wl->target_mem_map); 1930 kfree(wl->target_mem_map);
1340 kfree(wl->fw); 1931 vfree(wl->fw);
1341 wl->fw = NULL; 1932 wl->fw = NULL;
1342 kfree(wl->nvs); 1933 kfree(wl->nvs);
1343 wl->nvs = NULL; 1934 wl->nvs = NULL;
1344 1935
1345 kfree(wl->rx_descriptor);
1346 wl->rx_descriptor = NULL;
1347
1348 kfree(wl->fw_status); 1936 kfree(wl->fw_status);
1349 kfree(wl->tx_res_if); 1937 kfree(wl->tx_res_if);
1350 1938
@@ -1391,3 +1979,4 @@ module_exit(wl1271_exit);
1391 1979
1392MODULE_LICENSE("GPL"); 1980MODULE_LICENSE("GPL");
1393MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 1981MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
1982MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 1dc74b0c7736..507cd91d7eed 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -27,25 +27,38 @@
27 27
28#define WL1271_WAKEUP_TIMEOUT 500 28#define WL1271_WAKEUP_TIMEOUT 500
29 29
30void wl1271_elp_work(struct work_struct *work)
31{
32 struct delayed_work *dwork;
33 struct wl1271 *wl;
34
35 dwork = container_of(work, struct delayed_work, work);
36 wl = container_of(dwork, struct wl1271, elp_work);
37
38 wl1271_debug(DEBUG_PSM, "elp work");
39
40 mutex_lock(&wl->mutex);
41
42 if (wl->elp || !wl->psm)
43 goto out;
44
45 wl1271_debug(DEBUG_PSM, "chip to elp");
46 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
47 wl->elp = true;
48
49out:
50 mutex_unlock(&wl->mutex);
51}
52
53#define ELP_ENTRY_DELAY 5
54
30/* Routines to toggle sleep mode while in ELP */ 55/* Routines to toggle sleep mode while in ELP */
31void wl1271_ps_elp_sleep(struct wl1271 *wl) 56void wl1271_ps_elp_sleep(struct wl1271 *wl)
32{ 57{
33 /* 58 if (wl->psm) {
34 * FIXME: due to a problem in the firmware (causing a firmware 59 cancel_delayed_work(&wl->elp_work);
35 * crash), ELP entry is prevented below. Remove the "true" to 60 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
36 * re-enable ELP entry. 61 msecs_to_jiffies(ELP_ENTRY_DELAY));
37 */
38 if (true || wl->elp || !wl->psm)
39 return;
40
41 /*
42 * Go to ELP unless there is work already pending - pending work
43 * will immediately wakeup the chipset anyway.
44 */
45 if (!work_pending(&wl->irq_work) && !work_pending(&wl->tx_work)) {
46 wl1271_debug(DEBUG_PSM, "chip to elp");
47 wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
48 wl->elp = true;
49 } 62 }
50} 63}
51 64
@@ -73,7 +86,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
73 wl->elp_compl = &compl; 86 wl->elp_compl = &compl;
74 spin_unlock_irqrestore(&wl->wl_lock, flags); 87 spin_unlock_irqrestore(&wl->wl_lock, flags);
75 88
76 wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 89 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
77 90
78 if (!pending) { 91 if (!pending) {
79 ret = wait_for_completion_timeout( 92 ret = wait_for_completion_timeout(
@@ -111,6 +124,17 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
111 switch (mode) { 124 switch (mode) {
112 case STATION_POWER_SAVE_MODE: 125 case STATION_POWER_SAVE_MODE:
113 wl1271_debug(DEBUG_PSM, "entering psm"); 126 wl1271_debug(DEBUG_PSM, "entering psm");
127
128 /* enable beacon filtering */
129 ret = wl1271_acx_beacon_filter_opt(wl, true);
130 if (ret < 0)
131 return ret;
132
133 /* enable beacon early termination */
134 ret = wl1271_acx_bet_enable(wl, true);
135 if (ret < 0)
136 return ret;
137
114 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); 138 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
115 if (ret < 0) 139 if (ret < 0)
116 return ret; 140 return ret;
@@ -128,6 +152,16 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
128 if (ret < 0) 152 if (ret < 0)
129 return ret; 153 return ret;
130 154
155 /* disable beacon early termination */
156 ret = wl1271_acx_bet_enable(wl, false);
157 if (ret < 0)
158 return ret;
159
160 /* disable beacon filtering */
161 ret = wl1271_acx_beacon_filter_opt(wl, false);
162 if (ret < 0)
163 return ret;
164
131 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE); 165 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
132 if (ret < 0) 166 if (ret < 0)
133 return ret; 167 return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h
index de2bd3c7dc9c..779653d0ae85 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.h
@@ -30,6 +30,6 @@
30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode); 30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode);
31void wl1271_ps_elp_sleep(struct wl1271 *wl); 31void wl1271_ps_elp_sleep(struct wl1271 *wl);
32int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); 32int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
33 33void wl1271_elp_work(struct work_struct *work);
34 34
35#endif /* __WL1271_PS_H__ */ 35#endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index f8ed4a4fc691..1f237389d1c7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -34,7 +34,7 @@
34#define REGISTERS_WORK_SIZE 0x0000b000 34#define REGISTERS_WORK_SIZE 0x0000b000
35 35
36#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC 36#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC
37#define STATUS_MEM_ADDRESS 0x40400 37#define FW_STATUS_ADDR (0x14FC0 + 0xA000)
38 38
39/* ELP register commands */ 39/* ELP register commands */
40#define ELPCTRL_WAKE_UP 0x1 40#define ELPCTRL_WAKE_UP 0x1
@@ -213,7 +213,6 @@
213==============================================*/ 213==============================================*/
214#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0) 214#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0)
215 215
216#define RX_DRIVER_DUMMY_WRITE_ADDRESS (REGISTERS_BASE + 0x0534)
217#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538) 216#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538)
218 217
219/* Device Configuration registers*/ 218/* Device Configuration registers*/
@@ -614,50 +613,6 @@ enum {
614 MAX_RADIO_BANDS = 0xFF 613 MAX_RADIO_BANDS = 0xFF
615}; 614};
616 615
617enum {
618 NO_RATE = 0,
619 RATE_1MBPS = 0x0A,
620 RATE_2MBPS = 0x14,
621 RATE_5_5MBPS = 0x37,
622 RATE_6MBPS = 0x0B,
623 RATE_9MBPS = 0x0F,
624 RATE_11MBPS = 0x6E,
625 RATE_12MBPS = 0x0A,
626 RATE_18MBPS = 0x0E,
627 RATE_22MBPS = 0xDC,
628 RATE_24MBPS = 0x09,
629 RATE_36MBPS = 0x0D,
630 RATE_48MBPS = 0x08,
631 RATE_54MBPS = 0x0C
632};
633
634enum {
635 RATE_INDEX_1MBPS = 0,
636 RATE_INDEX_2MBPS = 1,
637 RATE_INDEX_5_5MBPS = 2,
638 RATE_INDEX_6MBPS = 3,
639 RATE_INDEX_9MBPS = 4,
640 RATE_INDEX_11MBPS = 5,
641 RATE_INDEX_12MBPS = 6,
642 RATE_INDEX_18MBPS = 7,
643 RATE_INDEX_22MBPS = 8,
644 RATE_INDEX_24MBPS = 9,
645 RATE_INDEX_36MBPS = 10,
646 RATE_INDEX_48MBPS = 11,
647 RATE_INDEX_54MBPS = 12,
648 RATE_INDEX_MAX = RATE_INDEX_54MBPS,
649 MAX_RATE_INDEX,
650 INVALID_RATE_INDEX = MAX_RATE_INDEX,
651 RATE_INDEX_ENUM_MAX_SIZE = 0x7FFFFFFF
652};
653
654enum {
655 RATE_MASK_1MBPS = 0x1,
656 RATE_MASK_2MBPS = 0x2,
657 RATE_MASK_5_5MBPS = 0x4,
658 RATE_MASK_11MBPS = 0x20,
659};
660
661#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ 616#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */
662#define OFDM_RATE_BIT BIT(6) 617#define OFDM_RATE_BIT BIT(6)
663#define PBCC_RATE_BIT BIT(7) 618#define PBCC_RATE_BIT BIT(7)
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index ad8b6904c5eb..37d81ab6acc0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -30,14 +30,15 @@
30static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 30static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
31 u32 drv_rx_counter) 31 u32 drv_rx_counter)
32{ 32{
33 return status->rx_pkt_descs[drv_rx_counter] & RX_MEM_BLOCK_MASK; 33 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
34 RX_MEM_BLOCK_MASK;
34} 35}
35 36
36static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status, 37static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
37 u32 drv_rx_counter) 38 u32 drv_rx_counter)
38{ 39{
39 return (status->rx_pkt_descs[drv_rx_counter] & RX_BUF_SIZE_MASK) >> 40 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
40 RX_BUF_SIZE_SHIFT_DIV; 41 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
41} 42}
42 43
43/* The values of this table must match the wl1271_rates[] array */ 44/* The values of this table must match the wl1271_rates[] array */
@@ -70,6 +71,36 @@ static u8 wl1271_rx_rate_to_idx[] = {
70 0 /* WL1271_RATE_1 */ 71 0 /* WL1271_RATE_1 */
71}; 72};
72 73
74/* The values of this table must match the wl1271_rates[] array */
75static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
76 /* MCS rates are used only with 11n */
77 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
78 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
79 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
80 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
81 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
82 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
83 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
84 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
85
86 7, /* WL1271_RATE_54 */
87 6, /* WL1271_RATE_48 */
88 5, /* WL1271_RATE_36 */
89 4, /* WL1271_RATE_24 */
90
91 /* TI-specific rate */
92 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
93
94 3, /* WL1271_RATE_18 */
95 2, /* WL1271_RATE_12 */
96 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11 */
97 1, /* WL1271_RATE_9 */
98 0, /* WL1271_RATE_6 */
99 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5 */
100 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2 */
101 WL1271_RX_RATE_UNSUPPORTED /* WL1271_RATE_1 */
102};
103
73static void wl1271_rx_status(struct wl1271 *wl, 104static void wl1271_rx_status(struct wl1271 *wl,
74 struct wl1271_rx_descriptor *desc, 105 struct wl1271_rx_descriptor *desc,
75 struct ieee80211_rx_status *status, 106 struct ieee80211_rx_status *status,
@@ -77,12 +108,21 @@ static void wl1271_rx_status(struct wl1271 *wl,
77{ 108{
78 memset(status, 0, sizeof(struct ieee80211_rx_status)); 109 memset(status, 0, sizeof(struct ieee80211_rx_status));
79 110
80 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG) 111 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
112 WL1271_RX_DESC_BAND_BG) {
81 status->band = IEEE80211_BAND_2GHZ; 113 status->band = IEEE80211_BAND_2GHZ;
82 else 114 status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
115 } else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
116 WL1271_RX_DESC_BAND_A) {
117 status->band = IEEE80211_BAND_5GHZ;
118 status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
119 } else
83 wl1271_warning("unsupported band 0x%x", 120 wl1271_warning("unsupported band 0x%x",
84 desc->flags & WL1271_RX_DESC_BAND_MASK); 121 desc->flags & WL1271_RX_DESC_BAND_MASK);
85 122
123 if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
124 wl1271_warning("unsupported rate");
125
86 /* 126 /*
87 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the 127 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the
88 * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we 128 * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we
@@ -91,12 +131,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
91 */ 131 */
92 status->signal = desc->rssi; 132 status->signal = desc->rssi;
93 133
94 /* FIXME: Should this be optimized? */
95 status->qual = (desc->rssi - WL1271_RX_MIN_RSSI) * 100 /
96 (WL1271_RX_MAX_RSSI - WL1271_RX_MIN_RSSI);
97 status->qual = min(status->qual, 100);
98 status->qual = max(status->qual, 0);
99
100 /* 134 /*
101 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we 135 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
102 * need to divide by two for now, but TI has been discussing about 136 * need to divide by two for now, but TI has been discussing about
@@ -109,17 +143,11 @@ static void wl1271_rx_status(struct wl1271 *wl,
109 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { 143 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
110 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; 144 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
111 145
112 if (likely(!(desc->flags & WL1271_RX_DESC_DECRYPT_FAIL))) 146 if (likely(!(desc->status & WL1271_RX_DESC_DECRYPT_FAIL)))
113 status->flag |= RX_FLAG_DECRYPTED; 147 status->flag |= RX_FLAG_DECRYPTED;
114 148 if (unlikely(desc->status & WL1271_RX_DESC_MIC_FAIL))
115 if (unlikely(desc->flags & WL1271_RX_DESC_MIC_FAIL))
116 status->flag |= RX_FLAG_MMIC_ERROR; 149 status->flag |= RX_FLAG_MMIC_ERROR;
117 } 150 }
118
119 status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
120
121 if (status->rate_idx == WL1271_RX_RATE_UNSUPPORTED)
122 wl1271_warning("unsupported rate");
123} 151}
124 152
125static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length) 153static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
@@ -138,7 +166,7 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
138 } 166 }
139 167
140 buf = skb_put(skb, length); 168 buf = skb_put(skb, length);
141 wl1271_spi_reg_read(wl, WL1271_SLV_MEM_DATA, buf, length, true); 169 wl1271_spi_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
142 170
143 /* the data read starts with the descriptor */ 171 /* the data read starts with the descriptor */
144 desc = (struct wl1271_rx_descriptor *) buf; 172 desc = (struct wl1271_rx_descriptor *) buf;
@@ -156,7 +184,7 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
156 beacon ? "beacon" : ""); 184 beacon ? "beacon" : "");
157 185
158 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 186 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
159 ieee80211_rx(wl->hw, skb); 187 ieee80211_rx_ni(wl->hw, skb);
160} 188}
161 189
162void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status) 190void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
@@ -176,15 +204,15 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
176 break; 204 break;
177 } 205 }
178 206
179 wl->rx_mem_pool_addr.addr = 207 wl->rx_mem_pool_addr.addr = (mem_block << 8) +
180 (mem_block << 8) + wl_mem_map->packet_memory_pool_start; 208 le32_to_cpu(wl_mem_map->packet_memory_pool_start);
181 wl->rx_mem_pool_addr.addr_extra = 209 wl->rx_mem_pool_addr.addr_extra =
182 wl->rx_mem_pool_addr.addr + 4; 210 wl->rx_mem_pool_addr.addr + 4;
183 211
184 /* Choose the block we want to read */ 212 /* Choose the block we want to read */
185 wl1271_spi_reg_write(wl, WL1271_SLV_REG_DATA, 213 wl1271_spi_write(wl, WL1271_SLV_REG_DATA,
186 &wl->rx_mem_pool_addr, 214 &wl->rx_mem_pool_addr,
187 sizeof(wl->rx_mem_pool_addr), false); 215 sizeof(wl->rx_mem_pool_addr), false);
188 216
189 wl1271_rx_handle_data(wl, buf_size); 217 wl1271_rx_handle_data(wl, buf_size);
190 218
@@ -192,9 +220,5 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
192 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 220 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
193 } 221 }
194 222
195 wl1271_reg_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 223 wl1271_spi_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
196
197 /* This is a workaround for some problems in the chip */
198 wl1271_reg_write32(wl, RX_DRIVER_DUMMY_WRITE_ADDRESS, 0x1);
199
200} 224}
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index d1ca60e43a25..1ae6d1783ed4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -102,14 +102,14 @@
102#define RX_BUF_SIZE_SHIFT_DIV 6 102#define RX_BUF_SIZE_SHIFT_DIV 6
103 103
104struct wl1271_rx_descriptor { 104struct wl1271_rx_descriptor {
105 u16 length; 105 __le16 length;
106 u8 status; 106 u8 status;
107 u8 flags; 107 u8 flags;
108 u8 rate; 108 u8 rate;
109 u8 channel; 109 u8 channel;
110 s8 rssi; 110 s8 rssi;
111 u8 snr; 111 u8 snr;
112 u32 timestamp; 112 __le32 timestamp;
113 u8 packet_class; 113 u8 packet_class;
114 u8 process_id; 114 u8 process_id;
115 u8 pad_len; 115 u8 pad_len;
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 4a12880c16a8..02978a16e732 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -30,17 +30,29 @@
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "wl1271_spi.h" 31#include "wl1271_spi.h"
32 32
33static int wl1271_translate_reg_addr(struct wl1271 *wl, int addr) 33static int wl1271_translate_addr(struct wl1271 *wl, int addr)
34{ 34{
35 return addr - wl->physical_reg_addr + wl->virtual_reg_addr; 35 /*
36} 36 * To translate, first check to which window of addresses the
37 37 * particular address belongs. Then subtract the starting address
38static int wl1271_translate_mem_addr(struct wl1271 *wl, int addr) 38 * of that window from the address. Then, add offset of the
39{ 39 * translated region.
40 return addr - wl->physical_mem_addr + wl->virtual_mem_addr; 40 *
41 * The translated regions occur next to each other in physical device
42 * memory, so just add the sizes of the preceeding address regions to
43 * get the offset to the new region.
44 *
45 * Currently, only the two first regions are addressed, and the
46 * assumption is that all addresses will fall into either of those
47 * two.
48 */
49 if ((addr >= wl->part.reg.start) &&
50 (addr < wl->part.reg.start + wl->part.reg.size))
51 return addr - wl->part.reg.start + wl->part.mem.size;
52 else
53 return addr - wl->part.mem.start;
41} 54}
42 55
43
44void wl1271_spi_reset(struct wl1271 *wl) 56void wl1271_spi_reset(struct wl1271 *wl)
45{ 57{
46 u8 *cmd; 58 u8 *cmd;
@@ -123,133 +135,137 @@ void wl1271_spi_init(struct wl1271 *wl)
123 135
124/* Set the SPI partitions to access the chip addresses 136/* Set the SPI partitions to access the chip addresses
125 * 137 *
126 * There are two VIRTUAL (SPI) partitions (the memory partition and the 138 * To simplify driver code, a fixed (virtual) memory map is defined for
127 * registers partition), which are mapped to two different areas of the 139 * register and memory addresses. Because in the chipset, in different stages
128 * PHYSICAL (hardware) memory. This function also makes other checks to 140 * of operation, those addresses will move around, an address translation
129 * ensure that the partitions are not overlapping. In the diagram below, the 141 * mechanism is required.
130 * memory partition comes before the register partition, but the opposite is
131 * also supported.
132 * 142 *
133 * PHYSICAL address 143 * There are four partitions (three memory and one register partition),
144 * which are mapped to two different areas of the hardware memory.
145 *
146 * Virtual address
134 * space 147 * space
135 * 148 *
136 * | | 149 * | |
137 * ...+----+--> mem_start 150 * ...+----+--> mem.start
138 * VIRTUAL address ... | | 151 * Physical address ... | |
139 * space ... | | [PART_0] 152 * space ... | | [PART_0]
140 * ... | | 153 * ... | |
141 * 0x00000000 <--+----+... ...+----+--> mem_start + mem_size 154 * 00000000 <--+----+... ...+----+--> mem.start + mem.size
142 * | | ... | | 155 * | | ... | |
143 * |MEM | ... | | 156 * |MEM | ... | |
144 * | | ... | | 157 * | | ... | |
145 * part_size <--+----+... | | {unused area) 158 * mem.size <--+----+... | | {unused area)
146 * | | ... | | 159 * | | ... | |
147 * |REG | ... | | 160 * |REG | ... | |
148 * part_size | | ... | | 161 * mem.size | | ... | |
149 * + <--+----+... ...+----+--> reg_start 162 * + <--+----+... ...+----+--> reg.start
150 * reg_size ... | | 163 * reg.size | | ... | |
151 * ... | | [PART_1] 164 * |MEM2| ... | | [PART_1]
152 * ... | | 165 * | | ... | |
153 * ...+----+--> reg_start + reg_size 166 * ...+----+--> reg.start + reg.size
154 * | | 167 * | |
155 * 168 *
156 */ 169 */
157int wl1271_set_partition(struct wl1271 *wl, 170int wl1271_set_partition(struct wl1271 *wl,
158 u32 mem_start, u32 mem_size, 171 struct wl1271_partition_set *p)
159 u32 reg_start, u32 reg_size)
160{ 172{
161 struct wl1271_partition *partition; 173 /* copy partition info */
162 struct spi_transfer t; 174 memcpy(&wl->part, p, sizeof(*p));
163 struct spi_message m;
164 size_t len, cmd_len;
165 u32 *cmd;
166 int addr;
167
168 cmd_len = sizeof(u32) + 2 * sizeof(struct wl1271_partition);
169 cmd = kzalloc(cmd_len, GFP_KERNEL);
170 if (!cmd)
171 return -ENOMEM;
172
173 spi_message_init(&m);
174 memset(&t, 0, sizeof(t));
175
176 partition = (struct wl1271_partition *) (cmd + 1);
177 addr = HW_ACCESS_PART0_SIZE_ADDR;
178 len = 2 * sizeof(struct wl1271_partition);
179
180 *cmd |= WSPI_CMD_WRITE;
181 *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
182 *cmd |= addr & WSPI_CMD_BYTE_ADDR;
183 175
184 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", 176 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
185 mem_start, mem_size); 177 p->mem.start, p->mem.size);
186 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", 178 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
187 reg_start, reg_size); 179 p->reg.start, p->reg.size);
188 180 wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
189 /* Make sure that the two partitions together don't exceed the 181 p->mem2.start, p->mem2.size);
190 * address range */ 182 wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
191 if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) { 183 p->mem3.start, p->mem3.size);
192 wl1271_debug(DEBUG_SPI, "Total size exceeds maximum virtual" 184
193 " address range. Truncating partition[0]."); 185 /* write partition info to the chipset */
194 mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size; 186 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
195 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", 187 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
196 mem_start, mem_size); 188 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
197 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", 189 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
198 reg_start, reg_size); 190 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
199 } 191 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
192 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
200 193
201 if ((mem_start < reg_start) && 194 return 0;
202 ((mem_start + mem_size) > reg_start)) { 195}
203 /* Guarantee that the memory partition doesn't overlap the
204 * registers partition */
205 wl1271_debug(DEBUG_SPI, "End of partition[0] is "
206 "overlapping partition[1]. Adjusted.");
207 mem_size = reg_start - mem_start;
208 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
209 mem_start, mem_size);
210 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
211 reg_start, reg_size);
212 } else if ((reg_start < mem_start) &&
213 ((reg_start + reg_size) > mem_start)) {
214 /* Guarantee that the register partition doesn't overlap the
215 * memory partition */
216 wl1271_debug(DEBUG_SPI, "End of partition[1] is"
217 " overlapping partition[0]. Adjusted.");
218 reg_size = mem_start - reg_start;
219 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
220 mem_start, mem_size);
221 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
222 reg_start, reg_size);
223 }
224 196
225 partition[0].start = mem_start; 197#define WL1271_BUSY_WORD_TIMEOUT 1000
226 partition[0].size = mem_size;
227 partition[1].start = reg_start;
228 partition[1].size = reg_size;
229 198
230 wl->physical_mem_addr = mem_start; 199/* FIXME: Check busy words, removed due to SPI bug */
231 wl->physical_reg_addr = reg_start; 200#if 0
201static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
202{
203 struct spi_transfer t[1];
204 struct spi_message m;
205 u32 *busy_buf;
206 int num_busy_bytes = 0;
232 207
233 wl->virtual_mem_addr = 0; 208 wl1271_info("spi read BUSY!");
234 wl->virtual_reg_addr = mem_size;
235 209
236 t.tx_buf = cmd; 210 /*
237 t.len = cmd_len; 211 * Look for the non-busy word in the read buffer, and if found,
238 spi_message_add_tail(&t, &m); 212 * read in the remaining data into the buffer.
213 */
214 busy_buf = (u32 *)buf;
215 for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
216 num_busy_bytes += sizeof(u32);
217 if (*busy_buf & 0x1) {
218 spi_message_init(&m);
219 memset(t, 0, sizeof(t));
220 memmove(buf, busy_buf, len - num_busy_bytes);
221 t[0].rx_buf = buf + (len - num_busy_bytes);
222 t[0].len = num_busy_bytes;
223 spi_message_add_tail(&t[0], &m);
224 spi_sync(wl->spi, &m);
225 return;
226 }
227 }
239 228
240 spi_sync(wl->spi, &m); 229 /*
230 * Read further busy words from SPI until a non-busy word is
231 * encountered, then read the data itself into the buffer.
232 */
233 wl1271_info("spi read BUSY-polling needed!");
241 234
242 kfree(cmd); 235 num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
236 busy_buf = wl->buffer_busyword;
237 while (num_busy_bytes) {
238 num_busy_bytes--;
239 spi_message_init(&m);
240 memset(t, 0, sizeof(t));
241 t[0].rx_buf = busy_buf;
242 t[0].len = sizeof(u32);
243 spi_message_add_tail(&t[0], &m);
244 spi_sync(wl->spi, &m);
245
246 if (*busy_buf & 0x1) {
247 spi_message_init(&m);
248 memset(t, 0, sizeof(t));
249 t[0].rx_buf = buf;
250 t[0].len = len;
251 spi_message_add_tail(&t[0], &m);
252 spi_sync(wl->spi, &m);
253 return;
254 }
255 }
243 256
244 return 0; 257 /* The SPI bus is unresponsive, the read failed. */
258 memset(buf, 0, len);
259 wl1271_error("SPI read busy-word timeout!\n");
245} 260}
261#endif
246 262
247void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, 263void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
248 size_t len, bool fixed) 264 size_t len, bool fixed)
249{ 265{
250 struct spi_transfer t[3]; 266 struct spi_transfer t[3];
251 struct spi_message m; 267 struct spi_message m;
252 u8 *busy_buf; 268 u32 *busy_buf;
253 u32 *cmd; 269 u32 *cmd;
254 270
255 cmd = &wl->buffer_cmd; 271 cmd = &wl->buffer_cmd;
@@ -281,14 +297,16 @@ void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf,
281 297
282 spi_sync(wl->spi, &m); 298 spi_sync(wl->spi, &m);
283 299
284 /* FIXME: check busy words */ 300 /* FIXME: Check busy words, removed due to SPI bug */
301 /* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1))
302 wl1271_spi_read_busy(wl, buf, len); */
285 303
286 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); 304 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
287 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); 305 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
288} 306}
289 307
290void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, 308void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
291 size_t len, bool fixed) 309 size_t len, bool fixed)
292{ 310{
293 struct spi_transfer t[2]; 311 struct spi_transfer t[2];
294 struct spi_message m; 312 struct spi_message m;
@@ -321,62 +339,77 @@ void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf,
321 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); 339 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
322} 340}
323 341
324void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, 342void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
325 size_t len) 343 bool fixed)
326{ 344{
327 int physical; 345 int physical;
328 346
329 physical = wl1271_translate_mem_addr(wl, addr); 347 physical = wl1271_translate_addr(wl, addr);
330 348
331 wl1271_spi_read(wl, physical, buf, len, false); 349 wl1271_spi_raw_read(wl, physical, buf, len, fixed);
332} 350}
333 351
334void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, 352void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
335 size_t len) 353 bool fixed)
336{ 354{
337 int physical; 355 int physical;
338 356
339 physical = wl1271_translate_mem_addr(wl, addr); 357 physical = wl1271_translate_addr(wl, addr);
340 358
341 wl1271_spi_write(wl, physical, buf, len, false); 359 wl1271_spi_raw_write(wl, physical, buf, len, fixed);
342} 360}
343 361
344void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len, 362u32 wl1271_spi_read32(struct wl1271 *wl, int addr)
345 bool fixed)
346{ 363{
347 int physical; 364 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
348 365}
349 physical = wl1271_translate_reg_addr(wl, addr);
350 366
351 wl1271_spi_read(wl, physical, buf, len, fixed); 367void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val)
368{
369 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
352} 370}
353 371
354void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len, 372void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
355 bool fixed)
356{ 373{
357 int physical; 374 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
375 addr = (addr >> 1) + 0x30000;
376 wl1271_spi_write32(wl, OCP_POR_CTR, addr);
358 377
359 physical = wl1271_translate_reg_addr(wl, addr); 378 /* write value to OCP_POR_WDATA */
379 wl1271_spi_write32(wl, OCP_DATA_WRITE, val);
360 380
361 wl1271_spi_write(wl, physical, buf, len, fixed); 381 /* write 1 to OCP_CMD */
382 wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_WRITE);
362} 383}
363 384
364u32 wl1271_mem_read32(struct wl1271 *wl, int addr) 385u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
365{ 386{
366 return wl1271_read32(wl, wl1271_translate_mem_addr(wl, addr)); 387 u32 val;
367} 388 int timeout = OCP_CMD_LOOP;
368 389
369void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val) 390 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
370{ 391 addr = (addr >> 1) + 0x30000;
371 wl1271_write32(wl, wl1271_translate_mem_addr(wl, addr), val); 392 wl1271_spi_write32(wl, OCP_POR_CTR, addr);
372}
373 393
374u32 wl1271_reg_read32(struct wl1271 *wl, int addr) 394 /* write 2 to OCP_CMD */
375{ 395 wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_READ);
376 return wl1271_read32(wl, wl1271_translate_reg_addr(wl, addr));
377}
378 396
379void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val) 397 /* poll for data ready */
380{ 398 do {
381 wl1271_write32(wl, wl1271_translate_reg_addr(wl, addr), val); 399 val = wl1271_spi_read32(wl, OCP_DATA_READ);
400 timeout--;
401 } while (!(val & OCP_READY_MASK) && timeout);
402
403 if (!timeout) {
404 wl1271_warning("Top register access timed out.");
405 return 0xffff;
406 }
407
408 /* check data status and return if OK */
409 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
410 return val & 0xffff;
411 else {
412 wl1271_warning("Top register access returned error.");
413 return 0xffff;
414 }
382} 415}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
index 2c9968458646..cb7df1c56314 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.h
@@ -29,10 +29,14 @@
29 29
30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
31 31
32#define HW_ACCESS_PART0_SIZE_ADDR 0x1FFC0 32#define HW_PARTITION_REGISTERS_ADDR 0x1ffc0
33#define HW_ACCESS_PART0_START_ADDR 0x1FFC4 33#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
34#define HW_ACCESS_PART1_SIZE_ADDR 0x1FFC8 34#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
35#define HW_ACCESS_PART1_START_ADDR 0x1FFCC 35#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
36 40
37#define HW_ACCESS_REGISTER_SIZE 4 41#define HW_ACCESS_REGISTER_SIZE 4
38 42
@@ -67,47 +71,56 @@
67 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32)) 71 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
68#define HW_ACCESS_WSPI_INIT_CMD_MASK 0 72#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
69 73
74#define OCP_CMD_LOOP 32
75
76#define OCP_CMD_WRITE 0x1
77#define OCP_CMD_READ 0x2
78
79#define OCP_READY_MASK BIT(18)
80#define OCP_STATUS_MASK (BIT(16) | BIT(17))
81
82#define OCP_STATUS_NO_RESP 0x00000
83#define OCP_STATUS_OK 0x10000
84#define OCP_STATUS_REQ_FAILED 0x20000
85#define OCP_STATUS_RESP_ERROR 0x30000
70 86
71/* Raw target IO, address is not translated */ 87/* Raw target IO, address is not translated */
72void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, 88void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
73 size_t len, bool fixed); 89 size_t len, bool fixed);
74void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, 90void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
75 size_t len, bool fixed); 91 size_t len, bool fixed);
76 92
77/* Memory target IO, address is tranlated to partition 0 */ 93/* Translated target IO */
78void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, size_t len); 94void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
79void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, size_t len); 95 bool fixed);
80u32 wl1271_mem_read32(struct wl1271 *wl, int addr); 96void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
81void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val); 97 bool fixed);
98u32 wl1271_spi_read32(struct wl1271 *wl, int addr);
99void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val);
82 100
83/* Registers IO */ 101/* Top Register IO */
84void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len, 102void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
85 bool fixed); 103u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
86void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len,
87 bool fixed);
88u32 wl1271_reg_read32(struct wl1271 *wl, int addr);
89void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val);
90 104
91/* INIT and RESET words */ 105/* INIT and RESET words */
92void wl1271_spi_reset(struct wl1271 *wl); 106void wl1271_spi_reset(struct wl1271 *wl);
93void wl1271_spi_init(struct wl1271 *wl); 107void wl1271_spi_init(struct wl1271 *wl);
94int wl1271_set_partition(struct wl1271 *wl, 108int wl1271_set_partition(struct wl1271 *wl,
95 u32 part_start, u32 part_size, 109 struct wl1271_partition_set *p);
96 u32 reg_start, u32 reg_size);
97 110
98static inline u32 wl1271_read32(struct wl1271 *wl, int addr) 111static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
99{ 112{
100 wl1271_spi_read(wl, addr, &wl->buffer_32, 113 wl1271_spi_raw_read(wl, addr, &wl->buffer_32,
101 sizeof(wl->buffer_32), false); 114 sizeof(wl->buffer_32), false);
102 115
103 return wl->buffer_32; 116 return wl->buffer_32;
104} 117}
105 118
106static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) 119static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
107{ 120{
108 wl->buffer_32 = val; 121 wl->buffer_32 = val;
109 wl1271_spi_write(wl, addr, &wl->buffer_32, 122 wl1271_spi_raw_write(wl, addr, &wl->buffer_32,
110 sizeof(wl->buffer_32), false); 123 sizeof(wl->buffer_32), false);
111} 124}
112 125
113#endif /* __WL1271_SPI_H__ */ 126#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index ff221258b941..00af065c77c2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -33,8 +33,7 @@
33static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb) 33static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
34{ 34{
35 int i; 35 int i;
36 36 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
37 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
38 if (wl->tx_frames[i] == NULL) { 37 if (wl->tx_frames[i] == NULL) {
39 wl->tx_frames[i] = skb; 38 wl->tx_frames[i] = skb;
40 return i; 39 return i;
@@ -58,8 +57,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
58 /* approximate the number of blocks required for this packet 57 /* approximate the number of blocks required for this packet
59 in the firmware */ 58 in the firmware */
60 /* FIXME: try to figure out what is done here and make it cleaner */ 59 /* FIXME: try to figure out what is done here and make it cleaner */
61 total_blocks = (skb->len) >> TX_HW_BLOCK_SHIFT_DIV; 60 total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV;
62 excluded = (total_blocks << 2) + (skb->len & 0xff) + 34; 61 excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
63 total_blocks += (excluded > 252) ? 2 : 1; 62 total_blocks += (excluded > 252) ? 2 : 1;
64 total_blocks += TX_HW_BLOCK_SPARE; 63 total_blocks += TX_HW_BLOCK_SPARE;
65 64
@@ -89,15 +88,25 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
89{ 88{
90 struct wl1271_tx_hw_descr *desc; 89 struct wl1271_tx_hw_descr *desc;
91 int pad; 90 int pad;
91 u16 tx_attr;
92 92
93 desc = (struct wl1271_tx_hw_descr *) skb->data; 93 desc = (struct wl1271_tx_hw_descr *) skb->data;
94 94
95 /* relocate space for security header */
96 if (extra) {
97 void *framestart = skb->data + sizeof(*desc);
98 u16 fc = *(u16 *)(framestart + extra);
99 int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
100 memmove(framestart, framestart + extra, hdrlen);
101 }
102
95 /* configure packet life time */ 103 /* configure packet life time */
96 desc->start_time = jiffies_to_usecs(jiffies) - wl->time_offset; 104 desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) -
97 desc->life_time = TX_HW_MGMT_PKT_LIFETIME_TU; 105 wl->time_offset);
106 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
98 107
99 /* configure the tx attributes */ 108 /* configure the tx attributes */
100 desc->tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; 109 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
101 /* FIXME: do we know the packet priority? can we identify mgmt 110 /* FIXME: do we know the packet priority? can we identify mgmt
102 packets, and use max prio for them at least? */ 111 packets, and use max prio for them at least? */
103 desc->tid = 0; 112 desc->tid = 0;
@@ -106,11 +115,13 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
106 115
107 /* align the length (and store in terms of words) */ 116 /* align the length (and store in terms of words) */
108 pad = WL1271_TX_ALIGN(skb->len); 117 pad = WL1271_TX_ALIGN(skb->len);
109 desc->length = pad >> 2; 118 desc->length = cpu_to_le16(pad >> 2);
110 119
111 /* calculate number of padding bytes */ 120 /* calculate number of padding bytes */
112 pad = pad - skb->len; 121 pad = pad - skb->len;
113 desc->tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; 122 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
123
124 desc->tx_attr = cpu_to_le16(tx_attr);
114 125
115 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); 126 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
116 return 0; 127 return 0;
@@ -147,11 +158,11 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
147 len = WL1271_TX_ALIGN(skb->len); 158 len = WL1271_TX_ALIGN(skb->len);
148 159
149 /* perform a fixed address block write with the packet */ 160 /* perform a fixed address block write with the packet */
150 wl1271_spi_reg_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true); 161 wl1271_spi_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
151 162
152 /* write packet new counter into the write access register */ 163 /* write packet new counter into the write access register */
153 wl->tx_packets_count++; 164 wl->tx_packets_count++;
154 wl1271_reg_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 165 wl1271_spi_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
155 166
156 desc = (struct wl1271_tx_hw_descr *) skb->data; 167 desc = (struct wl1271_tx_hw_descr *) skb->data;
157 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)", 168 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -254,14 +265,13 @@ out:
254static void wl1271_tx_complete_packet(struct wl1271 *wl, 265static void wl1271_tx_complete_packet(struct wl1271 *wl,
255 struct wl1271_tx_hw_res_descr *result) 266 struct wl1271_tx_hw_res_descr *result)
256{ 267{
257
258 struct ieee80211_tx_info *info; 268 struct ieee80211_tx_info *info;
259 struct sk_buff *skb; 269 struct sk_buff *skb;
260 u32 header_len; 270 u16 seq;
261 int id = result->id; 271 int id = result->id;
262 272
263 /* check for id legality */ 273 /* check for id legality */
264 if (id >= TX_HW_RESULT_QUEUE_LEN || wl->tx_frames[id] == NULL) { 274 if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) {
265 wl1271_warning("TX result illegal id: %d", id); 275 wl1271_warning("TX result illegal id: %d", id);
266 return; 276 return;
267 } 277 }
@@ -284,22 +294,32 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
284 /* info->status.retry_count = result->ack_failures; */ 294 /* info->status.retry_count = result->ack_failures; */
285 wl->stats.retry_count += result->ack_failures; 295 wl->stats.retry_count += result->ack_failures;
286 296
287 /* get header len */ 297 /* update security sequence number */
298 seq = wl->tx_security_seq_16 +
299 (result->lsb_security_sequence_number -
300 wl->tx_security_last_seq);
301 wl->tx_security_last_seq = result->lsb_security_sequence_number;
302
303 if (seq < wl->tx_security_seq_16)
304 wl->tx_security_seq_32++;
305 wl->tx_security_seq_16 = seq;
306
307 /* remove private header from packet */
308 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
309
310 /* remove TKIP header space if present */
288 if (info->control.hw_key && 311 if (info->control.hw_key &&
289 info->control.hw_key->alg == ALG_TKIP) 312 info->control.hw_key->alg == ALG_TKIP) {
290 header_len = WL1271_TKIP_IV_SPACE + 313 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
291 sizeof(struct wl1271_tx_hw_descr); 314 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
292 else 315 skb_pull(skb, WL1271_TKIP_IV_SPACE);
293 header_len = sizeof(struct wl1271_tx_hw_descr); 316 }
294 317
295 wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" 318 wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
296 " status 0x%x", 319 " status 0x%x",
297 result->id, skb, result->ack_failures, 320 result->id, skb, result->ack_failures,
298 result->rate_class_index, result->status); 321 result->rate_class_index, result->status);
299 322
300 /* remove private header from packet */
301 skb_pull(skb, header_len);
302
303 /* return the packet to the stack */ 323 /* return the packet to the stack */
304 ieee80211_tx_status(wl->hw, skb); 324 ieee80211_tx_status(wl->hw, skb);
305 wl->tx_frames[result->id] = NULL; 325 wl->tx_frames[result->id] = NULL;
@@ -315,8 +335,8 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
315 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); 335 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
316 336
317 /* read the tx results from the chipset */ 337 /* read the tx results from the chipset */
318 wl1271_spi_mem_read(wl, memmap->tx_result, 338 wl1271_spi_read(wl, le32_to_cpu(memmap->tx_result),
319 wl->tx_res_if, sizeof(*wl->tx_res_if)); 339 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
320 340
321 /* verify that the result buffer is not getting overrun */ 341 /* verify that the result buffer is not getting overrun */
322 if (count > TX_HW_RESULT_QUEUE_LEN) { 342 if (count > TX_HW_RESULT_QUEUE_LEN) {
@@ -337,10 +357,10 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
337 } 357 }
338 358
339 /* write host counter to chipset (to ack) */ 359 /* write host counter to chipset (to ack) */
340 wl1271_mem_write32(wl, memmap->tx_result + 360 wl1271_spi_write32(wl, le32_to_cpu(memmap->tx_result) +
341 offsetof(struct wl1271_tx_hw_res_if, 361 offsetof(struct wl1271_tx_hw_res_if,
342 tx_result_host_counter), 362 tx_result_host_counter),
343 wl->tx_res_if->tx_result_fw_counter); 363 le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
344} 364}
345 365
346/* caller must hold wl->mutex */ 366/* caller must hold wl->mutex */
@@ -364,7 +384,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
364 ieee80211_tx_status(wl->hw, skb); 384 ieee80211_tx_status(wl->hw, skb);
365 } 385 }
366 386
367 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) 387 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
368 if (wl->tx_frames[i] != NULL) { 388 if (wl->tx_frames[i] != NULL) {
369 skb = wl->tx_frames[i]; 389 skb = wl->tx_frames[i];
370 info = IEEE80211_SKB_CB(skb); 390 info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 4a614067ddba..416396caf0a0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -58,7 +58,7 @@
58 58
59struct wl1271_tx_hw_descr { 59struct wl1271_tx_hw_descr {
60 /* Length of packet in words, including descriptor+header+data */ 60 /* Length of packet in words, including descriptor+header+data */
61 u16 length; 61 __le16 length;
62 /* Number of extra memory blocks to allocate for this packet in 62 /* Number of extra memory blocks to allocate for this packet in
63 addition to the number of blocks derived from the packet length */ 63 addition to the number of blocks derived from the packet length */
64 u8 extra_mem_blocks; 64 u8 extra_mem_blocks;
@@ -67,12 +67,12 @@ struct wl1271_tx_hw_descr {
67 HW!! */ 67 HW!! */
68 u8 total_mem_blocks; 68 u8 total_mem_blocks;
69 /* Device time (in us) when the packet arrived to the driver */ 69 /* Device time (in us) when the packet arrived to the driver */
70 u32 start_time; 70 __le32 start_time;
71 /* Max delay in TUs until transmission. The last device time the 71 /* Max delay in TUs until transmission. The last device time the
72 packet can be transmitted is: startTime+(1024*LifeTime) */ 72 packet can be transmitted is: startTime+(1024*LifeTime) */
73 u16 life_time; 73 __le16 life_time;
74 /* Bitwise fields - see TX_ATTR... definitions above. */ 74 /* Bitwise fields - see TX_ATTR... definitions above. */
75 u16 tx_attr; 75 __le16 tx_attr;
76 /* Packet identifier used also in the Tx-Result. */ 76 /* Packet identifier used also in the Tx-Result. */
77 u8 id; 77 u8 id;
78 /* The packet TID value (as User-Priority) */ 78 /* The packet TID value (as User-Priority) */
@@ -100,12 +100,12 @@ struct wl1271_tx_hw_res_descr {
100 several possible reasons for failure. */ 100 several possible reasons for failure. */
101 u8 status; 101 u8 status;
102 /* Total air access duration including all retrys and overheads.*/ 102 /* Total air access duration including all retrys and overheads.*/
103 u16 medium_usage; 103 __le16 medium_usage;
104 /* The time passed from host xfer to Tx-complete.*/ 104 /* The time passed from host xfer to Tx-complete.*/
105 u32 fw_handling_time; 105 __le32 fw_handling_time;
106 /* Total media delay 106 /* Total media delay
107 (from 1st EDCA AIFS counter until TX Complete). */ 107 (from 1st EDCA AIFS counter until TX Complete). */
108 u32 medium_delay; 108 __le32 medium_delay;
109 /* LS-byte of last TKIP seq-num (saved per AC for recovery). */ 109 /* LS-byte of last TKIP seq-num (saved per AC for recovery). */
110 u8 lsb_security_sequence_number; 110 u8 lsb_security_sequence_number;
111 /* Retry count - number of transmissions without successful ACK.*/ 111 /* Retry count - number of transmissions without successful ACK.*/
@@ -118,8 +118,8 @@ struct wl1271_tx_hw_res_descr {
118} __attribute__ ((packed)); 118} __attribute__ ((packed));
119 119
120struct wl1271_tx_hw_res_if { 120struct wl1271_tx_hw_res_if {
121 u32 tx_result_fw_counter; 121 __le32 tx_result_fw_counter;
122 u32 tx_result_host_counter; 122 __le32 tx_result_host_counter;
123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; 123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
124} __attribute__ ((packed)); 124} __attribute__ ((packed));
125 125
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 657c2dbcb7d3..055d7bc6f592 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -122,8 +122,8 @@ struct wl12xx_null_data_template {
122} __attribute__ ((packed)); 122} __attribute__ ((packed));
123 123
124struct wl12xx_ps_poll_template { 124struct wl12xx_ps_poll_template {
125 u16 fc; 125 __le16 fc;
126 u16 aid; 126 __le16 aid;
127 u8 bssid[ETH_ALEN]; 127 u8 bssid[ETH_ALEN];
128 u8 ta[ETH_ALEN]; 128 u8 ta[ETH_ALEN];
129} __attribute__ ((packed)); 129} __attribute__ ((packed));
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 4e79a9800134..dfa1b9bc22c8 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -755,7 +755,7 @@ static int hw_reset_phy(struct zd_chip *chip)
755static int zd1211_hw_init_hmac(struct zd_chip *chip) 755static int zd1211_hw_init_hmac(struct zd_chip *chip)
756{ 756{
757 static const struct zd_ioreq32 ioreqs[] = { 757 static const struct zd_ioreq32 ioreqs[] = {
758 { CR_ZD1211_RETRY_MAX, 0x2 }, 758 { CR_ZD1211_RETRY_MAX, ZD1211_RETRY_COUNT },
759 { CR_RX_THRESHOLD, 0x000c0640 }, 759 { CR_RX_THRESHOLD, 0x000c0640 },
760 }; 760 };
761 761
@@ -767,7 +767,7 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip)
767static int zd1211b_hw_init_hmac(struct zd_chip *chip) 767static int zd1211b_hw_init_hmac(struct zd_chip *chip)
768{ 768{
769 static const struct zd_ioreq32 ioreqs[] = { 769 static const struct zd_ioreq32 ioreqs[] = {
770 { CR_ZD1211B_RETRY_MAX, 0x02020202 }, 770 { CR_ZD1211B_RETRY_MAX, ZD1211B_RETRY_COUNT },
771 { CR_ZD1211B_CWIN_MAX_MIN_AC0, 0x007f003f }, 771 { CR_ZD1211B_CWIN_MAX_MIN_AC0, 0x007f003f },
772 { CR_ZD1211B_CWIN_MAX_MIN_AC1, 0x007f003f }, 772 { CR_ZD1211B_CWIN_MAX_MIN_AC1, 0x007f003f },
773 { CR_ZD1211B_CWIN_MAX_MIN_AC2, 0x003f001f }, 773 { CR_ZD1211B_CWIN_MAX_MIN_AC2, 0x003f001f },
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 678c139a840c..9fd8f3508d66 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -642,13 +642,29 @@ enum {
642#define CR_ZD1211B_TXOP CTL_REG(0x0b20) 642#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
643#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) 643#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
644 644
645/* Value for CR_ZD1211_RETRY_MAX & CR_ZD1211B_RETRY_MAX. Vendor driver uses 2,
646 * we use 0. The first rate is tried (count+2), then all next rates are tried
647 * twice, until 1 Mbits is tried. */
648#define ZD1211_RETRY_COUNT 0
649#define ZD1211B_RETRY_COUNT \
650 (ZD1211_RETRY_COUNT << 0)| \
651 (ZD1211_RETRY_COUNT << 8)| \
652 (ZD1211_RETRY_COUNT << 16)| \
653 (ZD1211_RETRY_COUNT << 24)
654
645/* Used to detect PLL lock */ 655/* Used to detect PLL lock */
646#define UW2453_INTR_REG ((zd_addr_t)0x85c1) 656#define UW2453_INTR_REG ((zd_addr_t)0x85c1)
647 657
648#define CWIN_SIZE 0x007f043f 658#define CWIN_SIZE 0x007f043f
649 659
650 660
651#define HWINT_ENABLED 0x004f0000 661#define HWINT_ENABLED \
662 (INT_TX_COMPLETE_EN| \
663 INT_RX_COMPLETE_EN| \
664 INT_RETRY_FAIL_EN| \
665 INT_WAKEUP_EN| \
666 INT_CFG_NEXT_BCN_EN)
667
652#define HWINT_DISABLED 0 668#define HWINT_DISABLED 0
653 669
654#define E2P_PWR_INT_GUARD 8 670#define E2P_PWR_INT_GUARD 8
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6d666359a42f..8a243732c519 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -88,6 +88,34 @@ static const struct ieee80211_rate zd_rates[] = {
88 .flags = 0 }, 88 .flags = 0 },
89}; 89};
90 90
91/*
92 * Zydas retry rates table. Each line is listed in the same order as
93 * in zd_rates[] and contains all the rate used when a packet is sent
94 * starting with a given rates. Let's consider an example :
95 *
96 * "11 Mbits : 4, 3, 2, 1, 0" means :
97 * - packet is sent using 4 different rates
98 * - 1st rate is index 3 (ie 11 Mbits)
99 * - 2nd rate is index 2 (ie 5.5 Mbits)
100 * - 3rd rate is index 1 (ie 2 Mbits)
101 * - 4th rate is index 0 (ie 1 Mbits)
102 */
103
104static const struct tx_retry_rate zd_retry_rates[] = {
105 { /* 1 Mbits */ 1, { 0 }},
106 { /* 2 Mbits */ 2, { 1, 0 }},
107 { /* 5.5 Mbits */ 3, { 2, 1, 0 }},
108 { /* 11 Mbits */ 4, { 3, 2, 1, 0 }},
109 { /* 6 Mbits */ 5, { 4, 3, 2, 1, 0 }},
110 { /* 9 Mbits */ 6, { 5, 4, 3, 2, 1, 0}},
111 { /* 12 Mbits */ 5, { 6, 3, 2, 1, 0 }},
112 { /* 18 Mbits */ 6, { 7, 6, 3, 2, 1, 0 }},
113 { /* 24 Mbits */ 6, { 8, 6, 3, 2, 1, 0 }},
114 { /* 36 Mbits */ 7, { 9, 8, 6, 3, 2, 1, 0 }},
115 { /* 48 Mbits */ 8, {10, 9, 8, 6, 3, 2, 1, 0 }},
116 { /* 54 Mbits */ 9, {11, 10, 9, 8, 6, 3, 2, 1, 0 }}
117};
118
91static const struct ieee80211_channel zd_channels[] = { 119static const struct ieee80211_channel zd_channels[] = {
92 { .center_freq = 2412, .hw_value = 1 }, 120 { .center_freq = 2412, .hw_value = 1 },
93 { .center_freq = 2417, .hw_value = 2 }, 121 { .center_freq = 2417, .hw_value = 2 },
@@ -282,7 +310,7 @@ static void zd_op_stop(struct ieee80211_hw *hw)
282} 310}
283 311
284/** 312/**
285 * tx_status - reports tx status of a packet if required 313 * zd_mac_tx_status - reports tx status of a packet if required
286 * @hw - a &struct ieee80211_hw pointer 314 * @hw - a &struct ieee80211_hw pointer
287 * @skb - a sk-buffer 315 * @skb - a sk-buffer
288 * @flags: extra flags to set in the TX status info 316 * @flags: extra flags to set in the TX status info
@@ -295,15 +323,49 @@ static void zd_op_stop(struct ieee80211_hw *hw)
295 * 323 *
296 * If no status information has been requested, the skb is freed. 324 * If no status information has been requested, the skb is freed.
297 */ 325 */
298static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 326static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
299 int ackssi, bool success) 327 int ackssi, struct tx_status *tx_status)
300{ 328{
301 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 329 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
330 int i;
331 int success = 1, retry = 1;
332 int first_idx;
333 const struct tx_retry_rate *retries;
302 334
303 ieee80211_tx_info_clear_status(info); 335 ieee80211_tx_info_clear_status(info);
304 336
305 if (success) 337 if (tx_status) {
338 success = !tx_status->failure;
339 retry = tx_status->retry + success;
340 }
341
342 if (success) {
343 /* success */
306 info->flags |= IEEE80211_TX_STAT_ACK; 344 info->flags |= IEEE80211_TX_STAT_ACK;
345 } else {
346 /* failure */
347 info->flags &= ~IEEE80211_TX_STAT_ACK;
348 }
349
350 first_idx = info->status.rates[0].idx;
351 ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
352 retries = &zd_retry_rates[first_idx];
353 ZD_ASSERT(0<=retry && retry<=retries->count);
354
355 info->status.rates[0].idx = retries->rate[0];
356 info->status.rates[0].count = 1; // (retry > 1 ? 2 : 1);
357
358 for (i=1; i<IEEE80211_TX_MAX_RATES-1 && i<retry; i++) {
359 info->status.rates[i].idx = retries->rate[i];
360 info->status.rates[i].count = 1; // ((i==retry-1) && success ? 1:2);
361 }
362 for (; i<IEEE80211_TX_MAX_RATES && i<retry; i++) {
363 info->status.rates[i].idx = retries->rate[retry-1];
364 info->status.rates[i].count = 1; // (success ? 1:2);
365 }
366 if (i<IEEE80211_TX_MAX_RATES)
367 info->status.rates[i].idx = -1; /* terminate */
368
307 info->status.ack_signal = ackssi; 369 info->status.ack_signal = ackssi;
308 ieee80211_tx_status_irqsafe(hw, skb); 370 ieee80211_tx_status_irqsafe(hw, skb);
309} 371}
@@ -316,16 +378,79 @@ static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
316 * transferred. The first frame from the tx queue, will be selected and 378 * transferred. The first frame from the tx queue, will be selected and
317 * reported as error to the upper layers. 379 * reported as error to the upper layers.
318 */ 380 */
319void zd_mac_tx_failed(struct ieee80211_hw *hw) 381void zd_mac_tx_failed(struct urb *urb)
320{ 382{
321 struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue; 383 struct ieee80211_hw * hw = zd_usb_to_hw(urb->context);
384 struct zd_mac *mac = zd_hw_mac(hw);
385 struct sk_buff_head *q = &mac->ack_wait_queue;
322 struct sk_buff *skb; 386 struct sk_buff *skb;
387 struct tx_status *tx_status = (struct tx_status *)urb->transfer_buffer;
388 unsigned long flags;
389 int success = !tx_status->failure;
390 int retry = tx_status->retry + success;
391 int found = 0;
392 int i, position = 0;
323 393
324 skb = skb_dequeue(q); 394 q = &mac->ack_wait_queue;
325 if (skb == NULL) 395 spin_lock_irqsave(&q->lock, flags);
326 return; 396
397 skb_queue_walk(q, skb) {
398 struct ieee80211_hdr *tx_hdr;
399 struct ieee80211_tx_info *info;
400 int first_idx, final_idx;
401 const struct tx_retry_rate *retries;
402 u8 final_rate;
403
404 position ++;
405
406 /* if the hardware reports a failure and we had a 802.11 ACK
407 * pending, then we skip the first skb when searching for a
408 * matching frame */
409 if (tx_status->failure && mac->ack_pending &&
410 skb_queue_is_first(q, skb)) {
411 continue;
412 }
413
414 tx_hdr = (struct ieee80211_hdr *)skb->data;
415
416 /* we skip all frames not matching the reported destination */
417 if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
418 continue;
419 }
420
421 /* we skip all frames not matching the reported final rate */
327 422
328 tx_status(hw, skb, 0, 0); 423 info = IEEE80211_SKB_CB(skb);
424 first_idx = info->status.rates[0].idx;
425 ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
426 retries = &zd_retry_rates[first_idx];
427 if (retry < 0 || retry > retries->count) {
428 continue;
429 }
430
431 ZD_ASSERT(0<=retry && retry<=retries->count);
432 final_idx = retries->rate[retry-1];
433 final_rate = zd_rates[final_idx].hw_value;
434
435 if (final_rate != tx_status->rate) {
436 continue;
437 }
438
439 found = 1;
440 break;
441 }
442
443 if (found) {
444 for (i=1; i<=position; i++) {
445 skb = __skb_dequeue(q);
446 zd_mac_tx_status(hw, skb,
447 mac->ack_pending ? mac->ack_signal : 0,
448 i == position ? tx_status : NULL);
449 mac->ack_pending = 0;
450 }
451 }
452
453 spin_unlock_irqrestore(&q->lock, flags);
329} 454}
330 455
331/** 456/**
@@ -342,18 +467,27 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
342{ 467{
343 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
344 struct ieee80211_hw *hw = info->rate_driver_data[0]; 469 struct ieee80211_hw *hw = info->rate_driver_data[0];
470 struct zd_mac *mac = zd_hw_mac(hw);
471
472 ieee80211_tx_info_clear_status(info);
345 473
346 skb_pull(skb, sizeof(struct zd_ctrlset)); 474 skb_pull(skb, sizeof(struct zd_ctrlset));
347 if (unlikely(error || 475 if (unlikely(error ||
348 (info->flags & IEEE80211_TX_CTL_NO_ACK))) { 476 (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
349 tx_status(hw, skb, 0, !error); 477 /*
478 * FIXME : do we need to fill in anything ?
479 */
480 ieee80211_tx_status_irqsafe(hw, skb);
350 } else { 481 } else {
351 struct sk_buff_head *q = 482 struct sk_buff_head *q = &mac->ack_wait_queue;
352 &zd_hw_mac(hw)->ack_wait_queue;
353 483
354 skb_queue_tail(q, skb); 484 skb_queue_tail(q, skb);
355 while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) 485 while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
356 zd_mac_tx_failed(hw); 486 zd_mac_tx_status(hw, skb_dequeue(q),
487 mac->ack_pending ? mac->ack_signal : 0,
488 NULL);
489 mac->ack_pending = 0;
490 }
357 } 491 }
358} 492}
359 493
@@ -606,27 +740,47 @@ fail:
606static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr, 740static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
607 struct ieee80211_rx_status *stats) 741 struct ieee80211_rx_status *stats)
608{ 742{
743 struct zd_mac *mac = zd_hw_mac(hw);
609 struct sk_buff *skb; 744 struct sk_buff *skb;
610 struct sk_buff_head *q; 745 struct sk_buff_head *q;
611 unsigned long flags; 746 unsigned long flags;
747 int found = 0;
748 int i, position = 0;
612 749
613 if (!ieee80211_is_ack(rx_hdr->frame_control)) 750 if (!ieee80211_is_ack(rx_hdr->frame_control))
614 return 0; 751 return 0;
615 752
616 q = &zd_hw_mac(hw)->ack_wait_queue; 753 q = &mac->ack_wait_queue;
617 spin_lock_irqsave(&q->lock, flags); 754 spin_lock_irqsave(&q->lock, flags);
618 skb_queue_walk(q, skb) { 755 skb_queue_walk(q, skb) {
619 struct ieee80211_hdr *tx_hdr; 756 struct ieee80211_hdr *tx_hdr;
620 757
758 position ++;
759
760 if (mac->ack_pending && skb_queue_is_first(q, skb))
761 continue;
762
621 tx_hdr = (struct ieee80211_hdr *)skb->data; 763 tx_hdr = (struct ieee80211_hdr *)skb->data;
622 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN))) 764 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
623 { 765 {
624 __skb_unlink(skb, q); 766 found = 1;
625 tx_status(hw, skb, stats->signal, 1); 767 break;
626 goto out;
627 } 768 }
628 } 769 }
629out: 770
771 if (found) {
772 for (i=1; i<position; i++) {
773 skb = __skb_dequeue(q);
774 zd_mac_tx_status(hw, skb,
775 mac->ack_pending ? mac->ack_signal : 0,
776 NULL);
777 mac->ack_pending = 0;
778 }
779
780 mac->ack_pending = 1;
781 mac->ack_signal = stats->signal;
782 }
783
630 spin_unlock_irqrestore(&q->lock, flags); 784 spin_unlock_irqrestore(&q->lock, flags);
631 return 1; 785 return 1;
632} 786}
@@ -709,6 +863,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
709 skb_reserve(skb, 2); 863 skb_reserve(skb, 2);
710 } 864 }
711 865
866 /* FIXME : could we avoid this big memcpy ? */
712 memcpy(skb_put(skb, length), buffer, length); 867 memcpy(skb_put(skb, length), buffer, length);
713 868
714 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); 869 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
@@ -999,7 +1154,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
999 hw->queues = 1; 1154 hw->queues = 1;
1000 hw->extra_tx_headroom = sizeof(struct zd_ctrlset); 1155 hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
1001 1156
1157 /*
1158 * Tell mac80211 that we support multi rate retries
1159 */
1160 hw->max_rates = IEEE80211_TX_MAX_RATES;
1161 hw->max_rate_tries = 18; /* 9 rates * 2 retries/rate */
1162
1002 skb_queue_head_init(&mac->ack_wait_queue); 1163 skb_queue_head_init(&mac->ack_wait_queue);
1164 mac->ack_pending = 0;
1003 1165
1004 zd_chip_init(&mac->chip, hw, intf); 1166 zd_chip_init(&mac->chip, hw, intf);
1005 housekeeping_init(mac); 1167 housekeeping_init(mac);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 7c2759118d13..630c298a730e 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -140,6 +140,21 @@ struct rx_status {
140#define ZD_RX_CRC16_ERROR 0x40 140#define ZD_RX_CRC16_ERROR 0x40
141#define ZD_RX_ERROR 0x80 141#define ZD_RX_ERROR 0x80
142 142
143struct tx_retry_rate {
144 int count; /* number of valid element in rate[] array */
145 int rate[10]; /* retry rates, described by an index in zd_rates[] */
146};
147
148struct tx_status {
149 u8 type; /* must always be 0x01 : USB_INT_TYPE */
150 u8 id; /* must always be 0xa0 : USB_INT_ID_RETRY_FAILED */
151 u8 rate;
152 u8 pad;
153 u8 mac[ETH_ALEN];
154 u8 retry;
155 u8 failure;
156} __attribute__((packed));
157
143enum mac_flags { 158enum mac_flags {
144 MAC_FIXED_CHANNEL = 0x01, 159 MAC_FIXED_CHANNEL = 0x01,
145}; 160};
@@ -150,7 +165,7 @@ struct housekeeping {
150 165
151#define ZD_MAC_STATS_BUFFER_SIZE 16 166#define ZD_MAC_STATS_BUFFER_SIZE 16
152 167
153#define ZD_MAC_MAX_ACK_WAITERS 10 168#define ZD_MAC_MAX_ACK_WAITERS 50
154 169
155struct zd_mac { 170struct zd_mac {
156 struct zd_chip chip; 171 struct zd_chip chip;
@@ -184,6 +199,12 @@ struct zd_mac {
184 199
185 /* whether to pass control frames to stack */ 200 /* whether to pass control frames to stack */
186 unsigned int pass_ctrl:1; 201 unsigned int pass_ctrl:1;
202
203 /* whether we have received a 802.11 ACK that is pending */
204 unsigned int ack_pending:1;
205
206 /* signal strength of the last 802.11 ACK received */
207 int ack_signal;
187}; 208};
188 209
189#define ZD_REGDOMAIN_FCC 0x10 210#define ZD_REGDOMAIN_FCC 0x10
@@ -279,7 +300,7 @@ int zd_mac_preinit_hw(struct ieee80211_hw *hw);
279int zd_mac_init_hw(struct ieee80211_hw *hw); 300int zd_mac_init_hw(struct ieee80211_hw *hw);
280 301
281int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length); 302int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
282void zd_mac_tx_failed(struct ieee80211_hw *hw); 303void zd_mac_tx_failed(struct urb *urb);
283void zd_mac_tx_to_dev(struct sk_buff *skb, int error); 304void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
284 305
285#ifdef DEBUG 306#ifdef DEBUG
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 23a6a6d4863b..d46f20a57b7d 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -419,7 +419,7 @@ static void int_urb_complete(struct urb *urb)
419 handle_regs_int(urb); 419 handle_regs_int(urb);
420 break; 420 break;
421 case USB_INT_ID_RETRY_FAILED: 421 case USB_INT_ID_RETRY_FAILED:
422 zd_mac_tx_failed(zd_usb_to_hw(urb->context)); 422 zd_mac_tx_failed(urb);
423 break; 423 break;
424 default: 424 default:
425 dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb, 425 dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb,
@@ -553,6 +553,8 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
553 553
554 if (length < sizeof(struct rx_length_info)) { 554 if (length < sizeof(struct rx_length_info)) {
555 /* It's not a complete packet anyhow. */ 555 /* It's not a complete packet anyhow. */
556 printk("%s: invalid, small RX packet : %d\n",
557 __func__, length);
556 return; 558 return;
557 } 559 }
558 length_info = (struct rx_length_info *) 560 length_info = (struct rx_length_info *)
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index ff4617e21426..7c7914f5fa02 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -879,10 +879,10 @@ static struct pcmcia_device_id serial_ids[] = {
879 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"), 879 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"),
880 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), 880 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
881 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), 881 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
882 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ 882 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */
883 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ 883 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
884 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ 884 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
885 PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ 885 PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
886 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), 886 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"),
887 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"), 887 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"),
888 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), 888 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 538c570df337..f1dcd7969a5c 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -551,13 +551,13 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
551 might_sleep_if(pdev->id.coreid != SSB_DEV_PCI); 551 might_sleep_if(pdev->id.coreid != SSB_DEV_PCI);
552 552
553 /* Enable interrupts for this device. */ 553 /* Enable interrupts for this device. */
554 if (bus->host_pci && 554 if ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE)) {
555 ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) {
556 u32 coremask; 555 u32 coremask;
557 556
558 /* Calculate the "coremask" for the device. */ 557 /* Calculate the "coremask" for the device. */
559 coremask = (1 << dev->core_index); 558 coremask = (1 << dev->core_index);
560 559
560 SSB_WARN_ON(bus->bustype != SSB_BUSTYPE_PCI);
561 err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp); 561 err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp);
562 if (err) 562 if (err)
563 goto out; 563 goto out;